diff --git a/Gopkg.lock b/Gopkg.lock index 79ef59a11..a734b2c0d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,15 +2,23 @@ [[projects]] - digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57" + digest = "1:5ad08b0e14866764a6d7475eb11c9cf05cad9a52c442593bdfa544703ff77f61" name = "cloud.google.com/go" packages = ["compute/metadata"] pruneopts = "NUT" - revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374" - version = "v0.28.0" + revision = "74b12019e2aa53ec27882158f59192d7cd6d1998" + version = "v0.33.1" [[projects]] - digest = "1:9fe4851c1eb1ab8c7486fee4e2d06db0e6509d6772211177e631c1abfb41b720" + digest = "1:f323f98930459f65f4699b5bfba563743680e2633d96e72d61a9732648e2d07c" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "NUT" + revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d" + version = "v0.2.0" + +[[projects]] + digest = "1:fff60f8e65c264c3fe391e671cd84ce292a748c0a3fa19cdcef0cc34ffc123ae" name = "github.com/Azure/go-autorest" packages = [ "autorest", @@ -18,19 +26,19 @@ "autorest/azure", "autorest/date", "logger", - "version", + "tracing", ] pruneopts = "NUT" - revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6" - version = "v10.15.5" + revision = "f401b1ccc8eb505927fae7a0c7f6406d37ca1c7e" + version = "v11.2.8" [[projects]] - digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1" + digest = "1:d848e2bdc690ea54c4b49894b67a05db318a97ee6561879b814c2c1f82f61406" name = "github.com/Sirupsen/logrus" packages = ["."] pruneopts = "NUT" - revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d" - version = "v1.1.0" + revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" + version = "v1.2.0" [[projects]] branch = "master" @@ -48,6 +56,19 @@ pruneopts = "NUT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" +[[projects]] + digest = "1:65b0d980b428a6ad4425f2df4cd5410edd81f044cf527bd1c345368444649e58" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/trace/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "NUT" + revision = "7f2434bc10da710debe5c4315ed6d4df454b4024" + version = "v0.1.0" + [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" @@ -64,16 +85,6 @@ revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" -[[projects]] - digest = "1:4189ee6a3844f555124d9d2656fe7af02fca961c2a9bad9074789df13a0c62e0" - name = "github.com/docker/distribution" - packages = [ - "digestset", - "reference", - ] - pruneopts = "NUT" - revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" - [[projects]] digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482" name = "github.com/docker/go-units" @@ -84,14 +95,14 @@ [[projects]] branch = "master" - digest = "1:da25cf063072a10461c19320e82117d85f9d60be4c95a62bc8d5a49acf7d0ca5" + digest = "1:eb8f1b1913bffd6e788deee9fe4ba3a4d83267aff6045d3be33105e35ece290b" name = "github.com/docker/spdystream" packages = [ ".", "spdy", ] pruneopts = "NUT" - revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db" + revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" [[projects]] branch = "master" @@ -109,6 +120,14 @@ revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" version = "v1.1.0" +[[projects]] + digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f" + name = "github.com/evanphx/json-patch" + packages = ["."] + pruneopts = "NUT" + revision = 
"72bf35d0ff611848c1dc9df0f976c81192392fa5" + version = "v4.1.0" + [[projects]] digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" name = "github.com/fsnotify/fsnotify" @@ -125,14 +144,6 @@ pruneopts = "NUT" revision = "8306686428a5fe132eac8cb7c4848af725098bd4" -[[projects]] - digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "NUT" - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - [[projects]] digest = "1:cd4f86461732066e277335465962660cbf02999e18d5bbb5e9285eac4608b970" name = "github.com/gogo/protobuf" @@ -146,24 +157,16 @@ revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = "v1.1.1" -[[projects]] - branch = "master" - digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "NUT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - [[projects]] branch = "master" digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" name = "github.com/golang/groupcache" packages = ["lru"] pruneopts = "NUT" - revision = "6f2cf27854a4a29e3811b0371547be335d411b8b" + revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa" [[projects]] - digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62" + digest = "1:479e958ad7ae540d7a3c565d1839cc7c8ab9b627640144443f1e88d11d4023d0" name = "github.com/golang/protobuf" packages = [ "proto", @@ -171,6 +174,7 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", + "ptypes/wrappers", ] pruneopts = "NUT" revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" @@ -193,12 +197,12 @@ revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] - digest = "1:a1578f7323eca2b88021fdc9a79a99833d40b12c32a5ea4f284e2fad19ea2657" + digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1" name = "github.com/google/uuid" packages = ["."] pruneopts = "NUT" - revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494" - version = "v1.0.0" + revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" + version = "v1.1.0" [[projects]] digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" @@ -214,7 +218,7 @@ [[projects]] branch = "master" - digest = "1:8ccbd0e8ef8b6e59b331991fcb5477fb5e6c51b7d1be09789d7dc0a33e95bf6a" + digest = "1:3f5485f5f0ea50de409c25414eaf4154bd54b2fa9ef03fc4a0a278967daac906" name = "github.com/gophercloud/gophercloud" packages = [ ".", @@ -226,7 +230,7 @@ "pagination", ] pruneopts = "NUT" - revision = "39db44929bf62867520de8607bd519217a78f802" + revision = "8f4eb476f72cbb430de821134208c549a643e99b" [[projects]] branch = "master" @@ -237,7 +241,7 @@ "diskcache", ] pruneopts = "NUT" - revision = "9cad4c3443a7200dd6400aef47183728de563a38" + revision = "c63ab54fda8f77302f8d414e19933f2b6026a089" [[projects]] digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" @@ -281,12 +285,12 @@ version = "v1.1.5" [[projects]] - branch = "master" - digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7" + digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed" name = "github.com/konsorten/go-windows-terminal-sequences" packages = ["."] pruneopts = "NUT" - revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5" + revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" + version = "v1.0.1" [[projects]] branch = "master" @@ -325,11 +329,11 @@ [[projects]] branch = "master" - digest = 
"1:5fe20cfe4ef484c237cec9f947b2a6fa90bad4b8610fd014f0e4211e13d82d5d" + digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68" name = "github.com/mitchellh/mapstructure" packages = ["."] pruneopts = "NUT" - revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" [[projects]] digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" @@ -348,27 +352,47 @@ version = "1.0.1" [[projects]] - branch = "master" digest = "1:af70f88d68d35881112c13fa94ae3fcb53cfe14c4b8fb3d87a345bbf442d2747" name = "github.com/moul/http2curl" packages = ["."] pruneopts = "NUT" revision = "9ac6cf4d929b2fa8fd2d2e6dec5bb0feb4f4911d" + version = "v1.0.0" [[projects]] branch = "master" - digest = "1:7b782f694db508189ee26915bb12dcb74aefd5d96411ad8b04a12d880df1b810" + digest = "1:9e33629d4ec9e9344715a54fa0a107f23ce800deb13999b0190df04c3540ccb5" + name = "github.com/ncabatoff/go-seq" + packages = ["seq"] + pruneopts = "NUT" + revision = "b08ef85ed83364cba413c98a94bbd4169a0ce70b" + +[[projects]] + branch = "master" + digest = "1:fcdc1a06529f364e1ba0e8a85540ae7ebbbe2e4b00d40245b24d1b8a3907b2e6" name = "github.com/ncabatoff/process-exporter" packages = [ ".", "proc", ] pruneopts = "NUT" - revision = "5917bc766b95a1fa3c2ae85340f4de02a6b7e15e" - source = "github.com/aledbf/process-exporter" + revision = "bdf24ef23850ba2fc593f244256482375f7cbfcd" [[projects]] - digest = "1:0be1cd4c73d5e22a30edcf32a18e9809a370a7a4a52c4f41a86070b34da93fef" + branch = "add-proc-status" + digest = "1:75f0f2e92ea523185b28b9377984503a7b2be7c7451831eca5478b20998a3799" + name = "github.com/ncabatoff/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "NUT" + revision = "e1a38cb53622f65e073c5e750e6498a44ebfbd2a" + +[[projects]] + digest = "1:cdc5cfc04dd0b98f86433207fe6d9879757c46734441a08886acc11251c7ed4a" name = "github.com/onsi/ginkgo" packages = [ ".", @@ -391,11 +415,11 @@ "types", ] pruneopts = "NUT" - revision = "3774a09d95489ccaa16032e0770d08ea77ba6184" - version = "v1.6.0" + revision = "2e1be8f7d90e9d3e3e58b0ce470f2f14d075406f" + version = "v1.7.0" [[projects]] - digest = "1:95f40a9db820078d1795c7ba2d476016aca05dc4267eaf6752a925e437cb351f" + digest = "1:0db10c512b410a1ecd8845e31db52622c08b76198f7ab76afb25319c84a7fd4b" name = "github.com/onsi/gomega" packages = [ ".", @@ -412,16 +436,8 @@ "types", ] pruneopts = "NUT" - revision = "7615b9433f86a8bdf29709bf288bc4fd0636a369" - version = "v1.4.2" - -[[projects]] - digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0" - name = "github.com/opencontainers/go-digest" - packages = ["."] - pruneopts = "NUT" - revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" - version = "v1.0.0-rc1" + revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" + version = "v1.4.3" [[projects]] digest = "1:c9e0e109a897ef306f865e55e07ecb1c3024edafd103e1c2b1a06a852dc91cb3" @@ -484,7 +500,7 @@ [[projects]] branch = "master" - digest = "1:3b374d4b78e68dbab0cc07ce56e75d8b267ce1ac1de6c7b610191789abd3fee1" + digest = "1:69c96554af95dcf0afbabae18949708bd165a5c8393ee6e7299fc5a583f595a4" name = "github.com/prometheus/client_golang" packages = [ "prometheus", @@ -492,7 +508,7 @@ "prometheus/promhttp", ] pruneopts = "NUT" - revision = "2d5a6493f89f13c9779c2e097e0710e0f4478bae" + revision = "32b1bb4674c4db541df5b547bb1a325337522022" [[projects]] branch = "master" @@ -504,7 +520,7 @@ [[projects]] branch = "master" - digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b" 
+ digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -512,11 +528,11 @@ "model", ] pruneopts = "NUT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" + revision = "4724e9255275ce38f7179b2478abeae4e28c904f" [[projects]] branch = "master" - digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0" + digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6" name = "github.com/prometheus/procfs" packages = [ ".", @@ -525,7 +541,7 @@ "xfs", ] pruneopts = "NUT" - revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2" + revision = "aa55a523dc0a8297edf51bb75e8eec13eb3be45d" [[projects]] digest = "1:330e9062b308ac597e28485699c02223bd052437a6eed32a173c9227dcb9d95a" @@ -539,12 +555,12 @@ version = "v1.1.2" [[projects]] - digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675" + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" name = "github.com/spf13/pflag" packages = ["."] pruneopts = "NUT" - revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" - version = "v1.0.2" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" [[projects]] digest = "1:c10994a08ed2ff2cc7611d03ded8bb5f782096880b2daab391adbd9ab95a1764" @@ -555,21 +571,40 @@ version = "1.0.2" [[projects]] - branch = "master" - digest = "1:8e241498e35f550e5192ee6b1f6ff2c0a7ffe81feff9541d297facffe1383979" - name = "golang.org/x/crypto" + digest = "1:f2805adeca595d7dbd25173b57f83daaa79f44d43475263c4e34b05020eac9a7" + name = "go.opencensus.io" packages = [ - "ed25519", - "ed25519/internal/edwards25519", - "pbkdf2", - "ssh/terminal", + ".", + "exemplar", + "internal", + "internal/tagencoding", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", ] pruneopts = "NUT" - revision = "5295e8364332db77d75fce11f1d19c053919a9c9" + revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b" + version = "v0.18.0" [[projects]] branch = "master" - digest = "1:f7468b77e2eb541c768de1f12e3ad98debb07f1d49409f40f49b898588eca448" + digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NUT" + revision = "eb0de9b17e854e9b1ccd9963efafc79862359959" + +[[projects]] + branch = "master" + digest = "1:3441e889d2a118498de288fcfe0eb1726831a76c3285af5357674eb1cc150a0f" name = "golang.org/x/net" packages = [ "context", @@ -586,11 +621,11 @@ "trace", ] pruneopts = "NUT" - revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" + revision = "351d144fa1fc0bd934e2408202be0c29f25e35a0" [[projects]] branch = "master" - digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b" + digest = "1:169c27544e7f54a861a05cd84078b494eaf3e41543f0dfd4dd215fa32139cd40" name = "golang.org/x/oauth2" packages = [ ".", @@ -600,18 +635,26 @@ "jwt", ] pruneopts = "NUT" - revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" + revision = "28207608b83849a028d4f12e46533a6b6894ecaf" [[projects]] branch = "master" - digest = "1:c8c9c630940822c796f2cdb5f7218d9710ea7d544f379813d8680f07590c2fee" + digest = "1:5e4d81c50cffcb124b899e4f3eabec3930c73532f0096c27f94476728ba03028" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "NUT" + revision = "42b317875d0fa942474b76e1b46a6060d720ae6e" + +[[projects]] + 
branch = "master" + digest = "1:6ddfd101211f81df3ba1f474baf1c451f7708f01c1e0c4be49cd9f0af03596cf" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "NUT" - revision = "dad3d9fb7b6e83d0f9ac8f54670f6334c3a287b4" + revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31" [[projects]] digest = "1:8a12cbc891b7130d3f660f8a309e5c0b083f831e6ac38cdaa1f12e63c12d6bea" @@ -650,14 +693,22 @@ [[projects]] branch = "master" - digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" name = "golang.org/x/time" packages = ["rate"] pruneopts = "NUT" - revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" + revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" [[projects]] - digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f" + branch = "master" + digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "NUT" + revision = "af4fc4062c262223ddc2d92f5f35a93690db383a" + +[[projects]] + digest = "1:b63b351b57e64ae8aec1af030dc5e74a2676fcda62102f006ae4411fac1b04c8" name = "google.golang.org/appengine" packages = [ ".", @@ -672,8 +723,8 @@ "urlfetch", ] pruneopts = "NUT" - revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06" - version = "v1.2.0" + revision = "4a4468ece617fc8205e99368fa2200e9d1fad421" + version = "v1.3.0" [[projects]] branch = "master" @@ -681,10 +732,10 @@ name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "NUT" - revision = "0e822944c569bf5c9afd034adaa56208bd2906ac" + revision = "31ac5d88444a9e7ad18077db9a165d793ad06a2e" [[projects]] - digest = "1:5b805b8e03b29399b344655cac16873f026e54dc0a7c17b381f6f4d4c7b6d741" + digest = "1:2f91d3e11b666570f8c923912f1cc8cf2f0c6b7371b2687ee67a8f73f08c6272" name = "google.golang.org/grpc" packages = [ ".", @@ -715,8 +766,8 @@ "tap", ] pruneopts = "NUT" - revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" - version = "v1.15.0" + revision = "2e463a05d100327ca47ac218281906921038fd95" + version = "v1.16.0" [[projects]] digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" @@ -751,19 +802,6 @@ revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" -[[projects]] - digest = "1:812f9446bc99ebd1c66c55fa456ff7843f7105d22f11f0a2098bced37e9c6d32" - name = "gopkg.in/square/go-jose.v2" - packages = [ - ".", - "cipher", - "json", - "jwt", - ] - pruneopts = "NUT" - revision = "ef984e69dd356202fd4e4910d4d9c24468bdf0b8" - version = "v2.1.9" - [[projects]] branch = "v1" digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" @@ -773,15 +811,15 @@ revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" [[projects]] - digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082" + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "NUT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" [[projects]] - digest = "1:c6b0cfc418f5e8bb9169d78f1024cb1c9f3e61c9a76235134d26d3f28ecdf27b" + digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -789,6 +827,7 @@ "apps/v1", "apps/v1beta1", "apps/v1beta2", + "auditregistration/v1alpha1", "authentication/v1", 
"authentication/v1beta1", "authorization/v1", @@ -817,10 +856,10 @@ "storage/v1beta1", ] pruneopts = "NUT" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[projects]] - digest = "1:6863750b53ac3e57c4ea2b068c6c07d3b42a6dc965f64c5fdd56ae2ab768deb5" + digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c" name = "k8s.io/apiextensions-apiserver" packages = [ "pkg/apis/apiextensions", @@ -828,24 +867,20 @@ "pkg/client/clientset/clientset", "pkg/client/clientset/clientset/scheme", "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - "pkg/features", ] pruneopts = "NUT" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[projects]] - digest = "1:f48552d381c18aebe56e96a8b0478430def42a30ac20945f769eb90ada979b52" + digest = "1:692e27ed8a5eb2d74bde52d323d428814cd9a6e0f726d02ffd60fda7819e1ee7" name = "k8s.io/apimachinery" packages = [ - "pkg/api/equality", "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", - "pkg/api/validation", "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1/validation", "pkg/apis/meta/v1beta1", "pkg/conversion", "pkg/conversion/queryparams", @@ -873,7 +908,6 @@ "pkg/util/mergepatch", "pkg/util/naming", "pkg/util/net", - "pkg/util/rand", "pkg/util/remotecommand", "pkg/util/runtime", "pkg/util/sets", @@ -890,25 +924,20 @@ "third_party/forked/golang/reflect", ] pruneopts = "NUT" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[projects]] - digest = "1:8eaab7022d2018a1e398e049328f9ae6c35812bb3373441194f12d916d3b8140" + digest = "1:cc0487260dc4ffb2b513273ad8438497b8df2d8c0de90aaf03d22cc5b58e3fe1" name = "k8s.io/apiserver" packages = [ - "pkg/authentication/authenticator", - "pkg/authentication/serviceaccount", - "pkg/authentication/user", - "pkg/features", "pkg/server/healthz", - "pkg/util/feature", "pkg/util/logs", ] pruneopts = "NUT" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[projects]] - digest = "1:8e32eb6edca8a05b29222291da9447034c71d7524705b28db7d34366fb393b3c" + digest = "1:14961132526c5e588ccfa30efd6c977db308c1b1fb83ad4043c3a92c961521ae" name = "k8s.io/client-go" packages = [ "discovery", @@ -921,6 +950,8 @@ "informers/apps/v1", "informers/apps/v1beta1", "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", "informers/autoscaling", "informers/autoscaling/v1", "informers/autoscaling/v2beta1", @@ -970,6 +1001,8 @@ "kubernetes/typed/apps/v1beta1/fake", "kubernetes/typed/apps/v1beta2", "kubernetes/typed/apps/v1beta2/fake", + "kubernetes/typed/auditregistration/v1alpha1", + "kubernetes/typed/auditregistration/v1alpha1/fake", "kubernetes/typed/authentication/v1", "kubernetes/typed/authentication/v1/fake", "kubernetes/typed/authentication/v1beta1", @@ -1027,6 +1060,7 @@ "listers/apps/v1", "listers/apps/v1beta1", "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", "listers/autoscaling/v1", "listers/autoscaling/v2beta1", "listers/autoscaling/v2beta2", @@ -1076,12 +1110,10 @@ "tools/record", "tools/reference", "tools/remotecommand", - "tools/watch", "transport", "transport/spdy", "util/buffer", "util/cert", - "util/cert/triple", "util/connrotation", "util/exec", "util/flowcontrol", @@ -1092,11 +1124,19 @@ "util/workqueue", ] pruneopts = "NUT" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[projects]] branch = "master" - digest = "1:9ca673cabdf1a203b841f27246e7f1211a6839a251bcd34c47d90a8e170fcf27" + digest = 
"1:aac84f18a5f8ef84b0048e4d8856521ef3d33cd50fd839326fa92befcd92bfd4" + name = "k8s.io/cloud-provider" + packages = ["."] + pruneopts = "NUT" + revision = "9b77dc1c384685cb732b3025ed5689dd597a5971" + +[[projects]] + branch = "master" + digest = "1:741fc393d821a1acb7f8c85cd3cf8675f2c7f08a47096c4ca7ef6229f5acc763" name = "k8s.io/csi-api" packages = [ "pkg/apis/csi/v1alpha1", @@ -1105,83 +1145,64 @@ "pkg/client/clientset/versioned/typed/csi/v1alpha1", ] pruneopts = "NUT" - revision = "31ae05d8096db803f5b4ff16cda6059c0a9cc861" + revision = "61a1735c3f5028c6dd5dc37e0f5573d8872507ba" + +[[projects]] + digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NUT" + revision = "a5bc97fbc634d635061f3146511332c7e313a55a" + version = "v0.1.0" [[projects]] branch = "master" - digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54" + digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] pruneopts = "NUT" - revision = "e3762e86a74c878ffed47484592986685639c2cd" + revision = "0317810137be915b9cf888946c6e115c1bfac693" [[projects]] - digest = "1:75158cef6a899b0b047de1cb2b3a585eca459e64e797f99d89261b92674b8c2b" + digest = "1:df9bd8d59539e980d5d565b00a6d45e445e62d8c89d60fe0fa520ad6e54d32e4" name = "k8s.io/kubernetes" packages = [ "pkg/api/legacyscheme", - "pkg/api/service", "pkg/api/v1/pod", - "pkg/apis/autoscaling", - "pkg/apis/core", - "pkg/apis/core/helper", - "pkg/apis/core/install", - "pkg/apis/core/pods", - "pkg/apis/core/v1", - "pkg/apis/core/v1/helper", - "pkg/apis/core/validation", - "pkg/apis/extensions", - "pkg/apis/networking", - "pkg/apis/policy", - "pkg/apis/scheduling", - "pkg/capabilities", - "pkg/cloudprovider", - "pkg/controller", - "pkg/features", - "pkg/fieldpath", - "pkg/kubelet/apis", "pkg/kubelet/apis/cri/runtime/v1alpha2", "pkg/kubelet/container", - "pkg/kubelet/types", "pkg/kubelet/util/format", "pkg/kubelet/util/sliceutils", - "pkg/master/ports", - "pkg/scheduler/algorithm", - "pkg/scheduler/algorithm/priorities/util", - "pkg/scheduler/api", - "pkg/scheduler/cache", - "pkg/scheduler/util", - "pkg/security/apparmor", - "pkg/serviceaccount", "pkg/util/file", "pkg/util/filesystem", "pkg/util/hash", "pkg/util/io", "pkg/util/mount", - "pkg/util/net/sets", - "pkg/util/node", "pkg/util/nsenter", - "pkg/util/parsers", "pkg/util/sysctl", - "pkg/util/taints", "pkg/volume", "pkg/volume/util/fs", "pkg/volume/util/recyclerclient", "third_party/forked/golang/expansion", ] pruneopts = "NUT" - revision = "v1.12.1" + revision = "v1.13.0-rc.2" [[projects]] branch = "master" - digest = "1:b8bb2923aa316490408300d029eeb4f566d54269e91242eeef680c70c2a1c041" + digest = "1:381323c2fe2e890a3dd3b5d6dc6f2199068408cca89b24f6b7ca1c60f32644a5" name = "k8s.io/utils" - packages = [ - "exec", - "pointer", - ] + packages = ["exec"] pruneopts = "NUT" - revision = "cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb" + revision = "0d26856f57b32ec3398579285e5c8a2bfe8c5243" + +[[projects]] + digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" + name = "sigs.k8s.io/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" [solve-meta] analyzer-name = "dep" @@ -1189,7 +1210,6 @@ input-imports = [ "github.com/armon/go-proxyproto", "github.com/eapache/channels", - "github.com/golang/glog", "github.com/imdario/mergo", "github.com/json-iterator/go", 
"github.com/kylelemons/godebug/pretty", @@ -1221,6 +1241,7 @@ "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/fields", "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/runtime", @@ -1228,6 +1249,7 @@ "k8s.io/apimachinery/pkg/util/uuid", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/version", + "k8s.io/apimachinery/pkg/watch", "k8s.io/apiserver/pkg/server/healthz", "k8s.io/apiserver/pkg/util/logs", "k8s.io/client-go/informers", @@ -1244,9 +1266,9 @@ "k8s.io/client-go/tools/leaderelection/resourcelock", "k8s.io/client-go/tools/record", "k8s.io/client-go/util/cert", - "k8s.io/client-go/util/cert/triple", "k8s.io/client-go/util/flowcontrol", "k8s.io/client-go/util/workqueue", + "k8s.io/klog", "k8s.io/kubernetes/pkg/api/v1/pod", "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", "k8s.io/kubernetes/pkg/util/filesystem", diff --git a/Gopkg.toml b/Gopkg.toml index b61b20d2e..5ad1e3b38 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -39,6 +39,10 @@ name = "gopkg.in/fsnotify.v1" source = "https://github.com/fsnotify/fsnotify.git" +[[override]] + name = "github.com/golang/glog" + source = "k8s.io/klog/glog" + [[constraint]] name = "github.com/eapache/channels" branch = "master" @@ -47,10 +51,6 @@ branch = "master" name = "github.com/armon/go-proxyproto" -[[constraint]] - branch = "master" - name = "github.com/golang/glog" - [[constraint]] name = "github.com/imdario/mergo" version = "0.2.4" @@ -69,7 +69,6 @@ [[constraint]] name = "github.com/ncabatoff/process-exporter" - source = "github.com/aledbf/process-exporter" branch = "master" [[constraint]] @@ -84,10 +83,6 @@ name = "github.com/prometheus/client_golang" branch = "master" -[[constraint]] - name = "github.com/spf13/pflag" - version = "1.0.0" - [[constraint]] name = "github.com/zakjan/cert-chain-resolver" version = "1.0.1" @@ -102,24 +97,24 @@ [[constraint]] name = "k8s.io/kubernetes" - revision = "v1.12.1" + revision = "v1.13.0-rc.2" [[constraint]] name = "k8s.io/api" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[constraint]] name = "k8s.io/apimachinery" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[constraint]] name = "k8s.io/client-go" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[constraint]] name = "k8s.io/apiextensions-apiserver" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" [[constraint]] name = "k8s.io/apiserver" - revision = "kubernetes-1.12.1" + revision = "kubernetes-1.13.0-rc.2" diff --git a/cmd/nginx/flags.go b/cmd/nginx/flags.go index f02cb898c..b2e491cf5 100644 --- a/cmd/nginx/flags.go +++ b/cmd/nginx/flags.go @@ -21,10 +21,10 @@ import ( "fmt" "os" - "github.com/golang/glog" "github.com/spf13/pflag" apiv1 "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/ingress/annotations/class" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" @@ -168,7 +168,7 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en flag.CommandLine.Parse([]string{}) pflag.VisitAll(func(flag *pflag.Flag) { - glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) if *showVersion { @@ -176,10 +176,10 @@ Feature backed by OpenResty Lua libraries. 
Requires that OCSP stapling is not en } if *ingressClass != "" { - glog.Infof("Watching for Ingress class: %s", *ingressClass) + klog.Infof("Watching for Ingress class: %s", *ingressClass) if *ingressClass != class.DefaultClass { - glog.Warningf("Only Ingresses with class %q will be processed by this Ingress controller", *ingressClass) + klog.Warningf("Only Ingresses with class %q will be processed by this Ingress controller", *ingressClass) } class.IngressClass = *ingressClass @@ -209,7 +209,7 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en } if !*enableSSLChainCompletion { - glog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)") + klog.Warningf("SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)") } if *enableSSLChainCompletion && *dynamicCertificatesEnabled { diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index 5cd5098c1..8ababca87 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -28,7 +28,6 @@ import ( "syscall" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -38,6 +37,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress/controller" @@ -59,6 +59,8 @@ const ( ) func main() { + klog.InitFlags(nil) + rand.Seed(time.Now().UnixNano()) fmt.Println(version.String()) @@ -69,14 +71,14 @@ func main() { } if err != nil { - glog.Fatal(err) + klog.Fatal(err) } nginxVersion() fs, err := file.NewLocalFS() if err != nil { - glog.Fatal(err) + klog.Fatal(err) } kubeClient, err := createApiserverClient(conf.APIServerHost, conf.KubeConfigFile) @@ -87,24 +89,24 @@ func main() { if len(conf.DefaultService) > 0 { defSvcNs, defSvcName, err := k8s.ParseNameNS(conf.DefaultService) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } _, err = kubeClient.CoreV1().Services(defSvcNs).Get(defSvcName, metav1.GetOptions{}) if err != nil { // TODO (antoineco): compare with error types from k8s.io/apimachinery/pkg/api/errors if strings.Contains(err.Error(), "cannot get services in the namespace") { - glog.Fatal("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.") + klog.Fatal("✖ The cluster seems to be running with a restrictive Authorization mode and the Ingress controller does not have the required permissions to operate normally.") } - glog.Fatalf("No service with name %v found: %v", conf.DefaultService, err) + klog.Fatalf("No service with name %v found: %v", conf.DefaultService, err) } - glog.Infof("Validated %v as the default backend.", conf.DefaultService) + klog.Infof("Validated %v as the default backend.", conf.DefaultService) } if conf.Namespace != "" { _, err = kubeClient.CoreV1().Namespaces().Get(conf.Namespace, metav1.GetOptions{}) if err != nil { - glog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err) + klog.Fatalf("No namespace with name %v found: %v", conf.Namespace, err) } } @@ -112,7 +114,7 @@ func main() { defCert, defKey := ssl.GetFakeSSLCert() c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{}, fs) if err != nil { - glog.Fatalf("Error generating self-signed certificate: %v", err) + klog.Fatalf("Error generating self-signed certificate: %v", err) } conf.FakeCertificatePath = 
c.PemFileName @@ -132,7 +134,7 @@ func main() { if conf.EnableMetrics { mc, err = metric.NewCollector(conf.ListenPorts.Status, reg) if err != nil { - glog.Fatalf("Error creating prometheus collector: %v", err) + klog.Fatalf("Error creating prometheus collector: %v", err) } } mc.Start() @@ -163,18 +165,18 @@ func handleSigterm(ngx *controller.NGINXController, exit exiter) { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGTERM) <-signalChan - glog.Info("Received SIGTERM, shutting down") + klog.Info("Received SIGTERM, shutting down") exitCode := 0 if err := ngx.Stop(); err != nil { - glog.Infof("Error during shutdown: %v", err) + klog.Infof("Error during shutdown: %v", err) exitCode = 1 } - glog.Info("Handled quit, awaiting Pod deletion") + klog.Info("Handled quit, awaiting Pod deletion") time.Sleep(10 * time.Second) - glog.Infof("Exiting with %v", exitCode) + klog.Infof("Exiting with %v", exitCode) exit(exitCode) } @@ -196,7 +198,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client cfg.Burst = defaultBurst cfg.ContentType = "application/vnd.kubernetes.protobuf" - glog.Infof("Creating API client for %s", cfg.Host) + klog.Infof("Creating API client for %s", cfg.Host) client, err := kubernetes.NewForConfig(cfg) if err != nil { @@ -216,7 +218,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client var lastErr error retries := 0 - glog.V(2).Info("Trying to discover Kubernetes version") + klog.V(2).Info("Trying to discover Kubernetes version") err = wait.ExponentialBackoff(defaultRetry, func() (bool, error) { v, err = client.Discovery().ServerVersion() @@ -225,7 +227,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client } lastErr = err - glog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", retries, err) + klog.V(2).Infof("Unexpected error discovering Kubernetes version (attempt %v): %v", retries, err) retries++ return false, nil }) @@ -237,10 +239,10 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client // this should not happen, warn the user if retries > 0 { - glog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries) + klog.Warningf("Initial connection to the Kubernetes API server was retried %d times.", retries) } - glog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", + klog.Infof("Running in Kubernetes cluster version v%v.%v (%v) - git (%v) commit %v - platform %v", v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform) return client, nil @@ -248,7 +250,7 @@ func createApiserverClient(apiserverHost, kubeConfig string) (*kubernetes.Client // Handler for fatal init errors. Prints a verbose error message and exits. func handleFatalInitError(err error) { - glog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+ + klog.Fatalf("Error while initiating a connection to the Kubernetes API server. "+ "This could mean the cluster is misconfigured (e.g. it has invalid API server certificates "+ "or Service Accounts configuration). 
Reason: %s\n"+ "Refer to the troubleshooting guide for more information: "+ @@ -266,7 +268,7 @@ func registerHandlers(mux *http.ServeMux) { mux.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) { err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM) if err != nil { - glog.Errorf("Unexpected error: %v", err) + klog.Errorf("Unexpected error: %v", err) } }) } @@ -312,5 +314,5 @@ func startHTTPServer(port int, mux *http.ServeMux) { WriteTimeout: 300 * time.Second, IdleTimeout: 120 * time.Second, } - glog.Fatal(server.ListenAndServe()) + klog.Fatal(server.ListenAndServe()) } diff --git a/cmd/nginx/main_test.go b/cmd/nginx/main_test.go index f5ee95ca8..c4236ebfc 100644 --- a/cmd/nginx/main_test.go +++ b/cmd/nginx/main_test.go @@ -18,17 +18,18 @@ package main import ( "fmt" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/ingress-nginx/internal/file" - "k8s.io/ingress-nginx/internal/ingress/controller" "os" "syscall" "testing" "time" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + + "k8s.io/ingress-nginx/internal/file" + "k8s.io/ingress-nginx/internal/ingress/controller" ) func TestCreateApiserverClient(t *testing.T) { diff --git a/cmd/nginx/nginx.go b/cmd/nginx/nginx.go index 7f4983a10..c314cbe77 100644 --- a/cmd/nginx/nginx.go +++ b/cmd/nginx/nginx.go @@ -20,13 +20,13 @@ import ( "os" "os/exec" - "github.com/golang/glog" + "k8s.io/klog" ) func nginxVersion() { flag := "-v" - if glog.V(2) { + if klog.V(2) { flag = "-V" } diff --git a/internal/file/file.go b/internal/file/file.go index 7b434230b..9939df944 100644 --- a/internal/file/file.go +++ b/internal/file/file.go @@ -19,8 +19,8 @@ package file import ( "crypto/sha1" "encoding/hex" - "github.com/golang/glog" "io/ioutil" + "k8s.io/klog" ) // SHA1 returns the SHA1 of a file. @@ -28,7 +28,7 @@ func SHA1(filename string) string { hasher := sha1.New() s, err := ioutil.ReadFile(filename) if err != nil { - glog.Errorf("Error reading file %v", err) + klog.Errorf("Error reading file %v", err) return "" } diff --git a/internal/ingress/annotations/annotations.go b/internal/ingress/annotations/annotations.go index f0ab709cd..5365fbe9a 100644 --- a/internal/ingress/annotations/annotations.go +++ b/internal/ingress/annotations/annotations.go @@ -17,11 +17,11 @@ limitations under the License. 
package annotations import ( - "github.com/golang/glog" "github.com/imdario/mergo" "k8s.io/ingress-nginx/internal/ingress/annotations/canary" "k8s.io/ingress-nginx/internal/ingress/annotations/modsecurity" "k8s.io/ingress-nginx/internal/ingress/annotations/sslcipher" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" @@ -156,7 +156,7 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { data := make(map[string]interface{}) for name, annotationParser := range e.annotations { val, err := annotationParser.Parse(ing) - glog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val) + klog.V(5).Infof("annotation %v in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), val) if err != nil { if errors.IsMissingAnnotations(err) { continue @@ -177,11 +177,11 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { _, alreadyDenied := data[DeniedKeyName] if !alreadyDenied { data[DeniedKeyName] = err - glog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) + klog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) continue } - glog.V(5).Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) + klog.V(5).Infof("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) } if val != nil { @@ -191,7 +191,7 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { err := mergo.MapWithOverwrite(pia, data) if err != nil { - glog.Errorf("unexpected error merging extracted annotations: %v", err) + klog.Errorf("unexpected error merging extracted annotations: %v", err) } return pia diff --git a/internal/ingress/annotations/authreq/main.go b/internal/ingress/annotations/authreq/main.go index e6aebedc4..c5cdbc334 100644 --- a/internal/ingress/annotations/authreq/main.go +++ b/internal/ingress/annotations/authreq/main.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" extensions "k8s.io/api/extensions/v1beta1" @@ -146,12 +146,12 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) { // Optional Parameters signIn, err := parser.GetStringAnnotation("auth-signin", ing) if err != nil { - glog.Warning("auth-signin annotation is undefined and will not be set") + klog.Warning("auth-signin annotation is undefined and will not be set") } authSnippet, err := parser.GetStringAnnotation("auth-snippet", ing) if err != nil { - glog.Warning("auth-snippet annotation is undefined and will not be set") + klog.Warning("auth-snippet annotation is undefined and will not be set") } responseHeaders := []string{} diff --git a/internal/ingress/annotations/backendprotocol/main.go b/internal/ingress/annotations/backendprotocol/main.go index 4f9417a2f..a1b9819c7 100644 --- a/internal/ingress/annotations/backendprotocol/main.go +++ b/internal/ingress/annotations/backendprotocol/main.go @@ -20,8 +20,8 @@ import ( "regexp" "strings" - "github.com/golang/glog" extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -57,7 +57,7 @@ func (a backendProtocol) Parse(ing *extensions.Ingress) (interface{}, error) { proto = strings.TrimSpace(strings.ToUpper(proto)) if !validProtocols.MatchString(proto) { - glog.Warningf("Protocol %v is not a valid value for the backend-protocol 
annotation. Using HTTP as protocol", proto) + klog.Warningf("Protocol %v is not a valid value for the backend-protocol annotation. Using HTTP as protocol", proto) return HTTP, nil } diff --git a/internal/ingress/annotations/class/main.go b/internal/ingress/annotations/class/main.go index 6b5a6d06c..d76d2be52 100644 --- a/internal/ingress/annotations/class/main.go +++ b/internal/ingress/annotations/class/main.go @@ -17,8 +17,8 @@ limitations under the License. package class import ( - "github.com/golang/glog" extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/klog" ) const ( @@ -44,7 +44,7 @@ var ( func IsValid(ing *extensions.Ingress) bool { ingress, ok := ing.GetAnnotations()[IngressKey] if !ok { - glog.V(3).Infof("annotation %v is not present in ingress %v/%v", IngressKey, ing.Namespace, ing.Name) + klog.V(3).Infof("annotation %v is not present in ingress %v/%v", IngressKey, ing.Namespace, ing.Name) } // we have 2 valid combinations diff --git a/internal/ingress/annotations/sessionaffinity/main.go b/internal/ingress/annotations/sessionaffinity/main.go index f408f2708..098214a36 100644 --- a/internal/ingress/annotations/sessionaffinity/main.go +++ b/internal/ingress/annotations/sessionaffinity/main.go @@ -19,9 +19,8 @@ package sessionaffinity import ( "regexp" - "github.com/golang/glog" - extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" "k8s.io/ingress-nginx/internal/ingress/resolver" @@ -87,31 +86,31 @@ func (a affinity) cookieAffinityParse(ing *extensions.Ingress) *Cookie { cookie.Name, err = parser.GetStringAnnotation(annotationAffinityCookieName, ing) if err != nil { - glog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName) + klog.V(3).Infof("Ingress %v: No value found in annotation %v. Using the default %v", ing.Name, annotationAffinityCookieName, defaultAffinityCookieName) cookie.Name = defaultAffinityCookieName } cookie.Hash, err = parser.GetStringAnnotation(annotationAffinityCookieHash, ing) if err != nil || !affinityCookieHashRegex.MatchString(cookie.Hash) { - glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash) + klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Setting it to default %v", ing.Name, annotationAffinityCookieHash, defaultAffinityCookieHash) cookie.Hash = defaultAffinityCookieHash } cookie.Expires, err = parser.GetStringAnnotation(annotationAffinityCookieExpires, ing) if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.Expires) { - glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieExpires) + klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieExpires) cookie.Expires = "" } cookie.MaxAge, err = parser.GetStringAnnotation(annotationAffinityCookieMaxAge, ing) if err != nil || !affinityCookieExpiresRegex.MatchString(cookie.MaxAge) { - glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge) + klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. 
Ignoring it", ing.Name, annotationAffinityCookieMaxAge) cookie.MaxAge = "" } cookie.Path, err = parser.GetStringAnnotation(annotationAffinityCookiePath, ing) if err != nil { - glog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge) + klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge) } return cookie @@ -140,7 +139,7 @@ func (a affinity) Parse(ing *extensions.Ingress) (interface{}, error) { case "cookie": cookie = a.cookieAffinityParse(ing) default: - glog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name) + klog.V(3).Infof("No default affinity was found for Ingress %v", ing.Name) } diff --git a/internal/ingress/controller/checker.go b/internal/ingress/controller/checker.go index 6dd6b3be0..9ab499888 100644 --- a/internal/ingress/controller/checker.go +++ b/internal/ingress/controller/checker.go @@ -59,7 +59,7 @@ func (n *NGINXController) Check(_ *http.Request) error { } // check the nginx master process is running - fs, err := proc.NewFS("/proc") + fs, err := proc.NewFS("/proc", false) if err != nil { return errors.Wrap(err, "unexpected error reading /proc directory") } diff --git a/internal/ingress/controller/config/config.go b/internal/ingress/controller/config/config.go index bd310e3be..84ea2f516 100644 --- a/internal/ingress/controller/config/config.go +++ b/internal/ingress/controller/config/config.go @@ -21,7 +21,7 @@ import ( "strconv" "time" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" @@ -675,7 +675,7 @@ func NewDefault() Configuration { NoAuthLocations: "/.well-known/acme-challenge", } - if glog.V(5) { + if klog.V(5) { cfg.ErrorLogLevel = "debug" } diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 12dd50d56..4102fe57b 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/mitchellh/hashstructure" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" @@ -143,7 +143,7 @@ func (n *NGINXController) syncIngress(interface{}) error { for _, loc := range server.Locations { if loc.Path != rootLocation { - glog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname) + klog.Warningf("Ignoring SSL Passthrough for location %q in server %q", loc.Path, server.Hostname) continue } passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{ @@ -166,12 +166,12 @@ func (n *NGINXController) syncIngress(interface{}) error { } if n.runningConfig.Equal(pcfg) { - glog.V(3).Infof("No configuration change detected, skipping backend reload.") + klog.V(3).Infof("No configuration change detected, skipping backend reload.") return nil } if !n.IsDynamicConfigurationEnough(pcfg) { - glog.Infof("Configuration changes detected, backend reload required.") + klog.Infof("Configuration changes detected, backend reload required.") hash, _ := hashstructure.Hash(pcfg, &hashstructure.HashOptions{ TagName: "json", @@ -183,13 +183,13 @@ func (n *NGINXController) syncIngress(interface{}) error { if err != nil { n.metricCollector.IncReloadErrorCount() n.metricCollector.ConfigSuccess(hash, false) - glog.Errorf("Unexpected failure reloading the backend:\n%v", err) + klog.Errorf("Unexpected failure reloading the backend:\n%v", err) return err } n.metricCollector.SetHosts(hosts) 
- glog.Infof("Backend successfully reloaded.") + klog.Infof("Backend successfully reloaded.") n.metricCollector.ConfigSuccess(hash, true) n.metricCollector.IncReloadCount() n.metricCollector.SetSSLExpireTime(servers) @@ -201,7 +201,7 @@ func (n *NGINXController) syncIngress(interface{}) error { // start listening on the configured port (default 18080) // For large configurations it might take a while so we loop // and back off - glog.Info("Initial sync, sleeping for 1 second.") + klog.Info("Initial sync, sleeping for 1 second.") time.Sleep(1 * time.Second) } @@ -215,15 +215,15 @@ func (n *NGINXController) syncIngress(interface{}) error { err := wait.ExponentialBackoff(retry, func() (bool, error) { err := configureDynamically(pcfg, n.cfg.ListenPorts.Status, n.cfg.DynamicCertificatesEnabled) if err == nil { - glog.V(2).Infof("Dynamic reconfiguration succeeded.") + klog.V(2).Infof("Dynamic reconfiguration succeeded.") return true, nil } - glog.Warningf("Dynamic reconfiguration failed: %v", err) + klog.Warningf("Dynamic reconfiguration failed: %v", err) return false, err }) if err != nil { - glog.Errorf("Unexpected failure reconfiguring NGINX:\n%v", err) + klog.Errorf("Unexpected failure reconfiguring NGINX:\n%v", err) return err } @@ -240,15 +240,15 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr if configmapName == "" { return []ingress.L4Service{} } - glog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName) + klog.V(3).Infof("Obtaining information about %v stream services from ConfigMap %q", proto, configmapName) _, _, err := k8s.ParseNameNS(configmapName) if err != nil { - glog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err) + klog.Errorf("Error parsing ConfigMap reference %q: %v", configmapName, err) return []ingress.L4Service{} } configmap, err := n.store.GetConfigMap(configmapName) if err != nil { - glog.Errorf("Error getting ConfigMap %q: %v", configmapName, err) + klog.Errorf("Error getting ConfigMap %q: %v", configmapName, err) return []ingress.L4Service{} } var svcs []ingress.L4Service @@ -266,16 +266,16 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr for port, svcRef := range configmap.Data { externalPort, err := strconv.Atoi(port) if err != nil { - glog.Warningf("%q is not a valid %v port number", port, proto) + klog.Warningf("%q is not a valid %v port number", port, proto) continue } if reserverdPorts.Has(externalPort) { - glog.Warningf("Port %d cannot be used for %v stream services. It is reserved for the Ingress controller.", externalPort, proto) + klog.Warningf("Port %d cannot be used for %v stream services. 
It is reserved for the Ingress controller.", externalPort, proto) continue } nsSvcPort := strings.Split(svcRef, ":") if len(nsSvcPort) < 2 { - glog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort) + klog.Warningf("Invalid Service reference %q for %v port %d", svcRef, proto, externalPort) continue } nsName := nsSvcPort[0] @@ -293,19 +293,19 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr } svcNs, svcName, err := k8s.ParseNameNS(nsName) if err != nil { - glog.Warningf("%v", err) + klog.Warningf("%v", err) continue } svc, err := n.store.GetService(nsName) if err != nil { - glog.Warningf("Error getting Service %q: %v", nsName, err) + klog.Warningf("Error getting Service %q: %v", nsName, err) continue } var endps []ingress.Endpoint targetPort, err := strconv.Atoi(svcPort) if err != nil { // not a port number, fall back to using port name - glog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName) + klog.V(3).Infof("Searching Endpoints with %v port name %q for Service %q", proto, svcPort, nsName) for _, sp := range svc.Spec.Ports { if sp.Name == svcPort { if sp.Protocol == proto { @@ -315,7 +315,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr } } } else { - glog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName) + klog.V(3).Infof("Searching Endpoints with %v port number %d for Service %q", proto, targetPort, nsName) for _, sp := range svc.Spec.Ports { if sp.Port == int32(targetPort) { if sp.Protocol == proto { @@ -328,7 +328,7 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr // stream services cannot contain empty upstreams and there is // no default backend equivalent if len(endps) == 0 { - glog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort) + klog.Warningf("Service %q does not have any active Endpoint for %v port %v", nsName, proto, svcPort) continue } svcs = append(svcs, ingress.L4Service{ @@ -365,14 +365,14 @@ func (n *NGINXController) getDefaultUpstream() *ingress.Backend { svc, err := n.store.GetService(svcKey) if err != nil { - glog.Warningf("Error getting default backend %q: %v", svcKey, err) + klog.Warningf("Error getting default backend %q: %v", svcKey, err) upstream.Endpoints = append(upstream.Endpoints, n.DefaultEndpoint()) return upstream } endps := getEndpoints(svc, &svc.Spec.Ports[0], apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("Service %q does not have any active Endpoint", svcKey) + klog.Warningf("Service %q does not have any active Endpoint", svcKey) endps = []ingress.Endpoint{n.DefaultEndpoint()} } @@ -406,7 +406,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in if rule.HTTP == nil && host != defServerName { - glog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) + klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) continue } @@ -417,16 +417,16 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in if server.CertificateAuth.CAFileName == "" { server.CertificateAuth = anns.CertificateAuth if server.CertificateAuth.Secret != "" && server.CertificateAuth.CAFileName == "" { - glog.V(3).Infof("Secret %q has no 'ca.crt' key, mutual authentication disabled for Ingress %q", + klog.V(3).Infof("Secret %q has no 'ca.crt' key, 
mutual authentication disabled for Ingress %q", server.CertificateAuth.Secret, ingKey) } } else { - glog.V(3).Infof("Server %q is already configured for mutual authentication (Ingress %q)", + klog.V(3).Infof("Server %q is already configured for mutual authentication (Ingress %q)", server.Hostname, ingKey) } if rule.HTTP == nil { - glog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) + klog.V(3).Infof("Ingress %q does not contain any HTTP rule, using default backend", ingKey) continue } @@ -451,12 +451,12 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in addLoc = false if !loc.IsDefBackend { - glog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress %q)", + klog.V(3).Infof("Location %q already configured for server %q with upstream %q (Ingress %q)", loc.Path, server.Hostname, loc.Backend, ingKey) break } - glog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress %q)", + klog.V(3).Infof("Replacing location %q for server %q with upstream %q to use upstream %q (Ingress %q)", loc.Path, server.Hostname, loc.Backend, ups.Name, ingKey) loc.Backend = ups.Name @@ -496,7 +496,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in // new location if addLoc { - glog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress %q)", + klog.V(3).Infof("Adding location %q for server %q with upstream %q (Ingress %q)", nginxPath, server.Hostname, ups.Name, ingKey) loc := &ingress.Location{ @@ -543,7 +543,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in if anns.SessionAffinity.Type == "cookie" { cookiePath := anns.SessionAffinity.Cookie.Path if anns.Rewrite.UseRegex && cookiePath == "" { - glog.Warningf("session-cookie-path should be set when use-regex is true") + klog.Warningf("session-cookie-path should be set when use-regex is true") } ups.SessionAffinity.CookieSessionAffinity.Name = anns.SessionAffinity.Cookie.Name @@ -562,7 +562,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in } if anns.Canary.Enabled { - glog.Infof("Canary ingress %v detected. Finding eligible backends to merge into.", ing.Name) + klog.Infof("Canary ingress %v detected. 
Finding eligible backends to merge into.", ing.Name) mergeAlternativeBackends(ing, upstreams, servers) } } @@ -577,13 +577,13 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in for _, location := range server.Locations { if upstream.Name == location.Backend { if len(upstream.Endpoints) == 0 { - glog.V(3).Infof("Upstream %q has no active Endpoint", upstream.Name) + klog.V(3).Infof("Upstream %q has no active Endpoint", upstream.Name) // check if the location contains endpoints and a custom default backend if location.DefaultBackend != nil { sp := location.DefaultBackend.Spec.Ports[0] endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) > 0 { - glog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")", + klog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")", location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name) nb := upstream.DeepCopy() @@ -599,7 +599,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in if server.SSLPassthrough { if location.Path == rootLocation { if location.Backend == defUpstreamName { - glog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname) + klog.Warningf("Server %q has no default backend, ignoring SSL Passthrough.", server.Hostname) continue } isHTTPSfrom = append(isHTTPSfrom, server) @@ -650,7 +650,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B if ing.Spec.Backend != nil { defBackend = upstreamName(ing.Namespace, ing.Spec.Backend.ServiceName, ing.Spec.Backend.ServicePort) - glog.V(3).Infof("Creating upstream %q", defBackend) + klog.V(3).Infof("Creating upstream %q", defBackend) upstreams[defBackend] = newUpstream(defBackend) if upstreams[defBackend].SecureCACert.Secret == "" { upstreams[defBackend].SecureCACert = anns.SecureUpstream.CACert @@ -668,7 +668,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B if anns.ServiceUpstream { endpoint, err := n.getServiceClusterEndpoint(svcKey, ing.Spec.Backend) if err != nil { - glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) + klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) } else { upstreams[defBackend].Endpoints = []ingress.Endpoint{endpoint} } @@ -688,13 +688,13 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B endps, err := n.serviceEndpoints(svcKey, ing.Spec.Backend.ServicePort.String()) upstreams[defBackend].Endpoints = append(upstreams[defBackend].Endpoints, endps...) 
if err != nil { - glog.Warningf("Error creating upstream %q: %v", defBackend, err) + klog.Warningf("Error creating upstream %q: %v", defBackend, err) } } s, err := n.store.GetService(svcKey) if err != nil { - glog.Warningf("Error obtaining Service %q: %v", svcKey, err) + klog.Warningf("Error obtaining Service %q: %v", svcKey, err) } upstreams[defBackend].Service = s } @@ -711,7 +711,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B continue } - glog.V(3).Infof("Creating upstream %q", name) + klog.V(3).Infof("Creating upstream %q", name) upstreams[name] = newUpstream(name) upstreams[name].Port = path.Backend.ServicePort @@ -733,7 +733,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B if anns.ServiceUpstream { endpoint, err := n.getServiceClusterEndpoint(svcKey, &path.Backend) if err != nil { - glog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) + klog.Errorf("Failed to determine a suitable ClusterIP Endpoint for Service %q: %v", svcKey, err) } else { upstreams[name].Endpoints = []ingress.Endpoint{endpoint} } @@ -752,7 +752,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B if len(upstreams[name].Endpoints) == 0 { endp, err := n.serviceEndpoints(svcKey, path.Backend.ServicePort.String()) if err != nil { - glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) + klog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) continue } upstreams[name].Endpoints = endp @@ -760,7 +760,7 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B s, err := n.store.GetService(svcKey) if err != nil { - glog.Warningf("Error obtaining Service %q: %v", svcKey, err) + klog.Warningf("Error obtaining Service %q: %v", svcKey, err) continue } @@ -816,7 +816,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres return upstreams, err } - glog.V(3).Infof("Obtaining ports information for Service %q", svcKey) + klog.V(3).Infof("Obtaining ports information for Service %q", svcKey) for _, servicePort := range svc.Spec.Ports { // targetPort could be a string, use either the port name or number (int) if strconv.Itoa(int(servicePort.Port)) == backendPort || @@ -825,7 +825,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("Service %q does not have any active Endpoint.", svcKey) + klog.Warningf("Service %q does not have any active Endpoint.", svcKey) } if n.cfg.SortBackends { @@ -848,7 +848,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres if len(svc.Spec.Ports) == 0 && svc.Spec.Type == apiv1.ServiceTypeExternalName { externalPort, err := strconv.Atoi(backendPort) if err != nil { - glog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort) + klog.Warningf("Only numeric ports are allowed in ExternalName Services: %q is not a valid port number.", backendPort) return upstreams, nil } @@ -859,7 +859,7 @@ func (n *NGINXController) serviceEndpoints(svcKey, backendPort string) ([]ingres } endps := getEndpoints(svc, &servicePort, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) if len(endps) == 0 { - glog.Warningf("Service %q does not have any active Endpoint.", svcKey) + klog.Warningf("Service %q does not have any active Endpoint.", 
svcKey) return upstreams, nil } @@ -950,7 +950,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, // special "catch all" case, Ingress with a backend but no rule defLoc := servers[defServerName].Locations[0] if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 { - glog.Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q", + klog.Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q", ingKey, defServerName) defLoc.IsDefBackend = false @@ -978,7 +978,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, defLoc.BackendProtocol = anns.BackendProtocol defLoc.ModSecurity = anns.ModSecurity } else { - glog.V(3).Infof("Ingress %q defines both a backend and rules. Using its backend as default upstream for all its rules.", + klog.V(3).Infof("Ingress %q defines both a backend and rules. Using its backend as default upstream for all its rules.", ingKey) } } @@ -1029,7 +1029,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, aliases["Alias"] = host } } else { - glog.Warningf("Aliases already configured for server %q, skipping (Ingress %q)", + klog.Warningf("Aliases already configured for server %q, skipping (Ingress %q)", host, ingKey) } } @@ -1038,7 +1038,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, if servers[host].ServerSnippet == "" { servers[host].ServerSnippet = anns.ServerSnippet } else { - glog.Warningf("Server snippet already configured for server %q, skipping (Ingress %q)", + klog.Warningf("Server snippet already configured for server %q, skipping (Ingress %q)", host, ingKey) } } @@ -1054,14 +1054,14 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, } if len(ing.Spec.TLS) == 0 { - glog.V(3).Infof("Ingress %q does not contains a TLS section.", ingKey) + klog.V(3).Infof("Ingress %q does not contains a TLS section.", ingKey) continue } tlsSecretName := extractTLSSecretName(host, ing, n.store.GetLocalSSLCert) if tlsSecretName == "" { - glog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host) + klog.V(3).Infof("Host %q is listed in the TLS section but secretName is empty. Using default certificate.", host) servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemSHA = defaultPemSHA continue @@ -1070,7 +1070,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, secrKey := fmt.Sprintf("%v/%v", ing.Namespace, tlsSecretName) cert, err := n.store.GetLocalSSLCert(secrKey) if err != nil { - glog.Warningf("Error getting SSL certificate %q: %v. Using default certificate", secrKey, err) + klog.Warningf("Error getting SSL certificate %q: %v. Using default certificate", secrKey, err) servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemSHA = defaultPemSHA continue @@ -1078,15 +1078,15 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, err = cert.Certificate.VerifyHostname(host) if err != nil { - glog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", secrKey, host, err) - glog.Warning("Validating certificate against DNS names. This will be deprecated in a future version.") + klog.Warningf("Unexpected error validating SSL certificate %q for server %q: %v", secrKey, host, err) + klog.Warning("Validating certificate against DNS names. 
This will be deprecated in a future version.") // check the Common Name field // https://github.com/golang/go/issues/22922 err := verifyHostname(host, cert.Certificate) if err != nil { - glog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", + klog.Warningf("SSL certificate %q does not contain a Common Name or Subject Alternative Name for server %q: %v", secrKey, host, err) - glog.Warningf("Using default certificate") + klog.Warningf("Using default certificate") servers[host].SSLCert.PemFileName = defaultPemFileName servers[host].SSLCert.PemSHA = defaultPemSHA continue @@ -1102,14 +1102,14 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, servers[host].SSLCert = *cert if cert.ExpireTime.Before(time.Now().Add(240 * time.Hour)) { - glog.Warningf("SSL certificate for server %q is about to expire (%v)", host, cert.ExpireTime) + klog.Warningf("SSL certificate for server %q is about to expire (%v)", host, cert.ExpireTime) } } } for alias, host := range aliases { if _, ok := servers[alias]; ok { - glog.Warningf("Conflicting hostname (%v) and alias (%v). Removing alias to avoid conflicts.", host, alias) + klog.Warningf("Conflicting hostname (%v) and alias (%v). Removing alias to avoid conflicts.", host, alias) servers[host].Alias = "" } } @@ -1124,7 +1124,7 @@ func canMergeBackend(primary *ingress.Backend, alternative *ingress.Backend) boo // Performs the merge action and checks to ensure that one two alternative backends do not merge into each other func mergeAlternativeBackend(priUps *ingress.Backend, altUps *ingress.Backend) bool { if priUps.NoServer { - glog.Warningf("unable to merge alternative backend %v into primary backend %v because %v is a primary backend", + klog.Warningf("unable to merge alternative backend %v into primary backend %v because %v is a primary backend", altUps.Name, priUps.Name, priUps.Name) return false } @@ -1154,7 +1154,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres priUps := upstreams[loc.Backend] if canMergeBackend(priUps, altUps) { - glog.Infof("matching backend %v found for alternative backend %v", + klog.Infof("matching backend %v found for alternative backend %v", priUps.Name, altUps.Name) merged = mergeAlternativeBackend(priUps, altUps) @@ -1162,7 +1162,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres } if !merged { - glog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name) + klog.Warningf("unable to find real backend for alternative backend %v. 
Deleting.", altUps.Name) delete(upstreams, altUps.Name) } } @@ -1177,7 +1177,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres server, ok := servers[rule.Host] if !ok { - glog.Errorf("cannot merge alternative backend %s into hostname %s that does not exist", + klog.Errorf("cannot merge alternative backend %s into hostname %s that does not exist", altUps.Name, rule.Host) @@ -1189,7 +1189,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres priUps := upstreams[loc.Backend] if canMergeBackend(priUps, altUps) && loc.Path == path.Path { - glog.Infof("matching backend %v found for alternative backend %v", + klog.Infof("matching backend %v found for alternative backend %v", priUps.Name, altUps.Name) merged = mergeAlternativeBackend(priUps, altUps) @@ -1197,7 +1197,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres } if !merged { - glog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name) + klog.Warningf("unable to find real backend for alternative backend %v. Deleting.", altUps.Name) delete(upstreams, altUps.Name) } } @@ -1232,7 +1232,7 @@ func extractTLSSecretName(host string, ing *ingress.Ingress, cert, err := getLocalSSLCert(secrKey) if err != nil { - glog.Warningf("Error getting SSL certificate %q: %v", secrKey, err) + klog.Warningf("Error getting SSL certificate %q: %v", secrKey, err) continue } @@ -1244,7 +1244,7 @@ func extractTLSSecretName(host string, ing *ingress.Ingress, if err != nil { continue } - glog.V(3).Infof("Found SSL certificate matching host %q: %q", host, secrKey) + klog.V(3).Infof("Found SSL certificate matching host %q: %q", host, secrKey) return tls.SecretName } diff --git a/internal/ingress/controller/endpoints.go b/internal/ingress/controller/endpoints.go index 233d62d80..188043a23 100644 --- a/internal/ingress/controller/endpoints.go +++ b/internal/ingress/controller/endpoints.go @@ -22,7 +22,7 @@ import ( "reflect" "strconv" - "github.com/golang/glog" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" @@ -48,18 +48,18 @@ func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Prot // ExternalName services if s.Spec.Type == corev1.ServiceTypeExternalName { - glog.V(3).Infof("Ingress using Service %q of type ExternalName.", svcKey) + klog.V(3).Infof("Ingress using Service %q of type ExternalName.", svcKey) targetPort := port.TargetPort.IntValue() if targetPort <= 0 { - glog.Errorf("ExternalName Service %q has an invalid port (%v)", svcKey, targetPort) + klog.Errorf("ExternalName Service %q has an invalid port (%v)", svcKey, targetPort) return upsServers } if net.ParseIP(s.Spec.ExternalName) == nil { _, err := net.LookupHost(s.Spec.ExternalName) if err != nil { - glog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err) + klog.Errorf("Error resolving host %q: %v", s.Spec.ExternalName, err) return upsServers } } @@ -70,10 +70,10 @@ func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto corev1.Prot }) } - glog.V(3).Infof("Getting Endpoints for Service %q and port %v", svcKey, port.String()) + klog.V(3).Infof("Getting Endpoints for Service %q and port %v", svcKey, port.String()) ep, err := getServiceEndpoints(svcKey) if err != nil { - glog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) + klog.Warningf("Error obtaining Endpoints for Service %q: %v", svcKey, err) return upsServers } @@ -113,6 +113,6 @@ func getEndpoints(s *corev1.Service, port *corev1.ServicePort, proto 
corev1.Prot } } - glog.V(3).Infof("Endpoints found for Service %q: %v", svcKey, upsServers) + klog.V(3).Infof("Endpoints found for Service %q: %v", svcKey, upsServers) return upsServers } diff --git a/internal/ingress/controller/nginx.go b/internal/ingress/controller/nginx.go index 40cfc6ac0..2c04480f2 100644 --- a/internal/ingress/controller/nginx.go +++ b/internal/ingress/controller/nginx.go @@ -34,8 +34,6 @@ import ( "text/template" "time" - "github.com/golang/glog" - proxyproto "github.com/armon/go-proxyproto" "github.com/eapache/channels" apiv1 "k8s.io/api/core/v1" @@ -44,6 +42,7 @@ import ( v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/ingress-nginx/internal/file" @@ -76,14 +75,14 @@ var ( // NewNGINXController creates a new NGINX Ingress controller. func NewNGINXController(config *Configuration, mc metric.Collector, fs file.Filesystem) *NGINXController { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ Interface: config.Client.CoreV1().Events(config.Namespace), }) h, err := dns.GetSystemNameServers() if err != nil { - glog.Warningf("Error reading system nameservers: %v", err) + klog.Warningf("Error reading system nameservers: %v", err) } n := &NGINXController{ @@ -113,7 +112,7 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File pod, err := k8s.GetPodDetails(config.Client) if err != nil { - glog.Fatalf("unexpected error obtaining pod information: %v", err) + klog.Fatalf("unexpected error obtaining pod information: %v", err) } n.store = store.New( @@ -147,14 +146,14 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File UseNodeInternalIP: config.UseNodeInternalIP, }) } else { - glog.Warning("Update of Ingress status is disabled (flag --update-status)") + klog.Warning("Update of Ingress status is disabled (flag --update-status)") } onTemplateChange := func() { template, err := ngx_template.NewTemplate(tmplPath, fs) if err != nil { // this error is different from the rest because it must be clear why nginx is not working - glog.Errorf(` + klog.Errorf(` ------------------------------------------------------------------------------- Error loading new template: %v ------------------------------------------------------------------------------- @@ -163,13 +162,13 @@ Error loading new template: %v } n.t = template - glog.Info("New NGINX configuration template loaded.") + klog.Info("New NGINX configuration template loaded.") n.syncQueue.EnqueueTask(task.GetDummyObject("template-change")) } ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs) if err != nil { - glog.Fatalf("Invalid NGINX configuration template: %v", err) + klog.Fatalf("Invalid NGINX configuration template: %v", err) } n.t = ngxTpl @@ -181,7 +180,7 @@ Error loading new template: %v _, err = watch.NewFileWatcher(tmplPath, onTemplateChange) if err != nil { - glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err) + klog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err) } filesToWatch := []string{} @@ -199,16 +198,16 @@ Error loading new template: %v }) if err != nil { - glog.Fatalf("Error creating file watchers: %v", err) + klog.Fatalf("Error creating file watchers: %v", err) } for _, f := range filesToWatch { _, err = watch.NewFileWatcher(f, func() { - 
glog.Infof("File %v changed. Reloading NGINX", f) + klog.Infof("File %v changed. Reloading NGINX", f) n.syncQueue.EnqueueTask(task.GetDummyObject("file-change")) }) if err != nil { - glog.Fatalf("Error creating file watcher for %v: %v", f, err) + klog.Fatalf("Error creating file watcher for %v: %v", f, err) } } @@ -262,7 +261,7 @@ type NGINXController struct { // Start starts a new NGINX master process running in the foreground. func (n *NGINXController) Start() { - glog.Info("Starting NGINX Ingress controller") + klog.Info("Starting NGINX Ingress controller") n.store.Run(n.stopCh) @@ -283,7 +282,7 @@ func (n *NGINXController) Start() { n.setupSSLProxy() } - glog.Info("Starting NGINX process") + klog.Info("Starting NGINX process") n.start(cmd) go n.syncQueue.Run(time.Second, n.stopCh) @@ -319,7 +318,7 @@ func (n *NGINXController) Start() { break } if evt, ok := event.(store.Event); ok { - glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj) + klog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj) if evt.Type == store.ConfigurationEvent { // TODO: is this necessary? Consider removing this special case n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change")) @@ -328,7 +327,7 @@ func (n *NGINXController) Start() { n.syncQueue.EnqueueSkippableTask(evt.Obj) } else { - glog.Warningf("Unexpected event type received %T", event) + klog.Warningf("Unexpected event type received %T", event) } case <-n.stopCh: break @@ -347,7 +346,7 @@ func (n *NGINXController) Stop() error { return fmt.Errorf("shutdown already in progress") } - glog.Info("Shutting down controller queues") + klog.Info("Shutting down controller queues") close(n.stopCh) go n.syncQueue.Shutdown() if n.syncStatus != nil { @@ -355,7 +354,7 @@ func (n *NGINXController) Stop() error { } // send stop signal to NGINX - glog.Info("Stopping NGINX process") + klog.Info("Stopping NGINX process") cmd := nginxExecCommand("-s", "quit") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr @@ -368,7 +367,7 @@ func (n *NGINXController) Stop() error { timer := time.NewTicker(time.Second * 1) for range timer.C { if !process.IsNginxRunning() { - glog.Info("NGINX process has stopped") + klog.Info("NGINX process has stopped") timer.Stop() break } @@ -381,7 +380,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Start(); err != nil { - glog.Fatalf("NGINX error: %v", err) + klog.Fatalf("NGINX error: %v", err) n.ngxErrCh <- err return } @@ -444,7 +443,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { for _, pb := range ingressCfg.PassthroughBackends { svc := pb.Service if svc == nil { - glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend) + klog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend) continue } port, err := strconv.Atoi(pb.Port.String()) @@ -497,7 +496,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { } else { n = fmt.Sprintf("www.%v", srv.Hostname) } - glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n) + klog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n) if _, ok := redirectServers[n]; !ok { found := false for _, esrv := range ingressCfg.Servers { @@ -514,24 +513,24 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { } if cfg.ServerNameHashBucketSize == 0 { nameHashBucketSize := nginxHashBucketSize(longestName) - glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", 
nameHashBucketSize) + klog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize) cfg.ServerNameHashBucketSize = nameHashBucketSize } serverNameHashMaxSize := nextPowerOf2(serverNameBytes) if cfg.ServerNameHashMaxSize < serverNameHashMaxSize { - glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize) + klog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize) cfg.ServerNameHashMaxSize = serverNameHashMaxSize } // the limit of open files is per worker process // and we leave some room to avoid consuming all the FDs available wp, err := strconv.Atoi(cfg.WorkerProcesses) - glog.V(3).Infof("Number of worker processes: %d", wp) + klog.V(3).Infof("Number of worker processes: %d", wp) if err != nil { wp = 1 } maxOpenFiles := (sysctlFSFileMax() / wp) - 1024 - glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles) + klog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles) if maxOpenFiles < 1024 { // this means the value of RLIMIT_NOFILE is too low. maxOpenFiles = 1024 @@ -541,7 +540,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if cfg.ProxySetHeaders != "" { cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders) if err != nil { - glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err) + klog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err) } setHeaders = cmap.Data @@ -551,7 +550,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if cfg.AddHeaders != "" { cmap, err := n.store.GetConfigMap(cfg.AddHeaders) if err != nil { - glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err) + klog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err) } addHeaders = cmap.Data @@ -563,7 +562,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { secret, err := n.store.GetSecret(secretName) if err != nil { - glog.Warningf("Error reading Secret %q from local store: %v", secretName, err) + klog.Warningf("Error reading Secret %q from local store: %v", secretName, err) } nsSecName := strings.Replace(secretName, "/", "-", -1) @@ -572,7 +571,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if ok { pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem) if err != nil { - glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err) + klog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err) } else { sslDHParam = pemFileName } @@ -624,7 +623,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { return err } - if glog.V(2) { + if klog.V(2) { src, _ := ioutil.ReadFile(cfgPath) if !bytes.Equal(src, content) { tmpfile, err := ioutil.TempFile("", "new-nginx-cfg") @@ -640,7 +639,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { // TODO: executing diff can return exit code != 0 diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput() - glog.Infof("NGINX configuration diff:\n%v", string(diffOutput)) + klog.Infof("NGINX configuration diff:\n%v", string(diffOutput)) // we do not defer the deletion of temp files in order // to keep them around for inspection in case of error @@ -691,7 +690,7 @@ func (n *NGINXController) setupSSLProxy() { sslPort := n.cfg.ListenPorts.HTTPS proxyPort := n.cfg.ListenPorts.SSLProxy - 
glog.Info("Starting TLS proxy for SSL Passthrough") + klog.Info("Starting TLS proxy for SSL Passthrough") n.Proxy = &TCPProxy{ Default: &TCPServer{ Hostname: "localhost", @@ -703,7 +702,7 @@ func (n *NGINXController) setupSSLProxy() { listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort)) if err != nil { - glog.Fatalf("%v", err) + klog.Fatalf("%v", err) } proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout} @@ -723,11 +722,11 @@ func (n *NGINXController) setupSSLProxy() { } if err != nil { - glog.Warningf("Error accepting TCP connection: %v", err) + klog.Warningf("Error accepting TCP connection: %v", err) continue } - glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) + klog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr()) go n.Proxy.Handle(conn) } }() @@ -884,7 +883,7 @@ func post(url string, data interface{}) error { return err } - glog.V(2).Infof("Posting to %s", url) + klog.V(2).Infof("Posting to %s", url) resp, err := http.Post(url, "application/json", bytes.NewReader(buf)) if err != nil { return err @@ -892,7 +891,7 @@ func post(url string, data interface{}) error { defer func() { if err := resp.Body.Close(); err != nil { - glog.Warningf("Error while closing response body:\n%v", err) + klog.Warningf("Error while closing response body:\n%v", err) } }() diff --git a/internal/ingress/controller/process/nginx.go b/internal/ingress/controller/process/nginx.go index a4777adae..2f31dbc80 100644 --- a/internal/ingress/controller/process/nginx.go +++ b/internal/ingress/controller/process/nginx.go @@ -24,9 +24,9 @@ import ( "syscall" "time" - "github.com/golang/glog" ps "github.com/mitchellh/go-ps" "github.com/ncabatoff/process-exporter/proc" + "k8s.io/klog" ) // IsRespawnIfRequired checks if error type is exec.ExitError or not @@ -37,7 +37,7 @@ func IsRespawnIfRequired(err error) bool { } waitStatus := exitError.Sys().(syscall.WaitStatus) - glog.Warningf(` + klog.Warningf(` ------------------------------------------------------------------------------- NGINX master process died (%v): %v ------------------------------------------------------------------------------- @@ -56,9 +56,9 @@ func WaitUntilPortIsAvailable(port int) { } conn.Close() // kill nginx worker processes - fs, err := proc.NewFS("/proc") + fs, err := proc.NewFS("/proc", false) if err != nil { - glog.Errorf("unexpected error reading /proc information: %v", err) + klog.Errorf("unexpected error reading /proc information: %v", err) continue } @@ -66,14 +66,14 @@ func WaitUntilPortIsAvailable(port int) { for _, p := range procs { pn, err := p.Comm() if err != nil { - glog.Errorf("unexpected error obtaining process information: %v", err) + klog.Errorf("unexpected error obtaining process information: %v", err) continue } if pn == "nginx" { osp, err := os.FindProcess(p.PID) if err != nil { - glog.Errorf("unexpected error obtaining process information: %v", err) + klog.Errorf("unexpected error obtaining process information: %v", err) continue } osp.Signal(syscall.SIGQUIT) diff --git a/internal/ingress/controller/store/backend_ssl.go b/internal/ingress/controller/store/backend_ssl.go index 9939d4a6c..57ce68cf5 100644 --- a/internal/ingress/controller/store/backend_ssl.go +++ b/internal/ingress/controller/store/backend_ssl.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" 
extensions "k8s.io/api/extensions/v1beta1" @@ -39,13 +39,13 @@ func (s k8sStore) syncSecret(key string) { s.mu.Lock() defer s.mu.Unlock() - glog.V(3).Infof("Syncing Secret %q", key) + klog.V(3).Infof("Syncing Secret %q", key) // TODO: getPemCertificate should not write to disk to avoid unnecessary overhead cert, err := s.getPemCertificate(key) if err != nil { if !isErrSecretForAuth(err) { - glog.Warningf("Error obtaining X.509 certificate: %v", err) + klog.Warningf("Error obtaining X.509 certificate: %v", err) } return } @@ -57,7 +57,7 @@ func (s k8sStore) syncSecret(key string) { // no need to update return } - glog.Infof("Updating Secret %q in the local store", key) + klog.Infof("Updating Secret %q in the local store", key) s.sslStore.Update(key, cert) // this update must trigger an update // (like an update event from a change in Ingress) @@ -65,7 +65,7 @@ func (s k8sStore) syncSecret(key string) { return } - glog.Infof("Adding Secret %q to the local store", key) + klog.Infof("Adding Secret %q to the local store", key) s.sslStore.Add(key, cert) // this update must trigger an update // (like an update event from a change in Ingress) @@ -116,7 +116,7 @@ func (s k8sStore) getPemCertificate(secretName string) (*ingress.SSLCert, error) if ca != nil { msg += " and authentication" } - glog.V(3).Info(msg) + klog.V(3).Info(msg) } else if ca != nil { sslCert, err = ssl.AddCertAuth(nsSecName, ca, s.filesystem) @@ -127,7 +127,7 @@ func (s k8sStore) getPemCertificate(secretName string) (*ingress.SSLCert, error) // makes this secret in 'syncSecret' to be used for Certificate Authentication // this does not enable Certificate Authentication - glog.V(3).Infof("Configuring Secret %q for TLS authentication", secretName) + klog.V(3).Infof("Configuring Secret %q for TLS authentication", secretName) } else { if auth != nil { @@ -158,7 +158,7 @@ func (s k8sStore) checkSSLChainIssues() { data, err := ssl.FullChainCert(secret.PemFileName, s.filesystem) if err != nil { - glog.Errorf("Error generating CA certificate chain for Secret %q: %v", secrKey, err) + klog.Errorf("Error generating CA certificate chain for Secret %q: %v", secrKey, err) continue } @@ -166,13 +166,13 @@ func (s k8sStore) checkSSLChainIssues() { file, err := s.filesystem.Create(fullChainPemFileName) if err != nil { - glog.Errorf("Error creating SSL certificate file for Secret %q: %v", secrKey, err) + klog.Errorf("Error creating SSL certificate file for Secret %q: %v", secrKey, err) continue } _, err = file.Write(data) if err != nil { - glog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err) + klog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err) continue } @@ -180,13 +180,13 @@ func (s k8sStore) checkSSLChainIssues() { err = mergo.MergeWithOverwrite(dst, secret) if err != nil { - glog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err) + klog.Errorf("Error creating SSL certificate for Secret %q: %v", secrKey, err) continue } dst.FullChainPemFileName = fullChainPemFileName - glog.Infof("Updating local copy of SSL certificate %q with missing intermediate CA certs", secrKey) + klog.Infof("Updating local copy of SSL certificate %q with missing intermediate CA certs", secrKey) s.sslStore.Update(secrKey, dst) // this update must trigger an update // (like an update event from a change in Ingress) diff --git a/internal/ingress/controller/store/store.go b/internal/ingress/controller/store/store.go index 1d90688d1..ce67ef3c4 100644 --- a/internal/ingress/controller/store/store.go +++ 
b/internal/ingress/controller/store/store.go @@ -25,7 +25,6 @@ import ( "time" "github.com/eapache/channels" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" @@ -41,6 +40,7 @@ import ( clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress" @@ -247,7 +247,7 @@ func New(checkOCSP bool, } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{ Interface: client.CoreV1().Events(namespace), }) @@ -305,7 +305,7 @@ func New(checkOCSP bool, ing := obj.(*extensions.Ingress) if !class.IsValid(ing) { a, _ := parser.GetStringAnnotation(class.IngressKey, ing) - glog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a) + klog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a) return } recorder.Eventf(ing, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name)) @@ -325,17 +325,17 @@ func New(checkOCSP bool, // If we reached here it means the ingress was deleted but its final state is unrecorded. tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("couldn't get object from tombstone %#v", obj) + klog.Errorf("couldn't get object from tombstone %#v", obj) return } ing, ok = tombstone.Obj.(*extensions.Ingress) if !ok { - glog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj) + klog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj) return } } if !class.IsValid(ing) { - glog.Infof("ignoring delete for ingress %v based on annotation %v", ing.Name, class.IngressKey) + klog.Infof("ignoring delete for ingress %v based on annotation %v", ing.Name, class.IngressKey) return } recorder.Eventf(ing, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", ing.Namespace, ing.Name)) @@ -356,10 +356,10 @@ func New(checkOCSP bool, validOld := class.IsValid(oldIng) validCur := class.IsValid(curIng) if !validOld && validCur { - glog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey) + klog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey) recorder.Eventf(curIng, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) } else if validOld && !validCur { - glog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey) + klog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey) recorder.Eventf(curIng, corev1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) } else if validCur && !reflect.DeepEqual(old, cur) { recorder.Eventf(curIng, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) @@ -387,11 +387,11 @@ func New(checkOCSP bool, // find references in ingresses and update local ssl certs if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { - glog.Infof("secret %v was added and it is used in ingress annotations. Parsing...", key) + klog.Infof("secret %v was added and it is used in ingress annotations. 
Parsing...", key) for _, ingKey := range ings { ing, err := store.getIngress(ingKey) if err != nil { - glog.Errorf("could not find Ingress %v in local store", ingKey) + klog.Errorf("could not find Ingress %v in local store", ingKey) continue } store.syncIngress(ing) @@ -414,11 +414,11 @@ func New(checkOCSP bool, // find references in ingresses and update local ssl certs if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { - glog.Infof("secret %v was updated and it is used in ingress annotations. Parsing...", key) + klog.Infof("secret %v was updated and it is used in ingress annotations. Parsing...", key) for _, ingKey := range ings { ing, err := store.getIngress(ingKey) if err != nil { - glog.Errorf("could not find Ingress %v in local store", ingKey) + klog.Errorf("could not find Ingress %v in local store", ingKey) continue } store.syncIngress(ing) @@ -437,12 +437,12 @@ func New(checkOCSP bool, // If we reached here it means the secret was deleted but its final state is unrecorded. tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("couldn't get object from tombstone %#v", obj) + klog.Errorf("couldn't get object from tombstone %#v", obj) return } sec, ok = tombstone.Obj.(*corev1.Secret) if !ok { - glog.Errorf("Tombstone contained object that is not a Secret: %#v", obj) + klog.Errorf("Tombstone contained object that is not a Secret: %#v", obj) return } } @@ -453,11 +453,11 @@ func New(checkOCSP bool, // find references in ingresses if ings := store.secretIngressMap.Reference(key); len(ings) > 0 { - glog.Infof("secret %v was deleted and it is used in ingress annotations. Parsing...", key) + klog.Infof("secret %v was deleted and it is used in ingress annotations. Parsing...", key) for _, ingKey := range ings { ing, err := store.getIngress(ingKey) if err != nil { - glog.Errorf("could not find Ingress %v in local store", ingKey) + klog.Errorf("could not find Ingress %v in local store", ingKey) continue } store.syncIngress(ing) @@ -527,7 +527,7 @@ func New(checkOCSP bool, key := k8s.MetaNamespaceKey(ingKey) ing, err := store.getIngress(key) if err != nil { - glog.Errorf("could not find Ingress %v in local store: %v", key, err) + klog.Errorf("could not find Ingress %v in local store: %v", key, err) continue } store.syncIngress(ing) @@ -581,7 +581,7 @@ func New(checkOCSP bool, ns, name, _ := k8s.ParseNameNS(configmap) cm, err := client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}) if err != nil { - glog.Warningf("Unexpected error reading configuration configmap: %v", err) + klog.Warningf("Unexpected error reading configuration configmap: %v", err) } store.setConfig(cm) @@ -592,7 +592,7 @@ func New(checkOCSP bool, // annotation to a go struct func (s *k8sStore) syncIngress(ing *extensions.Ingress) { key := k8s.MetaNamespaceKey(ing) - glog.V(3).Infof("updating annotations information for ingress %v", key) + klog.V(3).Infof("updating annotations information for ingress %v", key) copyIng := &extensions.Ingress{} ing.ObjectMeta.DeepCopyInto(©Ing.ObjectMeta) @@ -615,7 +615,7 @@ func (s *k8sStore) syncIngress(ing *extensions.Ingress) { ParsedAnnotations: s.annotations.Extract(ing), }) if err != nil { - glog.Error(err) + klog.Error(err) } } @@ -623,7 +623,7 @@ func (s *k8sStore) syncIngress(ing *extensions.Ingress) { // references in secretIngressMap. 
func (s *k8sStore) updateSecretIngressMap(ing *extensions.Ingress) { key := k8s.MetaNamespaceKey(ing) - glog.V(3).Infof("updating references to secrets for ingress %v", key) + klog.V(3).Infof("updating references to secrets for ingress %v", key) // delete all existing references first s.secretIngressMap.Delete(key) @@ -649,7 +649,7 @@ func (s *k8sStore) updateSecretIngressMap(ing *extensions.Ingress) { for _, ann := range secretAnnotations { secrKey, err := objectRefAnnotationNsKey(ann, ing) if err != nil && !errors.IsMissingAnnotations(err) { - glog.Errorf("error reading secret reference in annotation %q: %s", ann, err) + klog.Errorf("error reading secret reference in annotation %q: %s", ann, err) continue } if secrKey != "" { @@ -775,18 +775,18 @@ func (s k8sStore) writeSSLSessionTicketKey(cmap *corev1.ConfigMap, fileName stri // 81 used instead of 80 because of padding if !(ticketBytes == 48 || ticketBytes == 81) { - glog.Warningf("ssl-session-ticket-key must contain either 48 or 80 bytes") + klog.Warningf("ssl-session-ticket-key must contain either 48 or 80 bytes") } decodedTicket, err := base64.StdEncoding.DecodeString(ticketString) if err != nil { - glog.Errorf("unexpected error decoding ssl-session-ticket-key: %v", err) + klog.Errorf("unexpected error decoding ssl-session-ticket-key: %v", err) return } err = ioutil.WriteFile(fileName, decodedTicket, file.ReadWriteByUser) if err != nil { - glog.Errorf("unexpected error writing ssl-session-ticket-key to %s: %v", fileName, err) + klog.Errorf("unexpected error writing ssl-session-ticket-key to %s: %v", fileName, err) return } diff --git a/internal/ingress/controller/tcp.go b/internal/ingress/controller/tcp.go index cfaca7b20..f1dc980ce 100644 --- a/internal/ingress/controller/tcp.go +++ b/internal/ingress/controller/tcp.go @@ -21,7 +21,7 @@ import ( "io" "net" - "github.com/golang/glog" + "k8s.io/klog" "github.com/paultag/sniff/parser" ) @@ -63,19 +63,19 @@ func (p *TCPProxy) Handle(conn net.Conn) { length, err := conn.Read(data) if err != nil { - glog.V(4).Infof("Error reading the first 4k of the connection: %s", err) + klog.V(4).Infof("Error reading the first 4k of the connection: %s", err) return } proxy := p.Default hostname, err := parser.GetHostname(data[:]) if err == nil { - glog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname) + klog.V(4).Infof("Parsed hostname from TLS Client Hello: %s", hostname) proxy = p.Get(hostname) } if proxy == nil { - glog.V(4).Infof("There is no configured proxy for SSL connections.") + klog.V(4).Infof("There is no configured proxy for SSL connections.") return } @@ -96,16 +96,16 @@ func (p *TCPProxy) Handle(conn net.Conn) { protocol = "TCP6" } proxyProtocolHeader := fmt.Sprintf("PROXY %s %s %s %d %d\r\n", protocol, remoteAddr.IP.String(), localAddr.IP.String(), remoteAddr.Port, localAddr.Port) - glog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader) + klog.V(4).Infof("Writing Proxy Protocol header: %s", proxyProtocolHeader) _, err = fmt.Fprintf(clientConn, proxyProtocolHeader) } if err != nil { - glog.Errorf("Error writing Proxy Protocol header: %s", err) + klog.Errorf("Error writing Proxy Protocol header: %s", err) clientConn.Close() } else { _, err = clientConn.Write(data[:length]) if err != nil { - glog.Errorf("Error writing the first 4k of proxy data: %s", err) + klog.Errorf("Error writing the first 4k of proxy data: %s", err) clientConn.Close() } } diff --git a/internal/ingress/controller/template/configmap.go b/internal/ingress/controller/template/configmap.go 
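From the writeSSLSessionTicketKey hunk just above (store.go): the configured key is base64-decoded and written with owner-only permissions, and the length check runs before decoding, which is presumably why the comment allows 81 rather than 80 (base64 padding inflates the pre-decode length estimate). Below is a simplified sketch that validates the decoded length directly; nginx accepts 48- or 80-byte ticket keys. File name and key material are placeholders, and the helper is not the controller's code.

package main

import (
	"encoding/base64"
	"io/ioutil"
	"os"

	"k8s.io/klog"
)

// writeTicketKey decodes a base64 ssl-session-ticket-key and writes it with
// owner-only permissions. Simplified: it checks the decoded length, whereas
// the controller checks the encoded form up front (hence the 81-byte note).
func writeTicketKey(encoded, fileName string) error {
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return err
	}
	if l := len(raw); l != 48 && l != 80 {
		klog.Warningf("ssl-session-ticket-key must contain either 48 or 80 bytes, got %d", l)
	}
	// 0600 mirrors the intent of file.ReadWriteByUser in the hunk above.
	return ioutil.WriteFile(fileName, raw, 0600)
}

func main() {
	key := base64.StdEncoding.EncodeToString(make([]byte, 80)) // placeholder key
	if err := writeTicketKey(key, "/tmp/ticket.key"); err != nil {
		klog.Errorf("unexpected error writing ssl-session-ticket-key: %v", err)
		os.Exit(1)
	}
}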
index 99cc8ec33..0d0954849 100644 --- a/internal/ingress/controller/template/configmap.go +++ b/internal/ingress/controller/template/configmap.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" @@ -83,7 +83,7 @@ func ReadConfig(src map[string]string) config.Configuration { for _, i := range strings.Split(val, ",") { j, err := strconv.Atoi(i) if err != nil { - glog.Warningf("%v is not a valid http code: %v", i, err) + klog.Warningf("%v is not a valid http code: %v", i, err) } else { errors = append(errors, j) } @@ -118,7 +118,7 @@ func ReadConfig(src map[string]string) config.Configuration { bindAddressIpv4List = append(bindAddressIpv4List, fmt.Sprintf("%v", ns)) } } else { - glog.Warningf("%v is not a valid textual representation of an IP address", i) + klog.Warningf("%v is not a valid textual representation of an IP address", i) } } } @@ -140,12 +140,12 @@ func ReadConfig(src map[string]string) config.Configuration { delete(conf, httpRedirectCode) j, err := strconv.Atoi(val) if err != nil { - glog.Warningf("%v is not a valid HTTP code: %v", val, err) + klog.Warningf("%v is not a valid HTTP code: %v", val, err) } else { if validRedirectCodes.Has(j) { to.HTTPRedirectCode = j } else { - glog.Warningf("The code %v is not a valid as HTTP redirect code. Using the default.", val) + klog.Warningf("The code %v is not a valid as HTTP redirect code. Using the default.", val) } } } @@ -155,7 +155,7 @@ func ReadConfig(src map[string]string) config.Configuration { delete(conf, proxyHeaderTimeout) duration, err := time.ParseDuration(val) if err != nil { - glog.Warningf("proxy-protocol-header-timeout of %v encountered an error while being parsed %v. Switching to use default value instead.", val, err) + klog.Warningf("proxy-protocol-header-timeout of %v encountered an error while being parsed %v. 
Switching to use default value instead.", val, err) } else { to.ProxyProtocolHeaderTimeout = duration } @@ -166,7 +166,7 @@ func ReadConfig(src map[string]string) config.Configuration { delete(conf, proxyStreamResponses) j, err := strconv.Atoi(val) if err != nil { - glog.Warningf("%v is not a valid number: %v", val, err) + klog.Warningf("%v is not a valid number: %v", val, err) } else { streamResponses = j } @@ -220,18 +220,18 @@ func ReadConfig(src map[string]string) config.Configuration { decoder, err := mapstructure.NewDecoder(config) if err != nil { - glog.Warningf("unexpected error merging defaults: %v", err) + klog.Warningf("unexpected error merging defaults: %v", err) } err = decoder.Decode(conf) if err != nil { - glog.Warningf("unexpected error merging defaults: %v", err) + klog.Warningf("unexpected error merging defaults: %v", err) } hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{ TagName: "json", }) if err != nil { - glog.Warningf("unexpected error obtaining hash: %v", err) + klog.Warningf("unexpected error obtaining hash: %v", err) } to.Checksum = fmt.Sprintf("%v", hash) @@ -245,7 +245,7 @@ func filterErrors(codes []int) []int { if code > 299 && code < 600 { fa = append(fa, code) } else { - glog.Warningf("error code %v is not valid for custom error pages", code) + klog.Warningf("error code %v is not valid for custom error pages", code) } } diff --git a/internal/ingress/controller/template/template.go b/internal/ingress/controller/template/template.go index da2c116de..8e69da230 100644 --- a/internal/ingress/controller/template/template.go +++ b/internal/ingress/controller/template/template.go @@ -31,7 +31,6 @@ import ( text_template "text/template" "time" - "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -41,6 +40,7 @@ import ( "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/controller/config" ing_net "k8s.io/ingress-nginx/internal/net" + "k8s.io/klog" ) const ( @@ -84,12 +84,12 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) { outCmdBuf := t.bp.Get() defer t.bp.Put(outCmdBuf) - if glog.V(3) { + if klog.V(3) { b, err := json.Marshal(conf) if err != nil { - glog.Errorf("unexpected error: %v", err) + klog.Errorf("unexpected error: %v", err) } - glog.Infof("NGINX configuration: %v", string(b)) + klog.Infof("NGINX configuration: %v", string(b)) } err := t.tmpl.Execute(tmplBuf, conf) @@ -103,7 +103,7 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) { cmd.Stdin = tmplBuf cmd.Stdout = outCmdBuf if err := cmd.Run(); err != nil { - glog.Warningf("unexpected error cleaning template: %v", err) + klog.Warningf("unexpected error cleaning template: %v", err) return tmplBuf.Bytes(), nil } @@ -202,7 +202,7 @@ func shouldConfigureLuaRestyWAF(disableLuaRestyWAF bool, mode string) bool { func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string { servers, ok := s.([]*ingress.Server) if !ok { - glog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s) + klog.Errorf("expected an '[]*ingress.Server' type but %T was returned", s) return "" } @@ -238,12 +238,12 @@ func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string { func buildResolversForLua(res interface{}, disableIpv6 interface{}) string { nss, ok := res.([]net.IP) if !ok { - glog.Errorf("expected a '[]net.IP' type but %T was returned", res) + klog.Errorf("expected a '[]net.IP' type but %T was returned", res) return "" } no6, ok := 
disableIpv6.(bool) if !ok { - glog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) + klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) return "" } @@ -267,12 +267,12 @@ func buildResolvers(res interface{}, disableIpv6 interface{}) string { // NGINX need IPV6 addresses to be surrounded by brackets nss, ok := res.([]net.IP) if !ok { - glog.Errorf("expected a '[]net.IP' type but %T was returned", res) + klog.Errorf("expected a '[]net.IP' type but %T was returned", res) return "" } no6, ok := disableIpv6.(bool) if !ok { - glog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) + klog.Errorf("expected a 'bool' type but %T was returned", disableIpv6) return "" } @@ -316,7 +316,7 @@ func stripLocationModifer(path string) string { func enforceRegexModifier(input interface{}) bool { locations, ok := input.([]*ingress.Location) if !ok { - glog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '[]*ingress.Location' type but %T was returned", input) return false } @@ -333,7 +333,7 @@ func enforceRegexModifier(input interface{}) bool { func buildLocation(input interface{}, enforceRegex bool) string { location, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return slash } @@ -360,7 +360,7 @@ func buildLocation(input interface{}, enforceRegex bool) string { func buildAuthLocation(input interface{}) string { location, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return "" } @@ -378,7 +378,7 @@ func buildAuthResponseHeaders(input interface{}) []string { location, ok := input.(*ingress.Location) res := []string{} if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return res } @@ -398,7 +398,7 @@ func buildAuthResponseHeaders(input interface{}) []string { func buildLogFormatUpstream(input interface{}) string { cfg, ok := input.(config.Configuration) if !ok { - glog.Errorf("expected a 'config.Configuration' type but %T was returned", input) + klog.Errorf("expected a 'config.Configuration' type but %T was returned", input) return "" } @@ -412,13 +412,13 @@ func buildLogFormatUpstream(input interface{}) string { func buildProxyPass(host string, b interface{}, loc interface{}) string { backends, ok := b.([]*ingress.Backend) if !ok { - glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b) + klog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b) return "" } location, ok := loc.(*ingress.Location) if !ok { - glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) return "" } @@ -520,7 +520,7 @@ func filterRateLimits(input interface{}) []ratelimit.Config { servers, ok := input.([]*ingress.Server) if !ok { - glog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input) + klog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input) return ratelimits } for _, server := range servers { @@ -544,7 +544,7 @@ func buildRateLimitZones(input interface{}) []string { servers, ok := input.([]*ingress.Server) if !ok { - 
glog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input) + klog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input) return zones.List() } @@ -594,7 +594,7 @@ func buildRateLimit(input interface{}) []string { loc, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return limits } @@ -634,7 +634,7 @@ func buildRateLimit(input interface{}) []string { func isLocationInLocationList(location interface{}, rawLocationList string) bool { loc, ok := location.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", location) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", location) return false } @@ -656,7 +656,7 @@ func isLocationInLocationList(location interface{}, rawLocationList string) bool func isLocationAllowed(input interface{}) bool { loc, ok := input.(*ingress.Location) if !ok { - glog.Errorf("expected an '*ingress.Location' type but %T was returned", input) + klog.Errorf("expected an '*ingress.Location' type but %T was returned", input) return false } @@ -675,7 +675,7 @@ var ( func buildDenyVariable(a interface{}) string { l, ok := a.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", a) + klog.Errorf("expected a 'string' type but %T was returned", a) return "" } @@ -689,7 +689,7 @@ func buildDenyVariable(a interface{}) string { func buildUpstreamName(loc interface{}) string { location, ok := loc.(*ingress.Location) if !ok { - glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) return "" } @@ -701,7 +701,7 @@ func buildUpstreamName(loc interface{}) string { func buildNextUpstream(i, r interface{}) string { nextUpstream, ok := i.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", i) + klog.Errorf("expected a 'string' type but %T was returned", i) return "" } @@ -738,13 +738,13 @@ var nginxOffsetRegex = regexp.MustCompile("^[0-9]+[kKmMgG]{0,1}$") func isValidByteSize(input interface{}, isOffset bool) bool { s, ok := input.(string) if !ok { - glog.Errorf("expected an 'string' type but %T was returned", input) + klog.Errorf("expected an 'string' type but %T was returned", input) return false } s = strings.TrimSpace(s) if s == "" { - glog.V(2).Info("empty byte size, hence it will not be set") + klog.V(2).Info("empty byte size, hence it will not be set") return false } @@ -765,13 +765,13 @@ type ingressInformation struct { func getIngressInformation(i, p interface{}) *ingressInformation { ing, ok := i.(*ingress.Ingress) if !ok { - glog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i) + klog.Errorf("expected an '*ingress.Ingress' type but %T was returned", i) return &ingressInformation{} } path, ok := p.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", p) + klog.Errorf("expected a 'string' type but %T was returned", p) return &ingressInformation{} } @@ -808,7 +808,7 @@ func getIngressInformation(i, p interface{}) *ingressInformation { func buildForwardedFor(input interface{}) string { s, ok := input.(string) if !ok { - glog.Errorf("expected a 'string' type but %T was returned", input) + klog.Errorf("expected a 'string' type but %T was returned", input) return "" } @@ -820,7 +820,7 @@ func buildForwardedFor(input interface{}) string 
{ func buildAuthSignURL(input interface{}) string { s, ok := input.(string) if !ok { - glog.Errorf("expected an 'string' type but %T was returned", input) + klog.Errorf("expected an 'string' type but %T was returned", input) return "" } @@ -855,7 +855,7 @@ func randomString() string { func buildOpentracing(input interface{}) string { cfg, ok := input.(config.Configuration) if !ok { - glog.Errorf("expected a 'config.Configuration' type but %T was returned", input) + klog.Errorf("expected a 'config.Configuration' type but %T was returned", input) return "" } @@ -881,7 +881,7 @@ func buildOpentracing(input interface{}) string { func buildInfluxDB(input interface{}) string { cfg, ok := input.(influxdb.Config) if !ok { - glog.Errorf("expected an 'influxdb.Config' type but %T was returned", input) + klog.Errorf("expected an 'influxdb.Config' type but %T was returned", input) return "" } @@ -901,7 +901,7 @@ func buildInfluxDB(input interface{}) string { func proxySetHeader(loc interface{}) string { location, ok := loc.(*ingress.Location) if !ok { - glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) return "proxy_set_header" } @@ -932,7 +932,7 @@ func buildCustomErrorDeps(proxySetHeaders map[string]string, errorCodes []int, e func collectCustomErrorsPerServer(input interface{}) []int { server, ok := input.(*ingress.Server) if !ok { - glog.Errorf("expected a '*ingress.Server' type but %T was returned", input) + klog.Errorf("expected a '*ingress.Server' type but %T was returned", input) return nil } @@ -954,7 +954,7 @@ func collectCustomErrorsPerServer(input interface{}) []int { func opentracingPropagateContext(loc interface{}) string { location, ok := loc.(*ingress.Location) if !ok { - glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) + klog.Errorf("expected a '*ingress.Location' type but %T was returned", loc) return "opentracing_propagate_context" } diff --git a/internal/ingress/controller/util.go b/internal/ingress/controller/util.go index 4e03b5e8f..4046d02f7 100644 --- a/internal/ingress/controller/util.go +++ b/internal/ingress/controller/util.go @@ -24,7 +24,7 @@ import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/util/sysctl" @@ -57,7 +57,7 @@ func upstreamName(namespace string, service string, port intstr.IntOrString) str func sysctlSomaxconn() int { maxConns, err := sysctl.New().GetSysctl("net/core/somaxconn") if err != nil || maxConns < 512 { - glog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns) + klog.V(3).Infof("net.core.somaxconn=%v (using system default)", maxConns) return 511 } @@ -70,10 +70,10 @@ func sysctlFSFileMax() int { var rLimit syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) if err != nil { - glog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err) + klog.Errorf("Error reading system maximum number of open file descriptors (RLIMIT_NOFILE): %v", err) return 0 } - glog.V(2).Infof("rlimit.max=%v", rLimit.Max) + klog.V(2).Infof("rlimit.max=%v", rLimit.Max) return int(rLimit.Max) } diff --git a/internal/ingress/metric/collectors/controller.go b/internal/ingress/metric/collectors/controller.go index 13656f7a1..c24caaa45 100644 --- a/internal/ingress/metric/collectors/controller.go +++ b/internal/ingress/metric/collectors/controller.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - "github.com/golang/glog" 
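The template.go helpers above all share one defensive shape: the argument arrives as interface{} from text/template, is type-asserted, and on a mismatch the helper logs via klog and returns a harmless default instead of aborting the render. The sketch below reproduces just that pattern with a made-up helper and struct (serverConfig is not a controller type).

package main

import (
	"os"
	"text/template"

	"k8s.io/klog"
)

// serverConfig is a stand-in for the controller's ingress.Server.
type serverConfig struct {
	Hostname string
}

// buildServerName mirrors the template-helper pattern above: assert the
// dynamic argument, log and fall back to a safe default if the type is wrong.
func buildServerName(input interface{}) string {
	srv, ok := input.(*serverConfig)
	if !ok {
		klog.Errorf("expected a '*serverConfig' type but %T was returned", input)
		return "_"
	}
	return srv.Hostname
}

func main() {
	tmpl := template.Must(template.New("server").
		Funcs(template.FuncMap{"buildServerName": buildServerName}).
		Parse("server_name {{ buildServerName . }};\n"))

	// Correct type: renders the hostname.
	tmpl.Execute(os.Stdout, &serverConfig{Hostname: "example.com"})
	// Wrong type: the helper logs the mismatch and the template still renders "_".
	tmpl.Execute(os.Stdout, "not-a-server")
}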
"github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-nginx/internal/ingress" @@ -181,11 +181,11 @@ func (cm *Controller) SetSSLExpireTime(servers []*ingress.Server) { func (cm *Controller) RemoveMetrics(hosts []string, registry prometheus.Gatherer) { mfs, err := registry.Gather() if err != nil { - glog.Errorf("Error gathering metrics: %v", err) + klog.Errorf("Error gathering metrics: %v", err) return } - glog.V(2).Infof("removing SSL certificate metrics for %v hosts", hosts) + klog.V(2).Infof("removing SSL certificate metrics for %v hosts", hosts) toRemove := sets.NewString(hosts...) for _, mf := range mfs { @@ -212,10 +212,10 @@ func (cm *Controller) RemoveMetrics(hosts []string, registry prometheus.Gatherer continue } - glog.V(2).Infof("Removing prometheus metric from gauge %v for host %v", metricName, host) + klog.V(2).Infof("Removing prometheus metric from gauge %v for host %v", metricName, host) removed := cm.sslExpireTime.Delete(labels) if !removed { - glog.V(2).Infof("metric %v for host %v with labels not removed: %v", metricName, host, labels) + klog.V(2).Infof("metric %v for host %v with labels not removed: %v", metricName, host, labels) } } } diff --git a/internal/ingress/metric/collectors/nginx_status.go b/internal/ingress/metric/collectors/nginx_status.go index 5ce9b2e9e..0df9d8d08 100644 --- a/internal/ingress/metric/collectors/nginx_status.go +++ b/internal/ingress/metric/collectors/nginx_status.go @@ -23,8 +23,8 @@ import ( "regexp" "strconv" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" ) var ( @@ -189,7 +189,7 @@ func parse(data string) *basicStatus { func getNginxStatus(port int, path string) (*basicStatus, error) { url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) - glog.V(3).Infof("start scraping url: %v", url) + klog.V(3).Infof("start scraping url: %v", url) data, err := httpBody(url) @@ -204,7 +204,7 @@ func getNginxStatus(port int, path string) (*basicStatus, error) { func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) { s, err := getNginxStatus(p.ngxHealthPort, p.ngxStatusPath) if err != nil { - glog.Warningf("unexpected error obtaining nginx status info: %v", err) + klog.Warningf("unexpected error obtaining nginx status info: %v", err) return } diff --git a/internal/ingress/metric/collectors/process.go b/internal/ingress/metric/collectors/process.go index 24c2ce3ab..dcb78597a 100644 --- a/internal/ingress/metric/collectors/process.go +++ b/internal/ingress/metric/collectors/process.go @@ -17,9 +17,10 @@ limitations under the License. package collectors import ( + "fmt" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" common "github.com/ncabatoff/process-exporter" "github.com/ncabatoff/process-exporter/proc" @@ -37,7 +38,7 @@ type Stopable interface { Stop() } -// BinaryNameMatcher ... +// BinaryNameMatcher define a namer using the binary name type BinaryNameMatcher struct { Name string Binary string @@ -45,7 +46,7 @@ type BinaryNameMatcher struct { // MatchAndName returns false if the match failed, otherwise // true and the resulting name. 
-func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) { +func (em BinaryNameMatcher) MatchAndName(nacl common.ProcAttributes) (bool, string) { if len(nacl.Cmdline) == 0 { return false, "" } @@ -53,6 +54,11 @@ func (em BinaryNameMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, stri return em.Name == cmd, "" } +// String returns the name of the binary to match +func (em BinaryNameMatcher) String() string { + return fmt.Sprintf("%+v", em.Binary) +} + type namedProcessData struct { numProcs *prometheus.Desc cpuSecs *prometheus.Desc @@ -86,7 +92,7 @@ var binary = "/usr/bin/nginx" // NewNGINXProcess returns a new prometheus collector for the nginx process func NewNGINXProcess(pod, namespace, ingressClass string) (NGINXProcessCollector, error) { - fs, err := proc.NewFS("/proc") + fs, err := proc.NewFS("/proc", false) if err != nil { return nil, err } @@ -98,11 +104,11 @@ func NewNGINXProcess(pod, namespace, ingressClass string) (NGINXProcessCollector p := namedProcess{ scrapeChan: make(chan scrapeRequest), - Grouper: proc.NewGrouper(true, nm), + Grouper: proc.NewGrouper(nm, true, false, false), fs: fs, } - _, err = p.Update(p.fs.AllProcs()) + _, _, err = p.Update(p.fs.AllProcs()) if err != nil { return nil, err } @@ -184,23 +190,23 @@ func (p namedProcess) Stop() { } func (p namedProcess) scrape(ch chan<- prometheus.Metric) { - _, err := p.Update(p.fs.AllProcs()) + _, groups, err := p.Update(p.fs.AllProcs()) if err != nil { - glog.Warningf("unexpected error obtaining nginx process info: %v", err) + klog.Warningf("unexpected error obtaining nginx process info: %v", err) return } - for _, gcounts := range p.Groups() { + for _, gcounts := range groups { ch <- prometheus.MustNewConstMetric(p.data.numProcs, prometheus.GaugeValue, float64(gcounts.Procs)) ch <- prometheus.MustNewConstMetric(p.data.memResidentbytes, - prometheus.GaugeValue, float64(gcounts.Memresident)) + prometheus.GaugeValue, float64(gcounts.Memory.ResidentBytes)) ch <- prometheus.MustNewConstMetric(p.data.memVirtualbytes, - prometheus.GaugeValue, float64(gcounts.Memvirtual)) + prometheus.GaugeValue, float64(gcounts.Memory.VirtualBytes)) ch <- prometheus.MustNewConstMetric(p.data.startTime, prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix())) ch <- prometheus.MustNewConstMetric(p.data.cpuSecs, - prometheus.CounterValue, gcounts.Cpu) + prometheus.CounterValue, gcounts.CPUSystemTime) ch <- prometheus.MustNewConstMetric(p.data.readBytes, prometheus.CounterValue, float64(gcounts.ReadBytes)) ch <- prometheus.MustNewConstMetric(p.data.writeBytes, diff --git a/internal/ingress/metric/collectors/socket.go b/internal/ingress/metric/collectors/socket.go index 00c780d31..7421602f1 100644 --- a/internal/ingress/metric/collectors/socket.go +++ b/internal/ingress/metric/collectors/socket.go @@ -23,10 +23,10 @@ import ( "net" "os" - "github.com/golang/glog" jsoniter "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" ) type upstream struct { @@ -203,19 +203,19 @@ func NewSocketCollector(pod, namespace, class string) (*SocketCollector, error) } func (sc *SocketCollector) handleMessage(msg []byte) { - glog.V(5).Infof("msg: %v", string(msg)) + klog.V(5).Infof("msg: %v", string(msg)) // Unmarshal bytes var statsBatch []socketData err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(msg, &statsBatch) if err != nil { - glog.Errorf("Unexpected error deserializing JSON payload: %v. 
Payload:\n%v", err, string(msg)) + klog.Errorf("Unexpected error deserializing JSON payload: %v. Payload:\n%v", err, string(msg)) return } for _, stats := range statsBatch { if !sc.hosts.Has(stats.Host) { - glog.V(3).Infof("skiping metric for host %v that is not being served", stats.Host) + klog.V(3).Infof("skiping metric for host %v that is not being served", stats.Host) continue } @@ -243,7 +243,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { requestsMetric, err := sc.requests.GetMetricWith(collectorLabels) if err != nil { - glog.Errorf("Error fetching requests metric: %v", err) + klog.Errorf("Error fetching requests metric: %v", err) } else { requestsMetric.Inc() } @@ -251,7 +251,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { if stats.Latency != -1 { latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels) if err != nil { - glog.Errorf("Error fetching latency metric: %v", err) + klog.Errorf("Error fetching latency metric: %v", err) } else { latencyMetric.Observe(stats.Latency) } @@ -260,7 +260,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { if stats.RequestTime != -1 { requestTimeMetric, err := sc.requestTime.GetMetricWith(requestLabels) if err != nil { - glog.Errorf("Error fetching request duration metric: %v", err) + klog.Errorf("Error fetching request duration metric: %v", err) } else { requestTimeMetric.Observe(stats.RequestTime) } @@ -269,7 +269,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { if stats.RequestLength != -1 { requestLengthMetric, err := sc.requestLength.GetMetricWith(requestLabels) if err != nil { - glog.Errorf("Error fetching request length metric: %v", err) + klog.Errorf("Error fetching request length metric: %v", err) } else { requestLengthMetric.Observe(stats.RequestLength) } @@ -278,7 +278,7 @@ func (sc *SocketCollector) handleMessage(msg []byte) { if stats.ResponseTime != -1 { responseTimeMetric, err := sc.responseTime.GetMetricWith(requestLabels) if err != nil { - glog.Errorf("Error fetching upstream response time metric: %v", err) + klog.Errorf("Error fetching upstream response time metric: %v", err) } else { responseTimeMetric.Observe(stats.ResponseTime) } @@ -287,14 +287,14 @@ func (sc *SocketCollector) handleMessage(msg []byte) { if stats.ResponseLength != -1 { bytesSentMetric, err := sc.bytesSent.GetMetricWith(requestLabels) if err != nil { - glog.Errorf("Error fetching bytes sent metric: %v", err) + klog.Errorf("Error fetching bytes sent metric: %v", err) } else { bytesSentMetric.Observe(stats.ResponseLength) } responseSizeMetric, err := sc.responseLength.GetMetricWith(requestLabels) if err != nil { - glog.Errorf("Error fetching bytes sent metric: %v", err) + klog.Errorf("Error fetching bytes sent metric: %v", err) } else { responseSizeMetric.Observe(stats.ResponseLength) } @@ -325,12 +325,12 @@ func (sc *SocketCollector) Stop() { func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus.Gatherer) { mfs, err := registry.Gather() if err != nil { - glog.Errorf("Error gathering metrics: %v", err) + klog.Errorf("Error gathering metrics: %v", err) return } // 1. 
remove metrics of removed ingresses - glog.V(2).Infof("removing ingresses %v from metrics", ingresses) + klog.V(2).Infof("removing ingresses %v from metrics", ingresses) for _, mf := range mfs { metricName := mf.GetName() metric, ok := sc.metricMapping[metricName] @@ -362,13 +362,13 @@ func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus continue } - glog.V(2).Infof("Removing prometheus metric from histogram %v for ingress %v", metricName, ingKey) + klog.V(2).Infof("Removing prometheus metric from histogram %v for ingress %v", metricName, ingKey) h, ok := metric.(*prometheus.HistogramVec) if ok { removed := h.Delete(labels) if !removed { - glog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) + klog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) } } @@ -376,7 +376,7 @@ func (sc *SocketCollector) RemoveMetrics(ingresses []string, registry prometheus if ok { removed := s.Delete(labels) if !removed { - glog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) + klog.V(2).Infof("metric %v for ingress %v with labels not removed: %v", metricName, ingKey, labels) } } } diff --git a/internal/ingress/status/status.go b/internal/ingress/status/status.go index d941f54ed..d4208c4da 100644 --- a/internal/ingress/status/status.go +++ b/internal/ingress/status/status.go @@ -25,8 +25,8 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" pool "gopkg.in/go-playground/pool.v3" apiv1 "k8s.io/api/core/v1" @@ -123,7 +123,7 @@ func (s statusSync) Run() { var stopCh chan struct{} callbacks := leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { - glog.V(2).Infof("I am the new status update leader") + klog.V(2).Infof("I am the new status update leader") stopCh = make(chan struct{}) go s.syncQueue.Run(time.Second, stopCh) // trigger initial sync @@ -136,7 +136,7 @@ func (s statusSync) Run() { }, stopCh) }, OnStoppedLeading: func() { - glog.V(2).Info("I am not status update leader anymore") + klog.V(2).Info("I am not status update leader anymore") close(stopCh) // cancel the context @@ -145,7 +145,7 @@ func (s statusSync) Run() { cancelContext = newLeaderCtx(ctx) }, OnNewLeader: func(identity string) { - glog.Infof("new leader elected: %v", identity) + klog.Infof("new leader elected: %v", identity) }, } @@ -176,7 +176,7 @@ func (s statusSync) Run() { Callbacks: callbacks, }) if err != nil { - glog.Fatalf("unexpected error starting leader election: %v", err) + klog.Fatalf("unexpected error starting leader election: %v", err) } cancelContext = newLeaderCtx(ctx) @@ -193,36 +193,36 @@ func (s statusSync) Shutdown() { } if !s.UpdateStatusOnShutdown { - glog.Warningf("skipping update of status of Ingress rules") + klog.Warningf("skipping update of status of Ingress rules") return } - glog.Info("updating status of Ingress rules (remove)") + klog.Info("updating status of Ingress rules (remove)") addrs, err := s.runningAddresses() if err != nil { - glog.Errorf("error obtaining running IPs: %v", addrs) + klog.Errorf("error obtaining running IPs: %v", addrs) return } if len(addrs) > 1 { // leave the job to the next leader - glog.Infof("leaving status update for next leader (%v)", len(addrs)) + klog.Infof("leaving status update for next leader (%v)", len(addrs)) return } if s.isRunningMultiplePods() { - glog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be 
elected as master)") + klog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be elected as master)") return } - glog.Infof("removing address from ingress status (%v)", addrs) + klog.Infof("removing address from ingress status (%v)", addrs) s.updateStatus([]apiv1.LoadBalancerIngress{}) } func (s *statusSync) sync(key interface{}) error { if s.syncQueue.IsShuttingDown() { - glog.V(2).Infof("skipping Ingress status update (shutting down in progress)") + klog.V(2).Infof("skipping Ingress status update (shutting down in progress)") return nil } @@ -247,7 +247,7 @@ func (s statusSync) keyfunc(input interface{}) (interface{}, error) { func NewStatusSyncer(config Config) Sync { pod, err := k8s.GetPodDetails(config.Client) if err != nil { - glog.Fatalf("unexpected error obtaining pod information: %v", err) + klog.Fatalf("unexpected error obtaining pod information: %v", err) } st := statusSync{ @@ -360,7 +360,7 @@ func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) { curIPs := ing.Status.LoadBalancer.Ingress sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs)) if ingressSliceEqual(curIPs, newIngressPoint) { - glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name) + klog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name) continue } @@ -385,11 +385,11 @@ func runUpdate(ing *ingress.Ingress, status []apiv1.LoadBalancerIngress, return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name)) } - glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, status) + klog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, status) currIng.Status.LoadBalancer.Ingress = status _, err = ingClient.UpdateStatus(currIng) if err != nil { - glog.Warningf("error updating ingress rule: %v", err) + klog.Warningf("error updating ingress rule: %v", err) } return true, nil diff --git a/internal/k8s/main.go b/internal/k8s/main.go index d10f3e5db..2636dcd6f 100644 --- a/internal/k8s/main.go +++ b/internal/k8s/main.go @@ -21,11 +21,11 @@ import ( "os" "strings" - "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) // ParseNameNS parses a string searching a namespace and name @@ -42,7 +42,7 @@ func ParseNameNS(input string) (string, string, error) { func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP bool) string { node, err := kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { - glog.Errorf("Error getting node %v: %v", name, err) + klog.Errorf("Error getting node %v: %v", name, err) return "" } @@ -104,7 +104,7 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) { func MetaNamespaceKey(obj interface{}) string { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - glog.Warning(err) + klog.Warning(err) } return key diff --git a/internal/net/dns/dns.go b/internal/net/dns/dns.go index 1249f7910..1edd64cf9 100644 --- a/internal/net/dns/dns.go +++ b/internal/net/dns/dns.go @@ -21,7 +21,7 @@ import ( "net" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) var defResolvConf = "/etc/resolv.conf" @@ -53,6 +53,6 @@ func GetSystemNameServers() ([]net.IP, error) { } } - glog.V(3).Infof("nameservers IP address/es to use: %v", nameservers) + klog.V(3).Infof("nameservers IP 
address/es to use: %v", nameservers) return nameservers, nil } diff --git a/internal/net/ssl/ssl.go b/internal/net/ssl/ssl.go index 8daf79f5f..9ff7d0c07 100644 --- a/internal/net/ssl/ssl.go +++ b/internal/net/ssl/ssl.go @@ -32,8 +32,8 @@ import ( "strconv" "time" - "github.com/golang/glog" "github.com/zakjan/cert-chain-resolver/certUtil" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" @@ -56,7 +56,7 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte, if err != nil { return nil, fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) } - glog.V(3).Infof("Creating temp file %v for Keypair: %v", tempPemFile.Name(), pemName) + klog.V(3).Infof("Creating temp file %v for Keypair: %v", tempPemFile.Name(), pemName) _, err = tempPemFile.Write(cert) if err != nil { @@ -110,11 +110,11 @@ func AddOrUpdateCertAndKey(name string, cert, key, ca []byte, } if len(pemCert.Extensions) > 0 { - glog.V(3).Info("parsing ssl certificate extensions") + klog.V(3).Info("parsing ssl certificate extensions") for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) { dns, _, _, err := parseSANExtension(ext.Value) if err != nil { - glog.Warningf("unexpected error parsing certificate extensions: %v", err) + klog.Warningf("unexpected error parsing certificate extensions: %v", err) continue } @@ -224,11 +224,11 @@ func CreateSSLCert(name string, cert, key, ca []byte) (*ingress.SSLCert, error) } if len(pemCert.Extensions) > 0 { - glog.V(3).Info("parsing ssl certificate extensions") + klog.V(3).Info("parsing ssl certificate extensions") for _, ext := range getExtension(pemCert, oidExtensionSubjectAltName) { dns, _, _, err := parseSANExtension(ext.Value) if err != nil { - glog.Warningf("unexpected error parsing certificate extensions: %v", err) + klog.Warningf("unexpected error parsing certificate extensions: %v", err) continue } @@ -366,7 +366,7 @@ func AddCertAuth(name string, ca []byte, fs file.Filesystem) (*ingress.SSLCert, return nil, fmt.Errorf("could not write CA file %v: %v", caFileName, err) } - glog.V(3).Infof("Created CA Certificate for Authentication: %v", caFileName) + klog.V(3).Infof("Created CA Certificate for Authentication: %v", caFileName) return &ingress.SSLCert{ Certificate: pemCert, CAFileName: caFileName, @@ -382,7 +382,7 @@ func AddOrUpdateDHParam(name string, dh []byte, fs file.Filesystem) (string, err tempPemFile, err := fs.TempFile(file.DefaultSSLDirectory, pemName) - glog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName) + klog.V(3).Infof("Creating temp file %v for DH param: %v", tempPemFile.Name(), pemName) if err != nil { return "", fmt.Errorf("could not create temp pem file %v: %v", pemFileName, err) } @@ -432,7 +432,7 @@ func GetFakeSSLCert() ([]byte, []byte) { priv, err = rsa.GenerateKey(rand.Reader, 2048) if err != nil { - glog.Fatalf("failed to generate fake private key: %s", err) + klog.Fatalf("failed to generate fake private key: %s", err) } notBefore := time.Now() @@ -443,7 +443,7 @@ func GetFakeSSLCert() ([]byte, []byte) { serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - glog.Fatalf("failed to generate fake serial number: %s", err) + klog.Fatalf("failed to generate fake serial number: %s", err) } template := x509.Certificate{ @@ -462,7 +462,7 @@ func GetFakeSSLCert() ([]byte, []byte) { } derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv) if err != nil { - glog.Fatalf("Failed to create fake certificate: %s", err) + 
klog.Fatalf("Failed to create fake certificate: %s", err) } cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) diff --git a/internal/net/ssl/ssl_test.go b/internal/net/ssl/ssl_test.go index f3ec381d5..121b55411 100644 --- a/internal/net/ssl/ssl_test.go +++ b/internal/net/ssl/ssl_test.go @@ -18,20 +18,20 @@ package ssl import ( "bytes" + "crypto/rsa" "crypto/x509" "fmt" "testing" "time" certutil "k8s.io/client-go/util/cert" - "k8s.io/client-go/util/cert/triple" "k8s.io/ingress-nginx/internal/file" ) // generateRSACerts generates a self signed certificate using a self generated ca -func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) { - ca, err := triple.NewCA("self-sign-ca") +func generateRSACerts(host string) (*keyPair, *keyPair, error) { + ca, err := newCA("self-sign-ca") if err != nil { return nil, nil, err } @@ -50,7 +50,7 @@ func generateRSACerts(host string) (*triple.KeyPair, *triple.KeyPair, error) { return nil, nil, fmt.Errorf("unable to sign the server certificate: %v", err) } - return &triple.KeyPair{ + return &keyPair{ Key: key, Cert: cert, }, ca, nil @@ -182,3 +182,26 @@ func TestCreateSSLCert(t *testing.T) { t.Fatalf("expected cname echoheaders but %v returned", ngxCert.CN[0]) } } + +type keyPair struct { + Key *rsa.PrivateKey + Cert *x509.Certificate +} + +func newCA(name string) (*keyPair, error) { + key, err := certutil.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("unable to create a private key for a new CA: %v", err) + } + config := certutil.Config{ + CommonName: name, + } + cert, err := certutil.NewSelfSignedCACert(config, key) + if err != nil { + return nil, fmt.Errorf("unable to create a self-signed certificate for a new CA: %v", err) + } + return &keyPair{ + Key: key, + Cert: cert, + }, nil +} diff --git a/internal/task/queue.go b/internal/task/queue.go index 4c82a6024..872b37cdb 100644 --- a/internal/task/queue.go +++ b/internal/task/queue.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -75,7 +75,7 @@ func (t *Queue) EnqueueSkippableTask(obj interface{}) { // enqueue enqueues ns/name of the given api object in the task queue. 
func (t *Queue) enqueue(obj interface{}, skippable bool) { if t.IsShuttingDown() { - glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj) + klog.Errorf("queue has been shutdown, failed to enqueue: %v", obj) return } @@ -84,10 +84,10 @@ func (t *Queue) enqueue(obj interface{}, skippable bool) { // make sure the timestamp is bigger than lastSync ts = time.Now().Add(24 * time.Hour).UnixNano() } - glog.V(3).Infof("queuing item %v", obj) + klog.V(3).Infof("queuing item %v", obj) key, err := t.fn(obj) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } t.queue.Add(Element{ @@ -119,15 +119,15 @@ func (t *Queue) worker() { item := key.(Element) if t.lastSync > item.Timestamp { - glog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp) + klog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp) t.queue.Forget(key) t.queue.Done(key) continue } - glog.V(3).Infof("syncing %v", item.Key) + klog.V(3).Infof("syncing %v", item.Key) if err := t.sync(key); err != nil { - glog.Warningf("requeuing %v, err %v", item.Key, err) + klog.Warningf("requeuing %v, err %v", item.Key, err) t.queue.AddRateLimited(Element{ Key: item.Key, Timestamp: time.Now().UnixNano(), diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 8de277bc5..a9cc45315 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -19,8 +19,8 @@ package e2e import ( "testing" - "github.com/golang/glog" "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" "k8s.io/ingress-nginx/test/e2e/framework" ) @@ -29,7 +29,7 @@ func init() { framework.RegisterParseFlags() if "" == framework.TestContext.KubeConfig { - glog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) + klog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) } } func TestE2E(t *testing.T) { diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index ae0a6ec5b..75efa16c6 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -30,8 +30,8 @@ import ( "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -235,8 +235,8 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b var match bool errs := InterceptGomegaFailures(func() { - if glog.V(10) && len(o) > 0 { - glog.Infof("nginx.conf:\n%v", o) + if klog.V(10) && len(o) > 0 { + klog.Infof("nginx.conf:\n%v", o) } // passes the nginx config to the passed function @@ -250,7 +250,7 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b } if len(errs) > 0 { - glog.V(2).Infof("Errors waiting for conditions: %v", errs) + klog.V(2).Infof("Errors waiting for conditions: %v", errs) } return false, nil @@ -329,7 +329,7 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name } if *deployment.Spec.Replicas != int32(replicas) { - glog.Infof("updating replica count from %v to %v...", *deployment.Spec.Replicas, replicas) + klog.Infof("updating replica count from %v to %v...", *deployment.Spec.Replicas, replicas) deployment, err := kubeClientSet.AppsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return err diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml deleted file mode 100644 index 1fb56f2bb..000000000 --- a/vendor/cloud.google.com/go/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -sudo: false -language: go -go: -- 1.6.x -- 1.7.x -- 1.8.x -- 1.9.x -- 1.10.x -- 1.11.x -install: -- go get -v cloud.google.com/go/... -script: -- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d -- tar xvf keys.tar -- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" - GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json" - GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests" - GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json" - GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test" - GCLOUD_TESTS_GOLANG_ENABLE_REPLAY=yes - travis_wait ./run-tests.sh $TRAVIS_COMMIT -env: - matrix: - # The GCLOUD_TESTS_API_KEY environment variable. - secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 30e26cdd6..2ca794335 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,108 @@ # Changes +## v0.33.1 + +all: release v0.33.1 + +- compute: Removes an erroneously added go.mod. +- logging: Populate source location in fromLogEntry. + +## v0.33.0 + +- bttest: + - Add support for apply_label_transformer. +- expr: + - Add expr library. +- firestore: + - Support retrieval of missing documents. +- kms: + - Add IAM methods. +- pubsub: + - Clarify extension documentation. +- scheduler: + - Add v1beta1 client. +- vision: + - Add product search helper. + - Add new product search client. + +## v0.32.0 + +Note: This release is the last to support Go 1.6 and 1.8. + +- bigquery: + - Add support for removing an expiration. + - Ignore NeverExpire in Table.Create. + - Validate table expiration time. +- cbt: + - Add note about not supporting arbitrary bytes. +- datastore: + - Align key checks. +- firestore: + - Return an error when using Start/End without providing values. +- pubsub: + - Add pstest Close method. + - Clarify MaxExtension documentation. +- securitycenter: + - Add v1beta1 client. 
+- spanner: + - Allow nil in mutations. + - Improve doc of SessionPoolConfig.MaxOpened. + - Increase session deletion timeout from 5s to 15s. + +## v0.31.0 + +- bigtable: + - Group mutations across multiple requests. +- bigquery: + - Link to bigquery troubleshooting errors page in bigquery.Error comment. +- cbt: + - Fix go generate command. + - Document usage of both maxage + maxversions. +- datastore: + - Passing nil keys results in ErrInvalidKey. +- firestore: + - Clarify what Document.DataTo does with untouched struct fields. +- profile: + - Validate service name in agent. +- pubsub: + - Fix deadlock with pstest and ctx.Cancel. + - Fix a possible deadlock in pstest. +- trace: + - Update doc URL with new fragment. + +Special thanks to @fastest963 for going above and beyond helping us to debug +hard-to-reproduce Pub/Sub issues. + +## v0.30.0 + +- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information. +- bigtable: bttest supports row sample filter. +- functions: metadata package added for accessing Cloud Functions resource metadata. + +## v0.29.0 + +- bigtable: + - Add retry to all idempotent RPCs. + - cbt supports complex GC policies. + - Emulator supports arbitrary bytes in regex filters. +- firestore: Add ArrayUnion and ArrayRemove. +- logging: Add the ContextFunc option to supply the context used for + asynchronous RPCs. +- profiler: Ignore NotDefinedError when fetching the instance name +- pubsub: + - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled. + - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning. + - Fix deadlock. + - Restore Ack/Nack/Modacks metrics. + - Improve context handling in iterator. + - Implement synchronous mode for Receive. + - pstest: add Pull. +- spanner: Add a metric for the number of sessions currently opened. +- storage: + - Canceling the context releases all resources. + - Add additional RetentionPolicy attributes. +- vision/apiv1: Add LocalizeObjects method. + ## v0.28.0 - bigtable: diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index 9db3f8319..eef41ed0f 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -1,117 +1,177 @@ # Contributing 1. Sign one of the contributor license agreements below. -1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. +1. `go get golang.org/x/review/git-codereview` to install the code reviewing +tool. 1. You will need to ensure that your `GOBIN` directory (by default - `$GOPATH/bin`) is in your `PATH` so that git can find the command. + `$GOPATH/bin`) is in your `PATH` so that git can find the command. 1. If you would like, you may want to set up aliases for git-codereview, - such that `git codereview change` becomes `git change`. See the - [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. + such that `git codereview change` becomes `git change`. See the + [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. 1. Should you run into issues with the git-codereview tool, please note - that all error messages will assume that you have set up these - aliases. + that all error messages will assume that you have set up these aliases. 1. Get the cloud package by running `go get -d cloud.google.com/go`. - 1. If you have already checked out the source, make sure that the remote git - origin is https://code.googlesource.com/gocloud: + 1. 
If you have already checked out the source, make sure that the remote + git origin is https://code.googlesource.com/gocloud: + + ``` + git remote set-url origin https://code.googlesource.com/gocloud + ``` - git remote set-url origin https://code.googlesource.com/gocloud 1. Make sure your auth is configured correctly by visiting - https://code.googlesource.com, clicking "Generate Password", and following - the directions. +https://code.googlesource.com, clicking "Generate Password", and following the +directions. 1. Make changes and create a change by running `git codereview change `, provide a commit message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending to the change with `git codereview change` and mail as your receive -feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. +1. Keep amending to the change with `git codereview change` and mail as your +receive feedback. Each new mailed amendment will create a new patch set for +your change in Gerrit. ## Integration Tests -In addition to the unit tests, you may run the integration test suite. +In addition to the unit tests, you may run the integration test suite. These +directions describe setting up your environment to run integration tests for +_all_ packages: note that many of these instructions may be redundant if you +intend only to run integration tests on a single package. -To run the integrations tests, creating and configuration of a project in the -Google Developers Console is required. +#### GCP Setup -After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). -Ensure the project-level **Owner** -[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the -service account. Alternatively, the account can be granted all of the following roles: -- **Editor** -- **Logs Configuration Writer** -- **PubSub Admin** +To run the integrations tests, creation and configuration of two projects in +the Google Developers Console is required: one specifically for Firestore +integration tests, and another for all other integration tests. We'll refer to +these projects as "general project" and "Firestore project". -Once you create a project, set the following environment variables to be able to -run the against the actual APIs. +After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) +for each project. Ensure the project-level **Owner** +[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to +each service account. During the creation of the service account, you should +download the JSON credential file for use later. -- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) -- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. 
+Next, ensure the following APIs are enabled in the general project: -Some packages require additional environment variables to be set: +- BigQuery API +- BigQuery Data Transfer API +- Cloud Dataproc API +- Cloud Dataproc Control API Private +- Cloud Datastore API +- Cloud Firestore API +- Cloud Key Management Service (KMS) API +- Cloud Natural Language API +- Cloud OS Login API +- Cloud Pub/Sub API +- Cloud Resource Manager API +- Cloud Spanner API +- Cloud Speech API +- Cloud Translation API +- Cloud Video Intelligence API +- Cloud Vision API +- Compute Engine API +- Compute Engine Instance Group Manager API +- Container Registry API +- Firebase Rules API +- Google Cloud APIs +- Google Cloud Deployment Manager V2 API +- Google Cloud SQL +- Google Cloud Storage +- Google Cloud Storage JSON API +- Google Compute Engine Instance Group Updater API +- Google Compute Engine Instance Groups API +- Kubernetes Engine API +- Stackdriver Error Reporting API -- firestore - - **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore. - - **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file. -- storage - - **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the - form "projects/P/locations/L/keyRings/R". -- translate - - **GCLOUD_TESTS_API_KEY**: API key for using the Translate API. -- profiler - - **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone. +Next, create a Datastore database in the general project, and a Firestore +database in the Firestore project. -Some packages can record the RPCs during integration tests to a file for -subsequent replay. To record, pass the `-record` flag to `go test`. The -recording will be saved to the _package_`.replay` file. To replay integration -tests from a saved recording, the replay file must be present, the `-short` flag -must be passed to `go test`, and the **GCLOUD_TESTS_GOLANG_ENABLE_REPLAY** -environment variable must have a non-empty value. +Finally, in the general project, create an API key for the translate API: -Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create some resources used in integration tests. +- Go to GCP Developer Console. +- Navigate to APIs & Services > Credentials. +- Click Create Credentials > API Key. +- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below. + +#### Local Setup + +Once the two projects are created and configured, set the following environment +variables: + +- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g. +bamboo-shift-455) for the general project. +- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general +project's service account. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID +(e.g. doorway-cliff-677) for the Firestore project. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the +Firestore project's service account. +- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests, +in the form +"projects/P/locations/L/keyRings/R". The creation of this is described below. +- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API. +- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone. + +Install the [gcloud command-line tool][gcloudcli] to your machine and use it to +create some resources used in integration tests. From the project's root directory: ``` sh -# Set the default project in your env. +# Sets the default project in your env. 
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID -# Authenticate the gcloud tool with your account. +# Authenticates the gcloud tool with your account. $ gcloud auth login # Create the indexes used in the datastore integration tests. -$ gcloud preview datastore create-indexes datastore/testdata/index.yaml +$ gcloud datastore create-indexes datastore/testdata/index.yaml -# Create a Google Cloud storage bucket with the same name as your test project, +# Creates a Google Cloud storage bucket with the same name as your test project, # and with the Stackdriver Logging service account as owner, for the sink # integration tests in logging. $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID -# Create a PubSub topic for integration tests of storage notifications. +# Creates a PubSub topic for integration tests of storage notifications. $ gcloud beta pubsub topics create go-storage-notification-test +# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user +# "service-@gs-project-accounts.iam.gserviceaccount.com" +# as a publisher to that topic. -# Create a Spanner instance for the spanner integration tests. -$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' -# NOTE: Spanner instances are priced by the node-hour, so you may want to delete -# the instance after testing with 'gcloud beta spanner instances delete'. +# Creates a Spanner instance for the spanner integration tests. +$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to +# delete the instance after testing with 'gcloud beta spanner instances delete'. -# For Storage integration tests: -# Enable KMS for your project in the Cloud Console. -# Create a KMS keyring, in the same location as the default location for your project's buckets. -$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION -# Create two keys in the keyring, named key1 and key2. -$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption -$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption -# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable. -$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING -# Authorize Google Cloud Storage to encrypt and decrypt using key1. +$ export MY_KEYRING=some-keyring-name +$ export MY_LOCATION=global +# Creates a KMS keyring, in the same location as the default location for your +# project's buckets. +$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION +# Creates two keys in the keyring, named key1 and key2. +$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. +$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING +# Authorizes Google Cloud Storage to encrypt and decrypt using key1. 
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 ``` -Once you've done the necessary setup, you can run the integration tests by running: +#### Running + +Once you've done the necessary setup, you can run the integration tests by +running: ``` sh $ go test -v cloud.google.com/go/... ``` +#### Replay + +Some packages can record the RPCs during integration tests to a file for +subsequent replay. To record, pass the `-record` flag to `go test`. The +recording will be saved to the _package_`.replay` file. To replay integration +tests from a saved recording, the replay file must be present, the `-short` +flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY` +environment variable must have a non-empty value. + ## Contributor License Agreements Before we can accept your pull requests you'll need to sign a Contributor diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 09370f36b..f85886370 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -54,6 +54,7 @@ Google API | Status | Package [Asset][cloud-asset] | alpha | [`godoc.org/cloud.google.com/go/asset/v1beta`][cloud-asset-ref] [BigQuery][cloud-bigquery] | stable | [`godoc.org/cloud.google.com/go/bigquery`][cloud-bigquery-ref] [Bigtable][cloud-bigtable] | stable | [`godoc.org/cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[Cloudtasks][cloud-tasks] | beta | [`godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3`][cloud-tasks-ref] [Container][cloud-container] | stable | [`godoc.org/cloud.google.com/go/container/apiv1`][cloud-container-ref] [ContainerAnalysis][cloud-containeranalysis] | beta | [`godoc.org/cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref] [Dataproc][cloud-dataproc] | stable | [`godoc.org/cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref] @@ -501,3 +502,6 @@ for more information. [cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory [cloud-asset-docs]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory [cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1 + +[cloud-tasks]: https://cloud.google.com/tasks/ +[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2beta3 diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md index 459208da0..1028a261a 100644 --- a/vendor/cloud.google.com/go/RELEASING.md +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -1,13 +1,47 @@ -# How to Release this Repo +# How to Create a New Release -1. Determine the current release version with `git tag -l`. It should look - something like `vX.Y.Z`. We'll call the current - version `$CV` and the new version `$NV`. -1. On master, run `git log $CV..` to list all the changes since the last - release. -1. Edit `CHANGES.md` to include a summary of the changes. -1. Mail the CL containing the `CHANGES.md` changes. When the CL is approved, submit it. -1. Without submitting any other CLs: - a. Switch to master. - b. Tag the repo with the next version: `git tag $NV`. - c. Push the tag: `git push origin $NV`. +## Prerequisites + +Install [releasetool](https://github.com/googleapis/releasetool). + +## Create a release + +1. `cd` into the root directory, e.g., `~/go/src/cloud.google.com/go` +1. Checkout the master branch and ensure a clean and up-to-date state. + ``` + git checkout master + git pull --tags origin master + ``` +1. 
Run releasetool to generate a changelog from the last version. Note, + releasetool will prompt if the new version is a major, minor, or patch + version. + ``` + releasetool start --language go + ``` +1. Format the output to match CHANGES.md. +1. Submit a CL with the changes in CHANGES.md. The commit message should look + like this (where `v0.31.0` is instead the correct version number): + ``` + all: Release v0.31.0 + ``` +1. Wait for approval from all reviewers and then submit the CL. +1. Return to the master branch and pull the release commit. + ``` + git checkout master + git pull origin master + ``` +1. Tag the current commit with the new version (e.g., `v0.31.0`) + ``` + releasetool tag --language go + ``` +1. Publish the tag to GoogleSource (i.e., origin): + ``` + git push origin $NEW_VERSION + ``` +1. Visit the [releases page][releases] on GitHub and click the "Draft a new + release" button. For tag version, enter the tag published in the previous + step. For the release title, use the version (e.g., `v0.31.0`). For the + description, copy the changes added to CHANGES.md. + + +[releases]: https://github.com/GoogleCloudPlatform/google-cloud-go/releases diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 9d0660be4..0d929a619 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -20,6 +20,7 @@ package metadata // import "cloud.google.com/go/compute/metadata" import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -31,9 +32,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) const ( @@ -143,7 +141,7 @@ func testOnGCE() bool { go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) - res, err := ctxhttp.Do(ctx, defaultClient.hc, req) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) if err != nil { resc <- false return diff --git a/vendor/cloud.google.com/go/regen-gapic.sh b/vendor/cloud.google.com/go/regen-gapic.sh index 7de67763c..252c9be0d 100755 --- a/vendor/cloud.google.com/go/regen-gapic.sh +++ b/vendor/cloud.google.com/go/regen-gapic.sh @@ -23,6 +23,7 @@ set -ex APIS=( +google/api/expr/artman_cel.yaml google/iam/artman_iam_admin.yaml google/cloud/asset/artman_cloudasset_v1beta1.yaml google/iam/credentials/artman_iamcredentials_v1.yaml @@ -37,6 +38,8 @@ google/cloud/oslogin/artman_oslogin_v1.yaml google/cloud/oslogin/artman_oslogin_v1beta.yaml google/cloud/redis/artman_redis_v1beta1.yaml google/cloud/redis/artman_redis_v1.yaml +google/cloud/scheduler/artman_cloudscheduler_v1beta1.yaml +google/cloud/securitycenter/artman_securitycenter_v1beta1.yaml google/cloud/speech/artman_speech_v1.yaml google/cloud/speech/artman_speech_v1p1beta1.yaml google/cloud/tasks/artman_cloudtasks_v2beta2.yaml @@ -47,7 +50,6 @@ google/cloud/videointelligence/artman_videointelligence_v1beta1.yaml google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml google/cloud/vision/artman_vision_v1.yaml google/cloud/vision/artman_vision_v1p1beta1.yaml -google/container/artman_container.yaml google/devtools/artman_clouddebugger.yaml google/devtools/clouderrorreporting/artman_errorreporting.yaml google/devtools/cloudtrace/artman_cloudtrace_v1.yaml @@ -70,6 +72,13 @@ for api in "${APIS[@]}"; do cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/ done +# NOTE(pongad): `sed -i` doesn't work on Macs, 
because -i option needs an argument. +# `-i ''` doesn't work on GNU, since the empty string is treated as a file name. +# So we just create the backup and delete it after. +ver=$(date +%Y%m%d) +find $GOPATH/src/cloud.google.com/go/ -name '*.go' -exec sed -i.backup -e "s/^const versionClient.*/const versionClient = \"$ver\"/" '{}' + +find $GOPATH/src/cloud.google.com/go/ -name '*.backup' -delete + #go list cloud.google.com/go/... | grep apiv | xargs go test #go test -short cloud.google.com/go/... diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml new file mode 100644 index 000000000..192981b7f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - 1.10.x + +go_import_path: contrib.go.opencensus.io/exporter/ocagent + +before_script: + - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any + - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any + +script: + - go build ./... # Ensure dependency updates don't break build + - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi + - go vet ./... + - go test -v -race $PKGS # Run all the tests with the race detector enabled + - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md new file mode 100644 index 000000000..0786fdf43 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md @@ -0,0 +1,24 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. + +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE similarity index 93% rename from vendor/github.com/opencontainers/go-digest/LICENSE.code rename to vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE index 0ea3ff81e..261eeb9e9 100644 --- a/vendor/github.com/opencontainers/go-digest/LICENSE.code +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright 2016 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. 
+ + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md new file mode 100644 index 000000000..64550ee60 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md @@ -0,0 +1,61 @@ +# OpenCensus Agent Go Exporter + +[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] + + +This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. +OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from +OpenCensus Library, export them to other backends and possibly push configurations back to +Library. See more details on [OC-Agent Readme][OCAgentReadme]. + +Note: This is an experimental repository and is likely to get backwards-incompatible changes. +Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. + +## Installation + +```bash +$ go get -u contrib.go.opencensus.io/exporter/ocagent/v1 +``` + +## Usage + +```go +import ( + "context" + "fmt" + "log" + "time" + + "contrib.go.opencensus.io/exporter/ocagent/v1" + "go.opencensus.io/trace" +) + +func Example() { + exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) + if err != nil { + log.Fatalf("Failed to create the agent exporter: %v", err) + } + defer exp.Stop() + + // Now register it as a trace exporter. + trace.RegisterExporter(exp) + + // Then use the OpenCensus tracing library, like we normally would. 
+ ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") + defer span.End() + + for i := 0; i < 10; i++ { + _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) + <-time.After(6 * time.Millisecond) + iSpan.End() + } +} +``` + +[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto +[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go +[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg +[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent +[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master +[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent + diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go new file mode 100644 index 000000000..297e44b6e --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "math/rand" + "time" +) + +var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) + +// retries function fn upto n times, if fn returns an error lest it returns nil early. +// It applies exponential backoff in units of (1< 0 { + port = ae.agentPort + } + return fmt.Sprintf("%s:%d", DefaultAgentHost, port) +} + +func (ae *Exporter) doStartLocked() error { + if ae.started { + return nil + } + + // Now start it + cc, err := ae.dialToAgent() + if err != nil { + return err + } + ae.grpcClientConn = cc + + // Initiate the trace service by sending over node identifier info. + traceSvcClient := agenttracepb.NewTraceServiceClient(cc) + traceExporter, err := traceSvcClient.Export(context.Background()) + if err != nil { + return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) + } + + firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{Node: ae.nodeInfo} + err = nTriesWithExponentialBackoff(maxInitialTracesRetries, 200*time.Microsecond, func() error { + return traceExporter.Send(firstTraceMessage) + }) + if err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + ae.traceExporter = traceExporter + + // Initiate the config service by sending over node identifier info. 
+ configStream, err := traceSvcClient.Config(context.Background()) + if err != nil { + return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) + } + firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: ae.nodeInfo} + err = nTriesWithExponentialBackoff(maxInitialConfigRetries, 200*time.Microsecond, func() error { + return configStream.Send(firstCfgMessage) + }) + if err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + // In the background, handle trace configurations that are beamed down + // by the agent, but also reply to it with the applied configuration. + go ae.handleConfigStreaming(configStream) + + return nil +} + +// dialToAgent performs a best case attempt to dial to the agent. +// It retries failed dials with: +// * gRPC dialTimeout of 1s +// * exponential backoff, 5 times with a period of 50ms +// hence in the worst case of (no agent actually available), it +// will take at least: +// (5 * 1s) + ((1<<5)-1) * 0.01 s = 5s + 1.55s = 6.55s +func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { + addr := ae.prepareAgentAddress() + dialOpts := []grpc.DialOption{grpc.WithBlock()} + if ae.canDialInsecure { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } + + var cc *grpc.ClientConn + dialOpts = append(dialOpts, grpc.WithTimeout(1*time.Second)) + dialBackoffWaitPeriod := 50 * time.Millisecond + err := nTriesWithExponentialBackoff(5, dialBackoffWaitPeriod, func() error { + var err error + cc, err = grpc.Dial(addr, dialOpts...) + return err + }) + return cc, err +} + +func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { + for { + recv, err := configStream.Recv() + if err != nil { + // TODO: Check if this is a transient error or exponential backoff-able. + return err + } + cfg := recv.Config + if cfg == nil { + continue + } + + // Otherwise now apply the trace configuration sent down from the agent + if psamp := cfg.GetProbabilitySampler(); psamp != nil { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) + } else if csamp := cfg.GetConstantSampler(); csamp != nil { + alwaysSample := csamp.Decision == true + if alwaysSample { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + } else { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) + } + } else { // TODO: Add the rate limiting sampler here + } + + // Then finally send back to upstream the newly applied configuration + err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) + if err != nil { + return err + } + } +} + +var ( + errNotStarted = errors.New("not started") +) + +// Stop shuts down all the connections and resources +// related to the exporter. +func (ae *Exporter) Stop() error { + ae.mu.Lock() + defer ae.mu.Unlock() + + if !ae.started { + return errNotStarted + } + if ae.stopped { + // TODO: tell the user that we've already stopped, so perhaps a sentinel error? + return nil + } + + ae.Flush() + + // Now close the underlying gRPC connection. 
+ var err error + if ae.grpcClientConn != nil { + err = ae.grpcClientConn.Close() + } + + // At this point we can change the state variables: started and stopped + ae.started = false + ae.stopped = true + + return err +} + +func (ae *Exporter) ExportSpan(sd *trace.SpanData) { + if sd == nil { + return + } + _ = ae.traceBundler.Add(sd, -1) +} + +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { + if len(sdl) == 0 { + return + } + protoSpans := make([]*tracepb.Span, 0, len(sdl)) + for _, sd := range sdl { + if sd != nil { + protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) + } + } + + if len(protoSpans) > 0 { + _ = ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + Spans: protoSpans, + }) + } +} + +func (ae *Exporter) Flush() { + ae.traceBundler.Flush() +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go new file mode 100644 index 000000000..751f311f1 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -0,0 +1,80 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +const ( + DefaultAgentPort uint16 = 55678 + DefaultAgentHost string = "localhost" +) + +type ExporterOption interface { + withExporter(e *Exporter) +} + +type portSetter uint16 + +func (ps portSetter) withExporter(e *Exporter) { + e.agentPort = uint16(ps) +} + +var _ ExporterOption = (*portSetter)(nil) + +type insecureGrpcConnection int + +var _ ExporterOption = (*insecureGrpcConnection)(nil) + +func (igc *insecureGrpcConnection) withExporter(e *Exporter) { + e.canDialInsecure = true +} + +// WithInsecure disables client transport security for the exporter's gRPC connection +// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure +// does. Note, by default, client security is required unless WithInsecure is used. +func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } + +// WithPort allows one to override the port that the exporter will +// connect to the agent on, instead of using DefaultAgentPort. +func WithPort(port uint16) ExporterOption { + return portSetter(port) +} + +type addressSetter string + +func (as addressSetter) withExporter(e *Exporter) { + e.agentAddress = string(as) +} + +var _ ExporterOption = (*addressSetter)(nil) + +// WithAddress allows one to set the address that the exporter will +// connect to the agent on. If unset, it will instead try to use +// connect to DefaultAgentHost:DefaultAgentPort +func WithAddress(addr string) ExporterOption { + return addressSetter(addr) +} + +type serviceNameSetter string + +func (sns serviceNameSetter) withExporter(e *Exporter) { + e.serviceName = string(sns) +} + +var _ ExporterOption = (*serviceNameSetter)(nil) + +// WithServiceName allows one to set/override the service name +// that the exporter will report to the agent. 
+func WithServiceName(serviceName string) ExporterOption { + return serviceNameSetter(serviceName) +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go new file mode 100644 index 000000000..ba7b51d20 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -0,0 +1,162 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "time" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/golang/protobuf/ptypes/timestamp" +) + +func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { + if sd == nil { + return nil + } + var namePtr *tracepb.TruncatableString + if sd.Name != "" { + namePtr = &tracepb.TruncatableString{Value: sd.Name} + } + return &tracepb.Span{ + TraceId: sd.TraceID[:], + SpanId: sd.SpanID[:], + ParentSpanId: sd.ParentSpanID[:], + Status: ocStatusToProtoStatus(sd.Status), + StartTime: timeToTimestamp(sd.StartTime), + EndTime: timeToTimestamp(sd.EndTime), + Links: ocLinksToProtoLinks(sd.Links), + Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), + Name: namePtr, + Attributes: ocAttributesToProtoAttributes(sd.Attributes), + Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), + } +} + +var blankStatus trace.Status + +func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { + if status == blankStatus { + return nil + } + return &tracepb.Status{ + Code: status.Code, + Message: status.Message, + } +} + +func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, ocLink := range links { + // This redefinition is necessary to prevent ocLink.*ID[:] copies + // being reused -- in short we need a new ocLink per iteration. 
+ ocLink := ocLink + + sl = append(sl, &tracepb.Span_Link{ + TraceId: ocLink.TraceID[:], + SpanId: ocLink.SpanID[:], + Type: ocLinkTypeToProtoLinkType(ocLink.Type), + }) + } + + return &tracepb.Span_Links{ + Link: sl, + } +} + +func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { + switch oct { + case trace.LinkTypeChild: + return tracepb.Span_Link_CHILD_LINKED_SPAN + case trace.LinkTypeParent: + return tracepb.Span_Link_PARENT_LINKED_SPAN + default: + return tracepb.Span_Link_TYPE_UNSPECIFIED + } +} + +func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { + if len(attrs) == 0 { + return nil + } + outMap := make(map[string]*tracepb.AttributeValue) + for k, v := range attrs { + switch v := v.(type) { + case bool: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} + + case int: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} + + case int64: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} + + case string: + outMap[k] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: v}, + }, + } + } + } + return &tracepb.Span_Attributes{ + AttributeMap: outMap, + } +} + +func timeToTimestamp(t time.Time) *timestamp.Timestamp { + nanoTime := t.UnixNano() + return ×tamp.Timestamp{ + Seconds: nanoTime / 1e9, + Nanos: int32(nanoTime % 1e9), + } +} + +func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindClient: + return tracepb.Span_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SERVER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} + +func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { + if ts == nil { + return nil + } + return &tracepb.Span_Tracestate{ + Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), + } +} + +func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { + protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) + for _, entry := range entries { + protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ + Key: entry.Key, + Value: entry.Value, + }) + } + return protoEntries +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go new file mode 100644 index 000000000..68be4c75b --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go @@ -0,0 +1,17 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ocagent + +const Version = "0.0.1" diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore index ab262cbe5..3350aaf70 100644 --- a/vendor/github.com/Azure/go-autorest/.gitignore +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -9,6 +9,7 @@ _obj _test .DS_Store .idea/ +.vscode/ # Architecture specific extensions/prefixes *.[568vq] diff --git a/vendor/github.com/Azure/go-autorest/.travis.yml b/vendor/github.com/Azure/go-autorest/.travis.yml index 2664a2056..e125eb3c9 100644 --- a/vendor/github.com/Azure/go-autorest/.travis.yml +++ b/vendor/github.com/Azure/go-autorest/.travis.yml @@ -6,8 +6,6 @@ go: - master - 1.11.x - 1.10.x - - 1.9.x - - 1.8.x matrix: allow_failures: @@ -20,7 +18,7 @@ before_install: - curl -L -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && chmod +x $GOPATH/bin/dep install: - - go get -u github.com/golang/lint/golint + - go get -u golang.org/x/lint/golint - go get -u github.com/stretchr/testify - go get -u github.com/GoASTScanner/gas - dep ensure @@ -32,4 +30,4 @@ script: - go vet ./autorest/... - test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)" - go build -v ./autorest/... - - go test -v ./autorest/... + - go test -race -v ./autorest/... diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md index 8500cb8df..15d8313c0 100644 --- a/vendor/github.com/Azure/go-autorest/CHANGELOG.md +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -1,5 +1,100 @@ # CHANGELOG +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. + +## v11.2.6 + +### Bug Fixes + +- If zero bytes are read from a polling response body don't attempt to unmarshal them. + +## v11.2.5 + +### Bug Fixes + +- Removed race condition in `autorest.DoRetryForStatusCodes`. + +## v11.2.4 + +### Bug Fixes + +- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. + +## v11.2.1 + +NOTE: Versions of Go prior to 1.10 have been removed from CI as they no +longer work with golint. + +### Bug Fixes + +- Method `MSIConfig.Authorizer` now supports user-assigned identities. +- The adal package now reports its own user-agent string. + +## v11.2.0 + +### New Features + +- Added `tracing` package that enables instrumentation of HTTP and API calls. + Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` + will start instrumenting the code for metrics and traces. + Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or + calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an + App Insights Local Forwarder that is needs to be running. Note that if the + AI Local Forwarder is not running tracking will still be enabled. + By default, instrumentation is disabled. Once enabled, instrumentation can also + be programatically disabled by calling `Disable`. +- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. + +### Bug Fixes + +- Don't use the initial request's context for LRO polling. 
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if + it is already set. + +## v11.1.1 + +### Bug Fixes + +- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. + +## v11.1.0 + +### New Features + +- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. +- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. + +## v11.0.1 + +### New Features + +- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication. + +## v11.0.0 + +### Breaking Changes + +- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` + - ExpiresIn + - ExpiresOn + - NotBefore + +### New Features + +- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. +- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration. + ## v10.15.5 ### Bug Fixes @@ -28,21 +123,21 @@ ### Bug Fixes -- If an LRO API returns a ```Failed``` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. -- For failed LROs without an OData v4 error include the response body in the error's ```AdditionalInfo``` field to aid in diagnosing the failure. +- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. +- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. ## v10.15.0 ### New Features - Add initial support for request/response logging via setting environment variables. - Setting ```AZURE_GO_SDK_LOG_LEVEL``` to ```LogInfo``` will log request/response - without their bodies. To include the bodies set the log level to ```LogDebug```. + Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response + without their bodies. To include the bodies set the log level to `LogDebug`. By default the logger writes to strerr, however it can also write to stdout or a file - if specified in ```AZURE_GO_SDK_LOG_FILE```. Note that if the specified file + if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file already exists it will be truncated. IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key - headers. Any other secrets will *not* be redacted. + headers. Any other secrets will _not_ be redacted. ## v10.14.0 @@ -124,10 +219,10 @@ ### Deprecated Methods -| Old Method | New Method | -|-------------:|:-----------:| -|azure.NewFuture() | azure.NewFutureFromResponse()| -|Future.WaitForCompletion() | Future.WaitForCompletionRef()| +| Old Method | New Method | +| -------------------------: | :---------------------------: | +| azure.NewFuture() | azure.NewFutureFromResponse() | +| Future.WaitForCompletion() | Future.WaitForCompletionRef() | ### New Features @@ -159,7 +254,7 @@ ### New Features -- Added *WithContext() methods to ADAL token refresh operations. +- Added \*WithContext() methods to ADAL token refresh operations. ## v10.6.2 @@ -192,12 +287,14 @@ ## v10.4.0 ### New Features + - Added helper for parsing Azure Resource ID's. 
- Added deprecation message to utils.GetEnvVarOrExit() ## v10.3.0 ### New Features + - Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints - Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint @@ -255,6 +352,7 @@ - The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). ## v9.10.0 + - Fix the Service Bus suffix in Azure public env - Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) @@ -308,6 +406,7 @@ ## v9.5.3 ### Bug Fixes + - Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. - Set correct Content Type when using autorest.WithFormData. @@ -315,7 +414,7 @@ ### Bug Fixes -- Check for nil *http.Response before dereferencing it. +- Check for nil \*http.Response before dereferencing it. ## v9.5.1 @@ -399,8 +498,8 @@ ### Bug Fixes - - RetriableRequest can now tolerate a ReadSeekable body being read but not reset. - - Adding missing Apache Headers +- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. +- Adding missing Apache Headers ## v9.0.0 @@ -429,9 +528,11 @@ Updates to Error string formats for clarity. Also, adding a copy of the http.Res - Make RetriableRequest work with multiple versions of Go ## v8.1.1 + Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. ## v8.1.0 + Adds RetriableRequest type for more efficient handling of retrying HTTP requests. ## v8.0.0 @@ -440,9 +541,11 @@ ADAL refactored into its own package. Support for UNIX time. ## v7.3.1 + - Version Testing now removed from production bits that are shipped with the library. ## v7.3.0 + - Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations to acknowledge that they do not need either the entire or a trailing portion of accepts response body. In doing so, Go's http library can reuse HTTP @@ -452,40 +555,49 @@ Support for UNIX time. - Updating Glide dependencies. ## v7.2.5 + - Fixed the Active Directory endpoint for the China cloud. - Removes UTF-8 BOM if present in response payload. - Added telemetry. ## v7.2.3 + - Fixing bug in calls to `DelayForBackoff` that caused doubling of delay duration. ## v7.2.2 + - autorest/azure: added ASM and ARM VM DNS suffixes. ## v7.2.1 + - fixed parsing of UTC times that are not RFC3339 conformant. ## v7.2.0 + - autorest/validation: Reformat validation error for better error message. ## v7.1.0 + - preparer: Added support for multipart formdata - WithMultiPartFormdata() - preparer: Added support for sending file in request body - WithFile - client: Added RetryDuration parameter. - autorest/validation: new package for validation code for Azure Go SDK. ## v7.0.7 + - Add trailing / to endpoint - azure: add EnvironmentFromName ## v7.0.6 + - Add retry logic for 408, 500, 502, 503 and 504 status codes. - Change url path and query encoding logic. - Fix DelayForBackoff for proper exponential delay. - Add CookieJar in Client. ## v7.0.5 + - Add check to start polling only when status is in [200,201,202]. - Refactoring for unchecked errors. - azure/persist changes. @@ -494,20 +606,25 @@ Support for UNIX time. 
- Add attribute details in service error. ## v7.0.4 + - Better error messages for long running operation failures ## v7.0.3 + - Corrected DoPollForAsynchronous to properly handle the initial response ## v7.0.2 + - Corrected DoPollForAsynchronous to continue using the polling method first discovered ## v7.0.1 + - Fixed empty JSON input error in ByUnmarshallingJSON - Fixed polling support for GET calls - Changed format name from TimeRfc1123 to TimeRFC1123 ## v7.0.0 + - Added ByCopying responder with supporting TeeReadCloser - Rewrote Azure asynchronous handling - Reverted to only unmarshalling JSON @@ -524,9 +641,11 @@ only checked for one of those (that is, the presence of the `Azure-AsyncOperatio The new code correctly covers all cases and aligns with the other Azure SDKs. ## v6.1.0 + - Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. ## v6.0.0 + - Completely reworked the handling of polled and asynchronous requests - Removed unnecessary routines - Reworked `mocks.Sender` to replay a series of `http.Response` objects @@ -537,21 +656,25 @@ Handling polled and asynchronous requests is no longer part of `Client#Send`. In and `azure.DoPollForAsynchronous` for examples. ## v5.0.0 + - Added new RespondDecorators unmarshalling primitive types - Corrected application of inspection and authorization PrependDecorators ## v4.0.0 + - Added support for Azure long-running operations. - Added cancelation support to all decorators and functions that may delay. - Breaking: `DelayForBackoff` now accepts a channel, which may be nil. ## v3.1.0 + - Add support for OAuth Device Flow authorization. - Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. - Add helpers for persisting and restoring Tokens. - Increased code coverage in the github.com/Azure/autorest/azure package ## v3.0.0 + - Breaking: `NewErrorWithError` no longer takes `statusCode int`. - Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. - Breaking: `Client#Send()` no longer takes `codes ...int` argument. diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock index 4cf49542f..8513320af 100644 --- a/vendor/github.com/Azure/go-autorest/Gopkg.lock +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -1,6 +1,26 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+[[projects]] + digest = "1:e1b859e3d9e90007d5fbf25edf57733b224f1857f6592636130afab3af8cfae7" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "" + revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d" + version = "v0.2.0" + +[[projects]] + digest = "1:e0a4505d5cf7ac6b5d92e3aee79d838b5f1ae8e9641ec7fa5d1e9b01d7a7ea95" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/trace/v1", + "gen-go/trace/v1", + ] + pruneopts = "" + revision = "24333298e36590ea0716598caacc8959fc393c48" + version = "v0.0.2" + [[projects]] digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" name = "github.com/davecgh/go-spew" @@ -18,20 +38,35 @@ version = "v3.2.0" [[projects]] - branch = "master" digest = "1:7f175a633086a933d1940a7e7dc2154a0070a7c25fb4a2f671f3eef1a34d1fd7" name = "github.com/dimchansky/utfbom" packages = ["."] pruneopts = "" revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c" + version = "v1.0.0" + +[[projects]] + digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" [[projects]] - branch = "master" digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf" name = "github.com/mitchellh/go-homedir" packages = ["."] pruneopts = "" revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" + version = "v1.0.0" [[projects]] digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" @@ -52,25 +87,160 @@ revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" version = "v1.2.2" +[[projects]] + digest = "1:ad67dfd3799a2c58f6c65871dd141d8b53f61f600aec48ce8d7fa16a4d5476f8" + name = "go.opencensus.io" + packages = [ + ".", + "exemplar", + "internal", + "internal/tagencoding", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "" + revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b" + version = "v0.18.0" + [[projects]] branch = "master" - digest = "1:793a79198b755828dec284c6f1325e24e09186f1b7ba818b65c7c35104ed86eb" + digest = "1:78f41d38365ccef743e54ed854a2faf73313ba0750c621116a8eeb0395590bd0" name = "golang.org/x/crypto" packages = [ "pkcs12", "pkcs12/internal/rc2", ] pruneopts = "" - revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" + revision = "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb" + +[[projects]] + branch = "master" + digest = "1:547dcb6aebfb7fb17947660ebb034470c13f4d63d893def190a2f7ba3d09bc38" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "" + revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a" + +[[projects]] + branch = "master" + digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + digest = "1:2ed0bf267e44950120acd95570227e28184573ffb099bd85b529ee148e004ddb" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "" + revision = 
"fa43e7bc11baaae89f3f902b2b4d832b68234844" + +[[projects]] + digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:ca2e72555afbcebdead7c961b135650b5111dbbaf37a874de63976fdda57f129" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "" + revision = "c51f30376ab7ec4f22b65de846a41593c8b70f07" + +[[projects]] + branch = "master" + digest = "1:1b3b4ec811695907c4a3cb92e4f32834a4a42459bff7e02068b6b2b5344803cd" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "" + revision = "af9cb2a35e7f169ec875002c1829c9b315cddc04" + +[[projects]] + digest = "1:15656947b87a6a240e61dcfae9e71a55a8d5677f240d12ab48f02cdbabf1e309" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "" + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", "github.com/dgrijalva/jwt-go", "github.com/dimchansky/utfbom", "github.com/mitchellh/go-homedir", "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", "golang.org/x/crypto/pkcs12", ] solver-name = "gps-cdcl" diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml index 917dae322..e4040c89e 100644 --- a/vendor/github.com/Azure/go-autorest/Gopkg.toml +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -25,17 +25,21 @@ version = "3.1.0" [[constraint]] - branch = "master" name = "github.com/dimchansky/utfbom" + version = "1.0.0" [[constraint]] - branch = "master" name = "github.com/mitchellh/go-homedir" + version = "1.0.0" [[constraint]] name = "github.com/stretchr/testify" version = "1.2.0" [[constraint]] - branch = "master" - name = "golang.org/x/crypto" + name = "go.opencensus.io" + version = "0.18.0" + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.2.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index bee5e61dd..8c83a917f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -19,10 +19,6 @@ import ( "net/url" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -46,11 +42,25 @@ func validateStringParam(param, name string) error { // NewOAuthConfig returns an 
OAuthConfig with tenant specific urls func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. +func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { return nil, err } + api := "" // it's legal for tenantID to be empty so don't validate it - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err if err != nil { return nil, err } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) if err != nil { return nil, err } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) if err != nil { return nil, err } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 0e5ad14d3..834401e00 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. type SendDecorator func(Sender) Sender diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 32aea8389..52ca37866 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -29,13 +29,12 @@ import ( "net" "net/http" "net/url" - "strconv" "strings" "sync" "time" "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/version" + "github.com/Azure/go-autorest/tracing" "github.com/dgrijalva/jwt-go" ) @@ -97,18 +96,27 @@ type RefresherWithContext interface { type TokenRefreshCallback func(Token) error // Token encapsulates the access token used to authorize Azure requests. 
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response type Token struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` Resource string `json:"resource"` Type string `json:"token_type"` } +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + // IsZero returns true if the token object is zero-initialized. func (t Token) IsZero() bool { return t == Token{} @@ -116,12 +124,12 @@ func (t Token) IsZero() bool { // Expires returns the time.Time when the Token expires. func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) + s, err := t.ExpiresOn.Float64() if err != nil { s = -3600 } - expiration := date.NewUnixTimeFromSeconds(float64(s)) + expiration := date.NewUnixTimeFromSeconds(s) return time.Time(expiration).UTC() } @@ -218,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo token := jwt.New(jwt.SigningMethodRS256) token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c token.Claims = jwt.MapClaims{ "aud": spt.inner.OauthConfig.TokenEndpoint.String(), "iss": spt.inner.ClientID, @@ -375,8 +385,13 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { if err != nil { return err } - spt.refreshLock = &sync.RWMutex{} - spt.sender = &http.Client{} + // Don't override the refreshLock or the sender if those have been already set. + if spt.refreshLock == nil { + spt.refreshLock = &sync.RWMutex{} + } + if spt.sender == nil { + spt.sender = &http.Client{Transport: tracing.Transport} + } return nil } @@ -414,6 +429,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso } spt := &ServicePrincipalToken{ inner: servicePrincipalToken{ + Token: newToken(), OauthConfig: oauthConfig, Secret: secret, ClientID: id, @@ -422,7 +438,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso RefreshWithin: defaultRefresh, }, refreshLock: &sync.RWMutex{}, - sender: &http.Client{}, + sender: &http.Client{Transport: tracing.Transport}, refreshCallbacks: callbacks, } return spt, nil @@ -653,6 +669,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI spt := &ServicePrincipalToken{ inner: servicePrincipalToken{ + Token: newToken(), OauthConfig: OAuthConfig{ TokenEndpoint: *msiEndpointURL, }, @@ -662,7 +679,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI RefreshWithin: defaultRefresh, }, refreshLock: &sync.RWMutex{}, - sender: &http.Client{}, + sender: &http.Client{Transport: tracing.Transport}, refreshCallbacks: callbacks, MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, } @@ -779,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource if err != nil { return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) } - req.Header.Add("User-Agent", version.UserAgent()) + req.Header.Add("User-Agent", userAgent()) req = req.WithContext(ctx) if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { v := url.Values{} diff --git a/vendor/github.com/Azure/go-autorest/version/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go similarity index 68% rename from vendor/github.com/Azure/go-autorest/version/version.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/version.go index 94de7c51e..3944edf05 100644 --- a/vendor/github.com/Azure/go-autorest/version/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -1,4 +1,9 @@ -package version +package adal + +import ( + "fmt" + "runtime" +) // Copyright 2017 Microsoft Corporation // @@ -14,24 +19,17 @@ package version // See the License for the specific language governing permissions and // limitations under the License. -import ( - "fmt" - "runtime" -) - -// Number contains the semantic version of this SDK. -const Number = "v10.15.5" +const number = "v1.0.0" var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", runtime.Version(), runtime.GOARCH, runtime.GOOS, - Number, + number, ) ) -// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent +func userAgent() string { + return ua } diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 77eff45bd..bc474b406 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -21,6 +21,7 @@ import ( "strings" "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/tracing" ) const ( @@ -68,7 +69,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} } -// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters +// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { return func(p Preparer) Preparer { return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) @@ -147,7 +148,7 @@ type BearerAuthorizerCallback struct { // is invoked when the HTTP request is submitted. func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { if sender == nil { - sender = &http.Client{} + sender = &http.Client{Transport: tracing.Transport} } return &BearerAuthorizerCallback{sender: sender, callback: callback} } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 9dd7a1d27..3f6a2c097 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -26,6 +26,7 @@ import ( "time" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" ) const ( @@ -58,10 +59,7 @@ func NewFuture(req *http.Request) Future { // with the initial response from an asynchronous operation. 
func NewFutureFromResponse(resp *http.Response) (Future, error) { pt, err := createPollingTracker(resp) - if err != nil { - return Future{}, err - } - return Future{pt: pt}, nil + return Future{pt: pt}, err } // Response returns the last HTTP response. @@ -89,7 +87,23 @@ func (f Future) PollingMethod() PollingMethodType { } // Done queries the service to see if the operation has completed. +// Deprecated: Use DoneWithContext() func (f *Future) Done(sender autorest.Sender) (bool, error) { + return f.DoneWithContext(context.Background(), sender) +} + +// DoneWithContext queries the service to see if the operation has completed. +func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + // support for legacy Future implementation if f.req != nil { resp, err := sender.Do(f.req) @@ -110,7 +124,7 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) { if f.pt.hasTerminated() { return true, f.pt.pollingError() } - if err := f.pt.pollForStatus(sender); err != nil { + if err := f.pt.pollForStatus(ctx, sender); err != nil { return false, err } if err := f.pt.checkForErrors(); err != nil { @@ -167,11 +181,25 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e // running operation has completed, the provided context is cancelled, or the client's // polling duration has been exceeded. It will retry failed polling attempts based on // the retry value defined in the client up to the maximum retry attempts. -func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error { - ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) - defer cancel() - done, err := f.Done(client) - for attempts := 0; !done; done, err = f.Done(client) { +func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + if d := client.PollingDuration; d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { if attempts >= client.RetryAttempts { return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") } @@ -195,12 +223,12 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien attempts++ } // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) if !delayElapsed { - return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") } } - return err + return } // MarshalJSON implements the json.Marshaler interface. 
@@ -285,7 +313,7 @@ type pollingTracker interface { initializeState() error // makes an HTTP request to check the status of the LRO - pollForStatus(sender autorest.Sender) error + pollForStatus(ctx context.Context, sender autorest.Sender) error // updates internal tracker state, call this after each call to pollForStatus updatePollingState(provStateApl bool) error @@ -399,6 +427,10 @@ func (pt *pollingTrackerBase) updateRawBody() error { if err != nil { return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } // put the body back so it's available to other callers pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) if err = json.Unmarshal(b, &pt.rawBody); err != nil { @@ -408,15 +440,13 @@ func (pt *pollingTrackerBase) updateRawBody() error { return nil } -func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error { +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { req, err := http.NewRequest(http.MethodGet, pt.URI, nil) if err != nil { return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") } - // attach the context from the original request if available (it will be absent for deserialized futures) - if pt.resp != nil { - req = req.WithContext(pt.resp.Request.Context()) - } + + req = req.WithContext(ctx) pt.resp, err = sender.Do(req) if err != nil { return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") @@ -445,7 +475,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() { re := respErr{} defer pt.resp.Body.Close() var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { goto Default } if err = json.Unmarshal(b, &re); err != nil { @@ -663,7 +693,7 @@ func (pt *pollingTrackerPatch) updatePollingMethod() error { } } // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - // note the absense of the "final GET" mechanism for PATCH + // note the absence of the "final GET" mechanism for PATCH if pt.resp.StatusCode == http.StatusAccepted { ao, err := getURLFromAsyncOpHeader(pt.resp) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go index bd34f0ed5..86ce9f2b5 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError } // poll for registered provisioning state - now := time.Now() - for err == nil && time.Since(now) < client.PollingDuration { + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { // taken from the resources SDK // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 preparer := autorest.CreatePreparer( @@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError return originalReq.Context().Err() } } - if !(time.Since(now) < client.PollingDuration) { + if client.PollingDuration != 0 && 
!(time.Since(registrationStartTime) < client.PollingDuration) { return errors.New("polling for resource provider registration has exceeded the polling duration") } return err diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index 5c558c83a..4874e6e82 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -26,7 +26,7 @@ import ( "time" "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/version" + "github.com/Azure/go-autorest/tracing" ) const ( @@ -147,6 +147,7 @@ type Client struct { PollingDelay time.Duration // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. PollingDuration time.Duration // RetryAttempts sets the default number of retry attempts for client. @@ -173,7 +174,7 @@ func NewClientWithUserAgent(ua string) Client { PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: DefaultRetryDuration, - UserAgent: version.UserAgent(), + UserAgent: UserAgent(), } c.Sender = c.sender() c.AddToUserAgent(ua) @@ -229,8 +230,10 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { func (c Client) sender() Sender { if c.Sender == nil { j, _ := cookiejar.New(nil) - return &http.Client{Jar: j} + client := &http.Client{Jar: j, Transport: tracing.Transport} + return client } + return c.Sender } diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index e8893a282..6665d7c00 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -21,6 +21,8 @@ import ( "net/http" "strconv" "time" + + "github.com/Azure/go-autorest/tracing" ) // Sender is the interface that wraps the Do method to send HTTP requests. @@ -38,7 +40,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. type SendDecorator func(Sender) Sender @@ -68,7 +70,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender { // // Send will not poll or retry requests. func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{}, r, decorators...) + return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...) 
} // SendWithSender sends the passed http.Request, through the provided Sender, returning the @@ -216,8 +218,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { rr := NewRetriableRequest(r) // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; { + for attempt := 0; attempt < attempts+1; { err = rr.Prepare() if err != nil { return resp, err diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index bfddd90b5..08cf11c11 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -157,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) { } // String method converts interface v to string. If interface is a list, it -// joins list elements using the seperator. Note that only sep[0] will be used for +// joins list elements using the separator. Note that only sep[0] will be used for // joining if any separator is specified. func String(v interface{}, sep ...string) string { if len(sep) == 0 { diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 3c6451546..32c4e00cf 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,7 +1,5 @@ package autorest -import "github.com/Azure/go-autorest/version" - // Copyright 2017 Microsoft Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +14,28 @@ import "github.com/Azure/go-autorest/version" // See the License for the specific language governing permissions and // limitations under the License. +import ( + "fmt" + "runtime" +) + +const number = "v11.2.8" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + // Version returns the semantic version (see http://semver.org). func Version() string { - return version.Number + return number } diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go index 756fd80ca..da09f394c 100644 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -162,7 +162,7 @@ type Writer interface { // WriteResponse writes the specified HTTP response to the logger if the log level is greater than // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no respone content is excluded. + // By default no response content is excluded. 
WriteResponse(resp *http.Response, filter Filter) } @@ -318,7 +318,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { // returns true if the provided body should be included in the log func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") } // creates standard header for log entries, it contains a timestamp and the log level diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 000000000..cd61cb18b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,190 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "fmt" + "net/http" + "os" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +var ( + // Transport is the default tracing RoundTripper. The custom options setter will control + // if traces are being emitted or not. + Transport = &ochttp.Transport{ + Propagation: &tracecontext.HTTPFormat{}, + GetStartOptions: getStartOptions, + } + + // enabled is the flag for marking if tracing is enabled. + enabled = false + + // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise + // it will be using the parent sampler or the default. + sampler = trace.NeverSample() + + // Views for metric instrumentation. + views = map[string]*view.View{} + + // the trace exporter + traceExporter trace.Exporter +) + +func init() { + enableFromEnv() +} + +func enableFromEnv() { + _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") + _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") + if ok || legacyOk { + agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") + + if ok { + EnableWithAIForwarding(agentEndpoint) + } else { + Enable() + } + } +} + +// IsEnabled returns true if monitoring is enabled for the sdk. +func IsEnabled() bool { + return enabled +} + +// Enable will start instrumentation for metrics and traces. +func Enable() error { + enabled = true + sampler = nil + + err := initStats() + return err +} + +// Disable will disable instrumentation for metrics and traces. +func Disable() { + disableStats() + sampler = trace.NeverSample() + if traceExporter != nil { + trace.UnregisterExporter(traceExporter) + } + enabled = false +} + +// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder +// exporter making the metrics and traces available in app insights. 
+func EnableWithAIForwarding(agentEndpoint string) (err error) { + err = Enable() + if err != nil { + return err + } + + traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint)) + if err != nil { + return err + } + trace.RegisterExporter(traceExporter) + return +} + +// getStartOptions is the custom options setter for the ochttp package. +func getStartOptions(*http.Request) trace.StartOptions { + return trace.StartOptions{ + Sampler: sampler, + } +} + +// initStats registers the views for the http metrics +func initStats() (err error) { + clientViews := []*view.View{ + ochttp.ClientCompletedCount, + ochttp.ClientRoundtripLatencyDistribution, + ochttp.ClientReceivedBytesDistribution, + ochttp.ClientSentBytesDistribution, + } + for _, cv := range clientViews { + vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name) + views[vn] = cv.WithName(vn) + err = view.Register(views[vn]) + if err != nil { + return err + } + } + return +} + +// disableStats will unregister the previously registered metrics +func disableStats() { + for _, v := range views { + view.Unregister(v) + } +} + +// StartSpan starts a trace span +func StartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler)) + return ctx +} + +// EndSpan ends a previously started span stored in the context +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + span := trace.FromContext(ctx) + + if span == nil { + return + } + + if err != nil { + span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)}) + } + span.End() +} + +// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined +// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status +func toTraceStatusCode(httpStatusCode int) int32 { + switch { + case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest: + return trace.StatusCodeOK + case httpStatusCode == http.StatusBadRequest: + return trace.StatusCodeInvalidArgument + case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated. 
+ return trace.StatusCodeUnauthenticated + case httpStatusCode == http.StatusForbidden: + return trace.StatusCodePermissionDenied + case httpStatusCode == http.StatusNotFound: + return trace.StatusCodeNotFound + case httpStatusCode == http.StatusTooManyRequests: + return trace.StatusCodeResourceExhausted + case httpStatusCode == 499: + return trace.StatusCodeCancelled + case httpStatusCode == http.StatusNotImplemented: + return trace.StatusCodeUnimplemented + case httpStatusCode == http.StatusServiceUnavailable: + return trace.StatusCodeUnavailable + case httpStatusCode == http.StatusGatewayTimeout: + return trace.StatusCodeDeadlineExceeded + default: + return trace.StatusCodeUnknown + } +} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore index 66be63a00..6b7d7d1e8 100644 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -1 +1,2 @@ logrus +vendor diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md index 170269608..cb85d9f9f 100644 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,15 @@ +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. + * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + # 1.1.0 This new release introduces: * several fixes: diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index 072e99be3..093bb13f8 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 ``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. 
You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + #### Case-sensitivity @@ -246,9 +277,10 @@ A list of currently known of service hook can be found in this wiki [page](https #### Level logging -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go +log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index 4efedddfe..cc85d3aab 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -4,11 +4,30 @@ import ( "bytes" "fmt" "os" + "reflect" + "runtime" + "strings" "sync" "time" ) -var bufferPool *sync.Pool +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) func init() { bufferPool = &sync.Pool{ @@ -16,15 +35,18 @@ func init() { return new(bytes.Buffer) }, } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 } // Defines the key when adding errors using WithError. var ErrorKey = "error" // An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger @@ -34,22 +56,28 @@ type Entry struct { // Time at which the log entry was created Time time.Time - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level - // Message passed to Debug, Info, Warn, Error, Fatal or Panic + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic Message string // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer + + // err may contain a field formatting error + err string } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is five fields, give a little extra room - Data: make(Fields, 5), + // Default is three fields, plus one optional. Give a little extra room. 
+ Data: make(Fields, 6), } } @@ -80,10 +108,18 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range entry.Data { data[k] = v } + var field_err string for k, v := range fields { - data[k] = v + if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func { + field_err = fmt.Sprintf("can not add field %q", k) + if entry.err != "" { + field_err = entry.err + ", " + field_err + } + } else { + data[k] = v + } } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err} } // Overrides the time of the Entry. @@ -91,6 +127,57 @@ func (entry *Entry) WithTime(t time.Time) *Entry { return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary store an entry in a logger interface + minimumCallerDepth = knownLogrusFrames + }) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + // This function is not declared with a pointer value because otherwise // race conditions will occur when using multiple goroutines func (entry Entry) log(level Level, msg string) { @@ -107,6 +194,9 @@ func (entry Entry) log(level Level, msg string) { entry.Level = level entry.Message = msg + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } entry.fireHooks() @@ -150,6 +240,12 @@ func (entry *Entry) write() { } } +func (entry *Entry) Trace(args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.log(TraceLevel, fmt.Sprint(args...)) + } +} + func (entry *Entry) Debug(args ...interface{}) { if entry.Logger.IsLevelEnabled(DebugLevel) { entry.log(DebugLevel, fmt.Sprint(args...)) @@ -186,7 +282,7 @@ func (entry *Entry) Fatal(args ...interface{}) { if entry.Logger.IsLevelEnabled(FatalLevel) { entry.log(FatalLevel, fmt.Sprint(args...)) } - Exit(1) + entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { @@ -198,6 +294,12 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions +func (entry *Entry) Tracef(format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.Trace(fmt.Sprintf(format, args...)) + } +} + func (entry *Entry) Debugf(format string, args ...interface{}) { if 
entry.Logger.IsLevelEnabled(DebugLevel) { entry.Debug(fmt.Sprintf(format, args...)) @@ -234,7 +336,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) { if entry.Logger.IsLevelEnabled(FatalLevel) { entry.Fatal(fmt.Sprintf(format, args...)) } - Exit(1) + entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { @@ -245,6 +347,12 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions +func (entry *Entry) Traceln(args ...interface{}) { + if entry.Logger.IsLevelEnabled(TraceLevel) { + entry.Trace(entry.sprintlnn(args...)) + } +} + func (entry *Entry) Debugln(args ...interface{}) { if entry.Logger.IsLevelEnabled(DebugLevel) { entry.Debug(entry.sprintlnn(args...)) @@ -281,7 +389,7 @@ func (entry *Entry) Fatalln(args ...interface{}) { if entry.Logger.IsLevelEnabled(FatalLevel) { entry.Fatal(entry.sprintlnn(args...)) } - Exit(1) + entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index fb2a7a1f0..7342613c3 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -24,6 +24,12 @@ func SetFormatter(formatter Formatter) { std.SetFormatter(formatter) } +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + // SetLevel sets the standard logger level. func SetLevel(level Level) { std.SetLevel(level) @@ -77,6 +83,11 @@ func WithTime(t time.Time) *Entry { return std.WithTime(t) } +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -117,6 +128,11 @@ func Fatal(args ...interface{}) { std.Fatal(args...) } +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) @@ -157,6 +173,11 @@ func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go index 83c74947b..408883773 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -2,7 +2,16 @@ package logrus import "time" -const defaultTimestampFormat = time.RFC3339 +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. 
It exposes all the fields, including the default ones: @@ -18,7 +27,7 @@ type Formatter interface { Format(*Entry) ([]byte, error) } -// This is to not silently overwrite `time`, `msg` and `level` fields when +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") @@ -30,7 +39,7 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap) { +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { timeKey := fieldMap.resolve(FieldKeyTime) if t, ok := data[timeKey]; ok { data["fields."+timeKey] = t @@ -48,4 +57,22 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) { data["fields."+levelKey] = l delete(data, levelKey) } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } } diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod index f4fed02fb..94574cc63 100644 --- a/vendor/github.com/Sirupsen/logrus/go.mod +++ b/vendor/github.com/Sirupsen/logrus/go.mod @@ -2,8 +2,9 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe + github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect github.com/stretchr/testify v1.2.2 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum index 1f0d71964..133d34ae1 100644 --- a/vendor/github.com/Sirupsen/logrus/go.sum +++ b/vendor/github.com/Sirupsen/logrus/go.sum @@ -2,8 +2,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 
h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go index d3dadefe6..260575359 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -11,13 +11,6 @@ type fieldKey string // FieldMap allows customization of the key names for default fields. type FieldMap map[fieldKey]string -// Default key names for the default fields -const ( - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" -) - func (f FieldMap) resolve(key fieldKey) string { if k, ok := f[key]; ok { return k @@ -41,9 +34,10 @@ type JSONFormatter struct { // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", + // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", // }, // } FieldMap FieldMap @@ -54,7 +48,7 @@ type JSONFormatter struct { // Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) + data := make(Fields, len(entry.Data)+4) for k, v := range entry.Data { switch v := v.(type) { case error: @@ -72,18 +66,25 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data = newData } - prefixFieldClashes(data, f.FieldMap) + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } if !f.DisableTimestamp { data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) } data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function + data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } var b *bytes.Buffer if entry.Buffer != nil { diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index b67bfcbd3..5ceca0eab 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -24,6 +24,10 @@ type Logger struct { // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. @@ -32,8 +36,12 @@ type Logger struct { mu MutexWrap // Reusable empty entry entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc } +type exitFunc func(int) + type MutexWrap struct { lock sync.Mutex disabled bool @@ -69,10 +77,12 @@ func (mw *MutexWrap) Disable() { // It's recommended to make this a global instance called `log`. 
func New() *Logger { return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, } } @@ -121,6 +131,14 @@ func (logger *Logger) WithTime(t time.Time) *Entry { return entry.WithTime(t) } +func (logger *Logger) Tracef(format string, args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Tracef(format, args...) + logger.releaseEntry(entry) + } +} + func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() @@ -173,7 +191,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { entry.Fatalf(format, args...) logger.releaseEntry(entry) } - Exit(1) + logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { @@ -184,6 +202,14 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } } +func (logger *Logger) Trace(args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Trace(args...) + logger.releaseEntry(entry) + } +} + func (logger *Logger) Debug(args ...interface{}) { if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() @@ -236,7 +262,7 @@ func (logger *Logger) Fatal(args ...interface{}) { entry.Fatal(args...) logger.releaseEntry(entry) } - Exit(1) + logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { @@ -247,6 +273,14 @@ func (logger *Logger) Panic(args ...interface{}) { } } +func (logger *Logger) Traceln(args ...interface{}) { + if logger.IsLevelEnabled(TraceLevel) { + entry := logger.newEntry() + entry.Traceln(args...) + logger.releaseEntry(entry) + } +} + func (logger *Logger) Debugln(args ...interface{}) { if logger.IsLevelEnabled(DebugLevel) { entry := logger.newEntry() @@ -299,7 +333,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { entry.Fatalln(args...) logger.releaseEntry(entry) } - Exit(1) + logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { @@ -310,6 +344,14 @@ func (logger *Logger) Panicln(args ...interface{}) { } } +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + //When file is opened with appending mode, it's safe to //write concurrently to a file (within 4k message on Linux). //In these cases user can choose to disable the lock. @@ -357,6 +399,12 @@ func (logger *Logger) SetOutput(output io.Writer) { logger.Out = output } +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + // ReplaceHooks replaces the logger hooks and returns the old ones func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Lock() diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go index fa0b9dea8..4ef451866 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -15,6 +15,8 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". 
func (level Level) String() string { switch level { + case TraceLevel: + return "trace" case DebugLevel: return "debug" case InfoLevel: @@ -47,12 +49,26 @@ func ParseLevel(lvl string) (Level, error) { return InfoLevel, nil case "debug": return DebugLevel, nil + case "trace": + return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, @@ -61,6 +77,7 @@ var AllLevels = []Level{ WarnLevel, InfoLevel, DebugLevel, + TraceLevel, } // These are the different logging levels. You can set the logging level to log @@ -69,7 +86,7 @@ const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. @@ -82,6 +99,8 @@ const ( InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger @@ -148,3 +167,12 @@ type FieldLogger interface { // IsFatalEnabled() bool // IsPanicEnabled() bool } + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 62ca252d0..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine,!js - -package logrus - -import ( - "io" - - "golang.org/x/sys/unix" -) - -const ioctlReadTermios = unix.TIOCGETA - -type Termios unix.Termios - -func initTerminal(w io.Writer) { -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index 18066f08a..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !appengine,!js - -package logrus - -import ( - "io" - - "golang.org/x/sys/unix" -) - -const ioctlReadTermios = unix.TCGETS - -type Termios unix.Termios - -func initTerminal(w io.Writer) { -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..3dbd23720 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,8 @@ +// +build !windows + +package logrus + +import "io" + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index 67fb686c6..49ec92f17 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -107,14 +107,14 @@ func (f *TextFormatter) isColored() bool { // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - prefixFieldClashes(entry.Data, f.FieldMap) + prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller()) keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } - fixedKeys := make([]string, 0, 3+len(entry.Data)) + fixedKeys := make([]string, 0, 4+len(entry.Data)) if !f.DisableTimestamp { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) } @@ -122,6 +122,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { if entry.Message != "" { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + fixedKeys = append(fixedKeys, + f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile)) + } if !f.DisableSorting { if f.SortingFunc == nil { @@ -157,13 +164,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { } else { for _, key := range fixedKeys { var value interface{} - switch key { - case f.FieldMap.resolve(FieldKeyTime): + switch { + case key == f.FieldMap.resolve(FieldKeyTime): value = entry.Time.Format(timestampFormat) - case f.FieldMap.resolve(FieldKeyLevel): + case key == f.FieldMap.resolve(FieldKeyLevel): value = entry.Level.String() - case f.FieldMap.resolve(FieldKeyMsg): + case key == f.FieldMap.resolve(FieldKeyMsg): value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = entry.Caller.Function + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) default: value = entry.Data[key] } @@ -178,7 +191,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { var levelColor int switch entry.Level { - case DebugLevel: + case DebugLevel, TraceLevel: levelColor = gray case WarnLevel: levelColor = yellow @@ -197,12 +210,19 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin // the behavior of logrus text_formatter the same as the stdlib log package entry.Message = strings.TrimSuffix(entry.Message, "\n") + caller := "" + + if entry.HasCaller() { + caller = fmt.Sprintf("%s:%d %s()", + entry.Caller.File, entry.Caller.Line, entry.Caller.Function) + } + if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, 
entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { v := entry.Data[k] diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go index 7bdebedc6..9e1f75135 100644 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { var printFunc func(args ...interface{}) switch level { + case TraceLevel: + printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/.gitignore b/vendor/github.com/census-instrumentation/opencensus-proto/.gitignore new file mode 100644 index 000000000..365330047 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/.gitignore @@ -0,0 +1,36 @@ +# Gradle +.gradle +build +gen_gradle +protobuf-gradle-plugin.i* +gradle.properties +local.properties + +# Bazel +bazel-* + +# Maven +target + +# IntelliJ IDEA +.idea +*.iml + +# Eclipse +.classpath +.project +.settings +bin + +# OS X +.DS_Store + +# Emacs +*~ +\#*\# + +# VS Code +.vscode + +# Other +TAGS diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/.travis.yml b/vendor/github.com/census-instrumentation/opencensus-proto/.travis.yml new file mode 100644 index 000000000..0373f2614 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/.travis.yml @@ -0,0 +1,49 @@ +sudo: required +dist: trusty + +matrix: + include: + - language: java + jdk: oraclejdk8 + env: BUILD=BAZEL + + - language: java + jdk: oraclejdk8 + env: BUILD=GRADLE + +before_install: + - case "$BUILD" in + "BAZEL") + echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list ; + curl https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg | sudo apt-key add - ; + sudo apt-get update ;; + esac + +install: + - case "$BUILD" in + "BAZEL") + sudo apt-get install bazel ; + bazel version;; + esac + +script: + - case "$BUILD" in + "BAZEL") + cd src; + bazel build --show_result=100 ... ;; + "GRADLE") + ./gradlew clean assemble --stacktrace ; + ./gradlew check --stacktrace ;; + *) + echo "Missing case $BUILD" ; + exit 1 ;; + esac + +before_cache: + - rm -f $HOME/.gradle/caches/modules-2/modules-2.lock + +cache: + directories: + - $HOME/.gradle + - $HOME/.gradle/caches/ + - $HOME/.gradle/wrapper/ diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS new file mode 100644 index 000000000..e068e731e --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS @@ -0,0 +1 @@ +Google Inc. 
\ No newline at end of file diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/CONTRIBUTING.md b/vendor/github.com/census-instrumentation/opencensus-proto/CONTRIBUTING.md new file mode 100644 index 000000000..f6a2112ba --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to submit a bug report + +If you received an error message, please include it and any exceptions. + +We commonly need to know which language you are using (e.g. Java) and what +platform you are on: + * Operating system (i.e., ```uname -a```) + +# How to contribute + +We definitely welcome patches and contributions to Census! Here are some +guideline and information about how to do so. + +## Before getting started + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +We follow the [Google Proto Style Guide]( +https://developers.google.com/protocol-buffers/docs/style). + +## Proposing changes + +Make sure that `bazel build :all` completes successfully without any new warnings. +Then create a Pull Request with your changes. When the changes are accepted, they +will be merged or cherry-picked by a Census core developer. diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/LICENSE rename to vendor/github.com/census-instrumentation/opencensus-proto/LICENSE diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/README.md b/vendor/github.com/census-instrumentation/opencensus-proto/README.md new file mode 100644 index 000000000..372f5f543 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/README.md @@ -0,0 +1,80 @@ +OpenCensus Proto - Language Independent Interface Types For OpenCensus +=============================================================== +[![Build Status][travis-image]][travis-url] +[![Maven Central][maven-image]][maven-url] + +Census provides a framework to define and collect stats against metrics and to +break those stats down across user-defined dimensions. + +The Census framework is natively available in many languages (e.g. C++, Go, +and Java). The API interface types are defined using protos to ensure +consistency and interoperability for the different implementations. + +## Quickstart + +### Install to Go + +```bash +$ go get -u github.com/census-instrumentation/opencensus-proto +``` + +In most cases you should depend on the gen-go files directly. If you are +building with Bazel, there are also go_proto_library build rules available. +See [PR/132](https://github.com/census-instrumentation/opencensus-proto/pull/132) +for details. However, please note that Bazel doesn't generate the final +artifacts. 
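For a Go consumer, depending on the gen-go files directly just means importing the generated packages. A minimal sketch (not part of the upstream README; the `tracepb` alias and the field values are illustrative):

```go
package main

import (
	"fmt"

	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
	"github.com/golang/protobuf/proto"
)

func main() {
	// Build a span message with the generated types and marshal it to the protobuf wire format.
	span := &tracepb.Span{
		TraceId: make([]byte, 16),
		SpanId:  make([]byte, 8),
		Name:    &tracepb.TruncatableString{Value: "example-span"},
	}

	encoded, err := proto.Marshal(span)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded span is %d bytes\n", len(encoded))
}
```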
+ +### Add the dependencies to your Java project + +For Maven add to `pom.xml`: +```xml +<dependency> + <groupId>io.opencensus</groupId> + <artifactId>opencensus-proto</artifactId> + <version>0.0.2</version> +</dependency> +``` + +For Gradle add to dependencies: +```gradle +compile 'io.opencensus:opencensus-proto:0.0.2' +``` + +[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-proto.svg?branch=master +[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-proto +[maven-image]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto/badge.svg +[maven-url]: https://maven-badges.herokuapp.com/maven-central/io.opencensus/opencensus-proto + +### Add the dependencies to Bazel project + +In WORKSPACE, add: +``` +git_repository( + name = "io_opencensus_proto", + strip_prefix = "src", + tag = "v0.0.2", # CURRENT_OPENCENSUS_PROTO_VERSION + remote = "https://github.com/census-instrumentation/opencensus-proto", +) +``` +or + +``` +http_archive( + name = "io_opencensus_proto", + strip_prefix = "opencensus-proto-master/src", + urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/master.zip"], +) +``` + +In BUILD.bazel: +```bazel +proto_library( + name = "foo_proto", + srcs = ["foo.proto"], + deps = [ + "@io_opencensus_proto//opencensus/proto/metrics/v1:metrics_proto", + "@io_opencensus_proto//opencensus/proto/trace/v1:trace_proto", + # etc. + ], +) +``` diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/RELEASING.md b/vendor/github.com/census-instrumentation/opencensus-proto/RELEASING.md new file mode 100644 index 000000000..0268ffc3d --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/RELEASING.md @@ -0,0 +1,190 @@ +# How to Create a Release of OpenCensus Proto (for Maintainers Only) + +## Build Environments + +We re-generate gen-go files and deploy jars to Maven Central under the following systems: + +- Ubuntu 14.04 + +Other systems may also work, but we haven't verified them. + +## Release Go files + +To generate the Go files from protos, you'll need to install protoc and protoc-gen-go plugin first. +Follow the instructions [here](http://google.github.io/proto-lens/installing-protoc.html) and +[here](https://github.com/golang/protobuf#installation). + +Then run the following commands to re-generate the gen-go files: + +```bash +$ cd $(go env GOPATH)/src/github.com/census-instrumentation/opencensus-proto +$ git checkout -b update-gen-go +$ rm -rf gen-go +$ cd src +$ ./mkgogen.sh +$ git add -A +$ git commit -m "Update gen-go files." +``` + +Go through PR review and merge the changes to GitHub. + +## Tagging the Release + +Our release branches follow the naming convention of `v<major>.<minor>.x`, while the tags include the +patch version `v<major>.<minor>.<patch>`. For example, the same branch `v0.4.x` would be used to create +all `v0.4` tags (e.g. `v0.4.0`, `v0.4.1`). + +In this section upstream repository refers to the main opencensus-proto github +repository. + +Before any push to the upstream repository you need to create a [personal access +token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/). + +1. Create the release branch and push it to GitHub: + + ```bash + $ MAJOR=0 MINOR=4 PATCH=0 # Set appropriately for new release + $ VERSION_FILES=( + build.gradle + pom.xml + ) + $ git checkout -b v$MAJOR.$MINOR.x master + $ git push upstream v$MAJOR.$MINOR.x + ``` + +2. Enable branch protection for the new branch, if you have admin access. + Otherwise, let someone with admin access know that there is a new release + branch.
+ + - Open the branch protection settings for the new branch, by following + [Github's instructions](https://help.github.com/articles/configuring-protected-branches/). + - Copy the settings from a previous branch, i.e., check + - `Protect this branch` + - `Require pull request reviews before merging` + - `Require status checks to pass before merging` + - `Include administrators` + + Enable the following required status checks: + - `cla/google` + - `continuous-integration/travis-ci` + - Uncheck everything else. + - Click "Save changes". + +3. For `master` branch: + + - Change root build files to the next minor snapshot (e.g. + `0.5.0-SNAPSHOT`). + + ```bash + $ git checkout -b bump-version master + # Change version to next minor (and keep -SNAPSHOT) + $ sed -i 's/[0-9]\+\.[0-9]\+\.[0-9]\+\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/'$MAJOR.$((MINOR+1)).0'\1/' \ + "${VERSION_FILES[@]}" + $ ./gradlew build + $ git commit -a -m "Start $MAJOR.$((MINOR+1)).0 development cycle" + ``` + + - Go through PR review and push the master branch to GitHub: + + ```bash + $ git checkout master + $ git merge --ff-only bump-version + $ git push upstream master + ``` + +4. For `vMajor.Minor.x` branch: + + - Change root build files to remove "-SNAPSHOT" for the next release + version (e.g. `0.4.0`). Commit the result and make a tag: + + ```bash + $ git checkout -b release v$MAJOR.$MINOR.x + # Change version to remove -SNAPSHOT + $ sed -i 's/-SNAPSHOT\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/\1/' "${VERSION_FILES[@]}" + $ ./gradlew build + $ git commit -a -m "Bump version to $MAJOR.$MINOR.$PATCH" + $ git tag -a v$MAJOR.$MINOR.$PATCH -m "Version $MAJOR.$MINOR.$PATCH" + ``` + + - Change root build files to the next snapshot version (e.g. + `0.4.1-SNAPSHOT`). Commit the result: + + ```bash + # Change version to next patch and add -SNAPSHOT + $ sed -i 's/[0-9]\+\.[0-9]\+\.[0-9]\+\(.*CURRENT_OPENCENSUS_PROTO_VERSION\)/'$MAJOR.$MINOR.$((PATCH+1))-SNAPSHOT'\1/' \ + "${VERSION_FILES[@]}" + $ ./gradlew build + $ git commit -a -m "Bump version to $MAJOR.$MINOR.$((PATCH+1))-SNAPSHOT" + ``` + + - Go through PR review and push the release tag and updated release branch + to GitHub: + + ```bash + $ git checkout v$MAJOR.$MINOR.x + $ git merge --ff-only release + $ git push upstream v$MAJOR.$MINOR.$PATCH + $ git push upstream v$MAJOR.$MINOR.x + ``` + +## Release Java Jar + +Deployment to Maven Central (or the snapshot repo) is for all of the artifacts +from the project. + +### Prerequisites + +If you haven't done already, please follow the instructions +[here](https://github.com/census-instrumentation/opencensus-java/blob/master/RELEASING.md#prerequisites) +to set up the OSSRH (OSS Repository Hosting) account and signing keys. This is required for releasing +to Maven Central. + +### Branch + +Before building/deploying, be sure to switch to the appropriate tag. The tag +must reference a commit that has been pushed to the main repository, i.e., has +gone through code review. For the current release use: + +```bash +$ git checkout -b v$MAJOR.$MINOR.$PATCH tags/v$MAJOR.$MINOR.$PATCH +``` + +### Initial Deployment + +The following command will build the whole project and upload it to Maven +Central. Parallel building [is not safe during +uploadArchives](https://issues.gradle.org/browse/GRADLE-3420). + +```bash +$ ./gradlew clean build && ./gradlew -Dorg.gradle.parallel=false uploadArchives +``` + +If the version has the `-SNAPSHOT` suffix, the artifacts will automatically go +to the snapshot repository. 
Otherwise it's a release deployment and the +artifacts will go to a staging repository. + +When deploying a Release, the deployment will create [a new staging +repository](https://oss.sonatype.org/#stagingRepositories). You'll need to look +up the ID in the OSSRH UI (usually in the form of `opencensus-*`). + +### Releasing on Maven Central + +Once all of the artifacts have been pushed to the staging repository, the +repository must first be `closed`, which will trigger several sanity checks on +the repository. If this completes successfully, the repository can then be +`released`, which will begin the process of pushing the new artifacts to Maven +Central (the staging repository will be destroyed in the process). You can see +the complete process for releasing to Maven Central on the [OSSRH +site](http://central.sonatype.org/pages/releasing-the-deployment.html). + +## Announcement + +Once deployment is done, go to Github [release +page](https://github.com/census-instrumentation/opencensus-proto/releases), press +`Draft a new release` to write release notes about the new release. + +You can use `git log upstream/v$MAJOR.$((MINOR-1)).x..upstream/v$MAJOR.$MINOR.x --graph --first-parent` +or the Github [compare tool](https://github.com/census-instrumentation/opencensus-proto/compare/) +to view a summary of all commits since last release as a reference. + +Please pick major or important user-visible changes only. diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/build.gradle b/vendor/github.com/census-instrumentation/opencensus-proto/build.gradle new file mode 100644 index 000000000..8b996605d --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/build.gradle @@ -0,0 +1,170 @@ +description = 'Opencensus Proto' + +apply plugin: 'idea' +apply plugin: 'java' +apply plugin: 'com.google.protobuf' +apply plugin: 'maven' +apply plugin: "signing" + +group = "io.opencensus" +version = "0.1.0" // CURRENT_OPENCENSUS_PROTO_VERSION + +sourceCompatibility = 1.6 +targetCompatibility = 1.6 + +repositories { + maven { url "https://plugins.gradle.org/m2/" } +} + +jar.manifest { + attributes('Implementation-Title': name, + 'Implementation-Version': version, + 'Built-By': System.getProperty('user.name'), + 'Built-JDK': System.getProperty('java.version'), + 'Source-Compatibility': sourceCompatibility, + 'Target-Compatibility': targetCompatibility) +} + +def protobufVersion = '3.5.1' +def protocVersion = '3.5.1' +def grpcVersion = "1.14.0" // CURRENT_GRPC_VERSION + +buildscript { + repositories { + maven { url "https://plugins.gradle.org/m2/" } + } + dependencies { + classpath "com.google.protobuf:protobuf-gradle-plugin:0.8.6" + } +} + +sourceSets { + main { + proto { + srcDir 'src' + } + } +} + +dependencies { + compile "com.google.protobuf:protobuf-java:${protobufVersion}", + "io.grpc:grpc-protobuf:${grpcVersion}", + "io.grpc:grpc-stub:${grpcVersion}" + + compileOnly "javax.annotation:javax.annotation-api:1.2" +} + +protobuf { + protoc { + // The artifact spec for the Protobuf Compiler + artifact = "com.google.protobuf:protoc:${protocVersion}" + } + plugins { + grpc { + artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}" + } + } + generateProtoTasks { + all()*.plugins { + grpc {} + } + ofSourceSet('main') + } + + generatedFilesBaseDir = "$projectDir/gen_gradle/src" +} + +// Disable all java warnings for proto generated files build +compileJava { + options.compilerArgs += ["-Xlint:none"] + options.encoding = "UTF-8" +} + +clean { + delete 
protobuf.generatedFilesBaseDir +} + +// IntelliJ complains that the generated classes are not found, ask IntelliJ to include the +// generated Java directories as source folders. +idea { + module { + sourceDirs += file("${protobuf.generatedFilesBaseDir}/main/java"); + // If you have additional sourceSets and/or codegen plugins, add all of them + } +} + +signing { + required false + sign configurations.archives +} + +javadoc.source = "$projectDir/gen_gradle/src" + +javadoc.options { + encoding = 'UTF-8' + links 'https://docs.oracle.com/javase/8/docs/api/' +} + +task javadocJar(type: Jar) { + classifier = 'javadoc' + from javadoc +} + +task sourcesJar(type: Jar) { + classifier = 'sources' + from sourceSets.main.allSource +} + +artifacts { + archives javadocJar, sourcesJar +} + +uploadArchives { + repositories { + mavenDeployer { + beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) } + + def configureAuth = { + if (rootProject.hasProperty('ossrhUsername') && rootProject.hasProperty('ossrhPassword')) { + authentication(userName:rootProject.ossrhUsername, password: rootProject.ossrhPassword) + } + } + + repository(url: "https://oss.sonatype.org/service/local/staging/deploy/maven2/", configureAuth) + + snapshotRepository(url: "https://oss.sonatype.org/content/repositories/snapshots/", configureAuth) + + pom.project { + name "OpenCensus" + packaging 'jar' + description project.description + url 'https://github.com/census-instrumentation/opencensus-proto' + + scm { + connection 'scm:svn:https://github.com/census-instrumentation/opencensus-proto' + developerConnection 'scm:git:git@github.com/census-instrumentation/opencensus-proto' + url 'https://github.com/census-instrumentation/opencensus-proto' + } + + licenses { + license { + name 'The Apache License, Version 2.0' + url 'http://www.apache.org/licenses/LICENSE-2.0.txt' + } + } + + developers { + developer { + id 'io.opencensus' + name 'OpenCensus Contributors' + email 'census-developers@googlegroups.com' + url 'opencensus.io' + // https://issues.gradle.org/browse/GRADLE-2719 + organization = 'OpenCensus Authors' + organizationUrl 'https://www.opencensus.io' + } + } + } + } + } + } diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go new file mode 100644 index 000000000..2f12e428e --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -0,0 +1,356 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LibraryInfo_Language int32 + +const ( + LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 + LibraryInfo_CPP LibraryInfo_Language = 1 + LibraryInfo_C_SHARP LibraryInfo_Language = 2 + LibraryInfo_ERLANG LibraryInfo_Language = 3 + LibraryInfo_GO_LANG LibraryInfo_Language = 4 + LibraryInfo_JAVA LibraryInfo_Language = 5 + LibraryInfo_NODE_JS LibraryInfo_Language = 6 + LibraryInfo_PHP LibraryInfo_Language = 7 + LibraryInfo_PYTHON LibraryInfo_Language = 8 + LibraryInfo_RUBY LibraryInfo_Language = 9 +) + +var LibraryInfo_Language_name = map[int32]string{ + 0: "LANGUAGE_UNSPECIFIED", + 1: "CPP", + 2: "C_SHARP", + 3: "ERLANG", + 4: "GO_LANG", + 5: "JAVA", + 6: "NODE_JS", + 7: "PHP", + 8: "PYTHON", + 9: "RUBY", +} + +var LibraryInfo_Language_value = map[string]int32{ + "LANGUAGE_UNSPECIFIED": 0, + "CPP": 1, + "C_SHARP": 2, + "ERLANG": 3, + "GO_LANG": 4, + "JAVA": 5, + "NODE_JS": 6, + "PHP": 7, + "PYTHON": 8, + "RUBY": 9, +} + +func (x LibraryInfo_Language) String() string { + return proto.EnumName(LibraryInfo_Language_name, int32(x)) +} + +func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2, 0} +} + +// Identifier metadata of the Node (Application instrumented with OpenCensus) +// that connects to OpenCensus Agent. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +type Node struct { + // Identifier that uniquely identifies a process within a VM/container. + Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Information on the OpenCensus Library that initiates the stream. + LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` + // Additional information on service. + ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` + // Additional attributes. 
+ Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{0} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetIdentifier() *ProcessIdentifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Node) GetLibraryInfo() *LibraryInfo { + if m != nil { + return m.LibraryInfo + } + return nil +} + +func (m *Node) GetServiceInfo() *ServiceInfo { + if m != nil { + return m.ServiceInfo + } + return nil +} + +func (m *Node) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// Identifier that uniquely identifies a process within a VM/container. +type ProcessIdentifier struct { + // The host name. Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // Process id. + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + // Start time of this ProcessIdentifier. Represented in epoch time. 
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } +func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } +func (*ProcessIdentifier) ProtoMessage() {} +func (*ProcessIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{1} +} + +func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) +} +func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) +} +func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessIdentifier.Merge(m, src) +} +func (m *ProcessIdentifier) XXX_Size() int { + return xxx_messageInfo_ProcessIdentifier.Size(m) +} +func (m *ProcessIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo + +func (m *ProcessIdentifier) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *ProcessIdentifier) GetPid() uint32 { + if m != nil { + return m.Pid + } + return 0 +} + +func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +// Information on OpenCensus Library. +type LibraryInfo struct { + // Language of OpenCensus Library. + Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` + // Version of Agent exporter of Library. + ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` + // Version of OpenCensus Library. 
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } +func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } +func (*LibraryInfo) ProtoMessage() {} +func (*LibraryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2} +} + +func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) +} +func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) +} +func (m *LibraryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LibraryInfo.Merge(m, src) +} +func (m *LibraryInfo) XXX_Size() int { + return xxx_messageInfo_LibraryInfo.Size(m) +} +func (m *LibraryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LibraryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo + +func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { + if m != nil { + return m.Language + } + return LibraryInfo_LANGUAGE_UNSPECIFIED +} + +func (m *LibraryInfo) GetExporterVersion() string { + if m != nil { + return m.ExporterVersion + } + return "" +} + +func (m *LibraryInfo) GetCoreLibraryVersion() string { + if m != nil { + return m.CoreLibraryVersion + } + return "" +} + +// Additional service information. +type ServiceInfo struct { + // Name of the service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } +func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } +func (*ServiceInfo) ProtoMessage() {} +func (*ServiceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{3} +} + +func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) +} +func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) +} +func (m *ServiceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceInfo.Merge(m, src) +} +func (m *ServiceInfo) XXX_Size() int { + return xxx_messageInfo_ServiceInfo.Size(m) +} +func (m *ServiceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo + +func (m *ServiceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) + proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") + proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") + proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") + proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) +} + 
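Editor's aside (illustrative only, not part of the vendored, generated file above or of this diff): a minimal sketch of how an exporter might populate the Node message that common.pb.go defines, using only the fields, getters, and constants visible in this file. The import alias commonpb, the service name, the attribute key/value, and the version strings are assumptions made for the example.

package main

import (
	"fmt"
	"os"
	"time"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
	"github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	// Mirrors the ProcessIdentifier doc comment: host name via os.Hostname() in Go.
	host, _ := os.Hostname()

	node := &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName:       host,
			Pid:            uint32(os.Getpid()),
			StartTimestamp: &timestamp.Timestamp{Seconds: time.Now().Unix()},
		},
		LibraryInfo: &commonpb.LibraryInfo{
			// A real exporter would set the concrete language enum value;
			// LANGUAGE_UNSPECIFIED is the only constant shown in this file.
			Language:           commonpb.LibraryInfo_LANGUAGE_UNSPECIFIED,
			ExporterVersion:    "0.0.0", // assumed version string
			CoreLibraryVersion: "0.0.0", // assumed version string
		},
		ServiceInfo: &commonpb.ServiceInfo{Name: "example-service"}, // hypothetical name
		Attributes:  map[string]string{"example.key": "example.value"},
	}

	// The generated getters are nil-safe, as shown above.
	fmt.Println(node.GetServiceInfo().GetName())
}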
+var fileDescriptor_126c72ed8a252c84 = []byte{ + // 590 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e, + 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee, + 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01, + 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde, + 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9, + 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2, + 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89, + 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9, + 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7, + 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39, + 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b, + 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b, + 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13, + 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06, + 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d, + 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67, + 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2, + 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a, + 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a, + 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76, + 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23, + 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c, + 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c, + 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92, + 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e, + 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51, + 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14, + 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83, + 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0, + 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86, + 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4, + 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd, + 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9, + 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c, + 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70, + 0x85, 0xec, 0xd8, 0x9f, 
0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go new file mode 100644 index 000000000..01eddf4cc --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CurrentLibraryConfig struct { + // This is required only in the first message on the stream or if the + // previous sent CurrentLibraryConfig message has a different Node (e.g. + // when the same RPC is used to configure multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Current configuration. + Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } +func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*CurrentLibraryConfig) ProtoMessage() {} +func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{0} +} + +func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) +} +func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) +} +func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) +} +func (m *CurrentLibraryConfig) XXX_Size() int { + return xxx_messageInfo_CurrentLibraryConfig.Size(m) +} +func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo + +func (m *CurrentLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type UpdatedLibraryConfig struct { + // This field is ignored when the RPC is used to configure only one Application. 
+ // This is required only in the first message on the stream or if the + // previous sent UpdatedLibraryConfig message has a different Node. + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Requested updated configuration. + Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } +func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*UpdatedLibraryConfig) ProtoMessage() {} +func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{1} +} + +func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) +} +func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) +} +func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) +} +func (m *UpdatedLibraryConfig) XXX_Size() int { + return xxx_messageInfo_UpdatedLibraryConfig.Size(m) +} +func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo + +func (m *UpdatedLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type ExportTraceServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportTraceServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Spans from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of Spans that belong to the last received Node. + Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The resource for the spans in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known. 
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{2} +} + +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { + if m != nil { + return m.Spans + } + return nil +} + +func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{3} +} + +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") + proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", 
fileDescriptor_7027f99caf7ac6a5) +} + +var fileDescriptor_7027f99caf7ac6a5 = []byte{ + // 423 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40, + 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a, + 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8, + 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3, + 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d, + 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49, + 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91, + 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d, + 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7, + 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf, + 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73, + 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1, + 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31, + 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95, + 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d, + 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90, + 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d, + 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d, + 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89, + 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44, + 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e, + 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae, + 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59, + 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94, + 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00, + 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, + 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. 
+ Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) + if err != nil { + return nil, err + } + x := &traceServiceConfigClient{stream} + return x, nil +} + +type TraceService_ConfigClient interface { + Send(*CurrentLibraryConfig) error + Recv() (*UpdatedLibraryConfig, error) + grpc.ClientStream +} + +type traceServiceConfigClient struct { + grpc.ClientStream +} + +func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { + m := new(UpdatedLibraryConfig) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) + if err != nil { + return nil, err + } + x := &traceServiceExportClient{stream} + return x, nil +} + +type TraceService_ExportClient interface { + Send(*ExportTraceServiceRequest) error + Recv() (*ExportTraceServiceResponse, error) + grpc.ClientStream +} + +type traceServiceExportClient struct { + grpc.ClientStream +} + +func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { + m := new(ExportTraceServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(TraceService_ConfigServer) error + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(TraceService_ExportServer) error +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) +} + +type TraceService_ConfigServer interface { + Send(*UpdatedLibraryConfig) error + Recv() (*CurrentLibraryConfig, error) + grpc.ServerStream +} + +type traceServiceConfigServer struct { + grpc.ServerStream +} + +func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { + m := new(CurrentLibraryConfig) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) +} + +type TraceService_ExportServer interface { + Send(*ExportTraceServiceResponse) error + Recv() (*ExportTraceServiceRequest, error) + grpc.ServerStream +} + +type traceServiceExportServer struct { + grpc.ServerStream +} + +func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { + m := new(ExportTraceServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Config", + Handler: _TraceService_Config_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Export", + Handler: _TraceService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go new file mode 100644 index 000000000..560dbd94a --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Type identifier for the resource. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Set of labels that describe the resource. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_584700775a2fc762, []int{0} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Resource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") +} + +func init() { + proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) +} + +var fileDescriptor_584700775a2fc762 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, + 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, + 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, + 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, + 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, + 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, + 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, + 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, + 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf, + 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19, + 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5, + 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99, + 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go new file mode 100644 index 000000000..c34e38892 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -0,0 
+1,1654 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SERVER Span_SpanKind = 1 + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. + Span_CLIENT Span_SpanKind = 2 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SERVER", + 2: "CLIENT", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SERVER": 1, + "CLIENT": 2, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. + Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. + Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown, or known but other + // than parent-child. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. 
+ Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Often, a trace contains a root span +// that describes the end-to-end latency, and one or more subspans for +// its sub-operations. A trace can also contain multiple root spans, +// or none at all. Spans do not need to be contiguous - there may be +// gaps or overlaps between spans in a trace. +// +// The next id is 16. +// TODO(bdrutu): Add an example. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The Tracestate on the span. + Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is required. + Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` + // and `SERVER` to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. 
+ Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // A stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // The included time events. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // The included links. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // An optional final status for this span. + Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // A highly recommended but not required flag that identifies when a trace + // crosses a process boundary. True when the parent_span belongs to the + // same process as the current span. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows an implementation to detect missing child spans. + ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() *TruncatableString { + if m != nil { + return m.Name + } + return nil +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() 
*Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// This field conveys information about request position in multiple distributed tracing graphs. +// It is a list of Tracestate.Entry with a maximum of 32 members in the list. +// +// See the https://github.com/w3c/distributed-tracing for more details about this field. +type Span_Tracestate struct { + // A list of entries that represent the Tracestate. + Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} } +func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate) ProtoMessage() {} +func (*Span_Tracestate) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b) +} +func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate.Merge(m, src) +} +func (m *Span_Tracestate) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate.Size(m) +} +func (m *Span_Tracestate) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo + +func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type Span_Tracestate_Entry struct { + // The key must begin with a lowercase letter, and can only contain + // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes + // '-', asterisks '*', and forward slashes '/'. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value is opaque string up to 256 characters printable ASCII + // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. + // Note that this also excludes tabs, newlines, carriage returns, etc. 
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } +func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate_Entry) ProtoMessage() {} +func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} +} + +func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) +} +func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) +} +func (m *Span_Tracestate_Entry) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate_Entry.Size(m) +} +func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo + +func (m *Span_Tracestate_Entry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Span_Tracestate_Entry) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A set of attributes, each with a key and a value. +type Span_Attributes struct { + // The set of attributes. The value can be a string, an integer, or the + // Boolean values `true` and `false`. For example: + // + // "/instance_id": "my-instance" + // "/http/user_agent": "" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0, then no attributes were dropped. 
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 1} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Annotation); err != nil { + return err + } + case *Span_TimeEvent_MessageEvent_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MessageEvent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x) + } + return nil +} + +func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Span_TimeEvent) + switch tag { + case 2: // value.annotation + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_Annotation) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_Annotation_{msg} + return true, err + case 3: // value.message_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_MessageEvent) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_MessageEvent_{msg} + return true, err + default: + return false, nil + } +} + +func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + s := proto.Size(x.Annotation) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Span_TimeEvent_MessageEvent_: + s := proto.Size(x.MessageEvent) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. 
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} +} + +func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) +} +func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) +} +func (m *Span_TimeEvent_Annotation) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) +} +func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo + +func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { + if m != nil { + return m.Description + } + return nil +} + +func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// An event describing a message sent/received between Spans. +type Span_TimeEvent_MessageEvent struct { + // The type of MessageEvent. Indicates whether the message was sent or + // received. + Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` + // An identifier for the MessageEvent's message that can be used to match + // SENT and RECEIVED MessageEvents. For example, this field could + // represent a sequence ID for a streaming RPC. It is recommended to be + // unique within a Span. + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // The number of uncompressed bytes sent or received. + UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` + // The number of compressed bytes sent or received. If zero, assumed to + // be the same size as uncompressed. 
+ CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { + if m != nil { + return m.UncompressedSize + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { + if m != nil { + return m.CompressedSize + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key-value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } +func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvents) ProtoMessage() {} +func (*Span_TimeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 3} +} + +func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) +} +func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvents.Merge(m, src) +} +func (m *Span_TimeEvents) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvents.Size(m) +} +func (m *Span_TimeEvents) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo + +func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { + if m != nil { + return m.TimeEvent + } + return nil +} + +func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { + if m != nil { + return m.DroppedAnnotationsCount + } + return 0 +} + +func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { + if m != nil { + return m.DroppedMessageEventsCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The relationship of the current span relative to the linked span. + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` + // A set of attributes on the link. 
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 5} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. 
This proto's fields +// are a subset of those of +// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +// which is used by [gRPC](https://github.com/grpc). +type Status struct { + // The status code. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{1} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// The value of an Attribute. +type AttributeValue struct { + // The type of the value. + // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + // *AttributeValue_DoubleValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{2} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AttributeValue_DoubleValue struct { + DoubleValue float64 
`protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AttributeValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + (*AttributeValue_DoubleValue)(nil), + } +} + +func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StringValue); err != nil { + return err + } + case *AttributeValue_IntValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *AttributeValue_DoubleValue: + b.EncodeVarint(4<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case nil: + default: + return fmt.Errorf("AttributeValue.Value has unexpected type %T", x) + } + return nil +} + +func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttributeValue) + switch tag { + case 1: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TruncatableString) + err := b.DecodeMessage(msg) + m.Value = &AttributeValue_StringValue{msg} + return true, err + case 2: // value.int_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_IntValue{int64(x)} + return true, err + case 3: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_BoolValue{x != 0} + return true, err + case 4: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &AttributeValue_DoubleValue{math.Float64frombits(x)} + return true, err + default: + return false, nil + } +} + +func _AttributeValue_OneofSizer(msg 
proto.Message) (n int) { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + s := proto.Size(x.StringValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AttributeValue_IntValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + n += 1 // tag and wire + n += 1 + case *AttributeValue_DoubleValue: + n += 1 // tag and wire + n += 8 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The call stack which originated this span. +type StackTrace struct { + // Stack frames in this stack trace. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both + // `stack_frames` and a value in `stack_trace_hash_id`. + // + // Subsequent spans within the same request can refer + // to that stack trace by setting only `stack_trace_hash_id`. + // + // TODO: describe how to deal with the case where stack_trace_hash_id is + // zero because it was not set. + StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() uint64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// A single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame. + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully qualified. + OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears. 
+ FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code. + SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. 
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// A description of a binary module. +type Module struct { + // TODO: document the meaning of this field. + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so. + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents. + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{4} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// A string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. For example, if the original string was 500 bytes long and + // the limit of the string was 128 bytes, then this value contains the first 128 + // bytes of the 500-byte string. 
Note that truncation always happens on a + // character boundary, to ensure that a truncated string is still valid UTF-8. + // Because it may contain multi-byte characters, the size of the truncated string + // may be less than the truncation limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{5} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") + proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") + proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") + proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") + proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") + proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") + proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") + 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") + proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) +} + +var fileDescriptor_8ea38bbb821bf584 = []byte{ + // 1524 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5b, 0x6f, 0x1b, 0xb7, + 0x12, 0xf6, 0xea, 0xae, 0x91, 0xac, 0xc8, 0x8c, 0x93, 0xc8, 0x3a, 0x39, 0x27, 0x3e, 0x4a, 0x82, + 0xe3, 0x9c, 0xd6, 0x72, 0xe2, 0xa4, 0x41, 0xae, 0x48, 0x7d, 0x91, 0x2b, 0xc5, 0x8e, 0xaa, 0x50, + 0x8a, 0xd1, 0x0b, 0x8a, 0xc5, 0x4a, 0x4b, 0xcb, 0x5b, 0x4b, 0xdc, 0xed, 0x92, 0xeb, 0xc2, 0x79, + 0xeb, 0x53, 0x51, 0xf4, 0xad, 0x40, 0xd1, 0x3f, 0xd0, 0x87, 0xfe, 0x9d, 0xa2, 0xbf, 0xa3, 0xbf, + 0xa0, 0x2f, 0x05, 0xc9, 0xbd, 0xc9, 0x49, 0x6c, 0x55, 0x79, 0x31, 0xb8, 0xe4, 0x7c, 0x1f, 0x39, + 0x9c, 0x6f, 0x86, 0x63, 0xc1, 0x4d, 0xdb, 0x21, 0x74, 0x40, 0x28, 0xf3, 0xd8, 0x9a, 0xe3, 0xda, + 0xdc, 0x5e, 0xe3, 0xae, 0x31, 0x20, 0x6b, 0xc7, 0x77, 0xd4, 0xa0, 0x2e, 0x27, 0xd1, 0x52, 0x64, + 0xa6, 0x66, 0xea, 0x6a, 0xf5, 0xf8, 0x4e, 0xf5, 0xda, 0xd0, 0xb6, 0x87, 0x23, 0xa2, 0xd0, 0x7d, + 0xef, 0x60, 0x8d, 0x5b, 0x63, 0xc2, 0xb8, 0x31, 0x76, 0x94, 0x65, 0xf5, 0x3f, 0xa7, 0x0d, 0xbe, + 0x75, 0x0d, 0xc7, 0x21, 0xae, 0xcf, 0x54, 0xfb, 0xee, 0x12, 0xa4, 0xba, 0x8e, 0x41, 0xd1, 0x12, + 0xe4, 0x24, 0xab, 0x6e, 0x99, 0x15, 0x6d, 0x59, 0x5b, 0x29, 0xe2, 0xac, 0xfc, 0x6e, 0x99, 0xe8, + 0x0a, 0x64, 0x99, 0x63, 0x50, 0xb1, 0x92, 0x90, 0x2b, 0x19, 0xf1, 0xd9, 0x32, 0xd1, 0x73, 0x00, + 0x69, 0xc3, 0xb8, 0xc1, 0x49, 0xe5, 0xc2, 0xb2, 0xb6, 0x52, 0x58, 0xff, 0x7f, 0xfd, 0x9d, 0xa7, + 0xad, 0x8b, 0x8d, 0xea, 0xbd, 0x10, 0x81, 0x63, 0x68, 0x74, 0x03, 0x4a, 0x8e, 0xe1, 0x12, 0xca, + 0xf5, 0x60, 0xaf, 0xa4, 0xdc, 0xab, 0xa8, 0x66, 0xbb, 0x6a, 0xc7, 0x8f, 0x21, 0x45, 0x8d, 0x31, + 0xa9, 0xa4, 0xe4, 0x5e, 0x1f, 0x9e, 0xb1, 0x57, 0xcf, 0xf5, 0xe8, 0xc0, 0xe0, 0x46, 0x7f, 0x44, + 0xba, 0xdc, 0xb5, 0xe8, 0x10, 0x4b, 0x24, 0x7a, 0x02, 0xa9, 0x23, 0x8b, 0x9a, 0x95, 0xd2, 0xb2, + 0xb6, 0x52, 0x5a, 0x5f, 0x39, 0xef, 0xb4, 0xe2, 0xcf, 0xae, 0x45, 0x4d, 0x2c, 0x51, 0xe8, 0x21, + 0x00, 0xe3, 0x86, 0xcb, 0x75, 0x71, 0xcf, 0x95, 0xb4, 0x3c, 0x45, 0xb5, 0xae, 0xee, 0xb8, 0x1e, + 0xdc, 0x71, 0xbd, 0x17, 0x04, 0x01, 0xe7, 0xa5, 0xb5, 0xf8, 0x46, 0x1f, 0x41, 0x8e, 0x50, 0x53, + 0x01, 0x33, 0xe7, 0x02, 0xb3, 0x84, 0x9a, 0x12, 0xf6, 0x1c, 0xc0, 0xe0, 0xdc, 0xb5, 0xfa, 0x1e, + 0x27, 0xac, 0x92, 0x9d, 0xee, 0x8e, 0x37, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x0e, 0x14, 0x18, 0x37, + 0x06, 0x47, 0xba, 0xb4, 0xae, 0xe4, 0x24, 0xd9, 0xcd, 0xb3, 0xc8, 0x84, 0xb5, 0x0c, 0x18, 0x06, + 0x16, 0x8e, 0xd1, 0x2e, 0x14, 0x84, 0x1b, 0x3a, 0x39, 0x26, 0x94, 0xb3, 0x4a, 0x7e, 0xca, 0xc0, + 0x5b, 0x63, 0xd2, 0x90, 0x08, 0x0c, 0x3c, 0x1c, 0xa3, 0xc7, 0x90, 0x1e, 0x59, 0xf4, 0x88, 0x55, + 0xe0, 0xfc, 0xe3, 0x08, 0x9a, 0x3d, 0x61, 0x8c, 0x15, 0x06, 0x3d, 0x84, 0x8c, 0x90, 0x8f, 0xc7, + 0x2a, 0x05, 0x89, 0xfe, 0xef, 0xd9, 0xce, 0x70, 0x8f, 0x61, 0x1f, 0x80, 0x3e, 0x83, 0x7f, 0x31, + 0x63, 0x4c, 0x74, 0xc7, 0xb5, 0x07, 0x84, 0x31, 0xdd, 0x60, 0x7a, 0x4c, 0x80, 0x95, 0xe2, 0x3b, + 0x42, 0xb4, 0x69, 0xdb, 0xa3, 0x7d, 
0x63, 0xe4, 0x11, 0x7c, 0x45, 0xc0, 0x3b, 0x0a, 0xbd, 0xc1, + 0x3a, 0xa1, 0x4c, 0xd1, 0x0e, 0x94, 0x07, 0x87, 0xd6, 0xc8, 0x54, 0x4a, 0x1e, 0xd8, 0x1e, 0xe5, + 0x95, 0x79, 0x49, 0x77, 0xf5, 0x0d, 0xba, 0x57, 0x2d, 0xca, 0xef, 0xae, 0x2b, 0xc2, 0x92, 0x44, + 0x09, 0x8a, 0x2d, 0x81, 0xa9, 0xfe, 0xa0, 0x01, 0x44, 0xd9, 0x82, 0x9e, 0x43, 0x96, 0x50, 0xee, + 0x5a, 0x84, 0x55, 0xb4, 0xe5, 0xe4, 0x4a, 0x61, 0xfd, 0xf6, 0xf4, 0xa9, 0x56, 0x6f, 0x50, 0xee, + 0x9e, 0xe0, 0x80, 0xa0, 0xba, 0x06, 0x69, 0x39, 0x83, 0xca, 0x90, 0x3c, 0x22, 0x27, 0x32, 0xe3, + 0xf3, 0x58, 0x0c, 0xd1, 0x22, 0xa4, 0x8f, 0xc5, 0x71, 0x64, 0xae, 0xe7, 0xb1, 0xfa, 0xa8, 0xfe, + 0x9c, 0x00, 0x88, 0x54, 0x85, 0x0c, 0x98, 0x0f, 0x75, 0xa5, 0x8f, 0x0d, 0xc7, 0x3f, 0xd1, 0x93, + 0xe9, 0x85, 0x19, 0x0d, 0x5f, 0x18, 0x8e, 0x3a, 0x5d, 0xd1, 0x88, 0x4d, 0xa1, 0x07, 0x50, 0x31, + 0x5d, 0xdb, 0x71, 0x88, 0xa9, 0x47, 0x12, 0xf6, 0x6f, 0x53, 0x1c, 0x2d, 0x8d, 0x2f, 0xfb, 0xeb, + 0x11, 0xa9, 0xba, 0xb7, 0xaf, 0x61, 0xe1, 0x0d, 0xf2, 0xb7, 0x38, 0xfa, 0x2c, 0xee, 0x68, 0x61, + 0xfd, 0xd6, 0x19, 0x67, 0x0f, 0xe9, 0x54, 0xa0, 0x14, 0xee, 0x51, 0xe2, 0x81, 0x56, 0xfd, 0x35, + 0x0d, 0xf9, 0x50, 0xd8, 0xa8, 0x0e, 0x29, 0x99, 0xdf, 0xda, 0xb9, 0xf9, 0x2d, 0xed, 0xd0, 0x3e, + 0x80, 0x41, 0xa9, 0xcd, 0x0d, 0x6e, 0xd9, 0xd4, 0x3f, 0xc7, 0xbd, 0xa9, 0xf3, 0xa8, 0xbe, 0x11, + 0x62, 0x9b, 0x73, 0x38, 0xc6, 0x84, 0xbe, 0x82, 0xf9, 0x31, 0x61, 0xcc, 0x18, 0xfa, 0x39, 0x2a, + 0x6b, 0x69, 0x61, 0xfd, 0xfe, 0xf4, 0xd4, 0x2f, 0x14, 0x5c, 0x7e, 0x34, 0xe7, 0x70, 0x71, 0x1c, + 0xfb, 0xae, 0xfe, 0xa6, 0x01, 0x44, 0x7b, 0xa3, 0x36, 0x14, 0x4c, 0xc2, 0x06, 0xae, 0xe5, 0x48, + 0x37, 0xb4, 0x19, 0x6a, 0x73, 0x9c, 0xe0, 0x54, 0xc9, 0x4b, 0xbc, 0x4f, 0xc9, 0xab, 0xfe, 0xa5, + 0x41, 0x31, 0xee, 0x0b, 0xfa, 0x14, 0x52, 0xfc, 0xc4, 0x51, 0x21, 0x2a, 0xad, 0x3f, 0x9e, 0xed, + 0x46, 0xea, 0xbd, 0x13, 0x87, 0x60, 0x49, 0x84, 0x4a, 0x90, 0xf0, 0x1f, 0xc6, 0x14, 0x4e, 0x58, + 0x26, 0xfa, 0x00, 0x16, 0x3c, 0x3a, 0xb0, 0xc7, 0x8e, 0x4b, 0x18, 0x23, 0xa6, 0xce, 0xac, 0xd7, + 0x44, 0xde, 0x7f, 0x0a, 0x97, 0xe3, 0x0b, 0x5d, 0xeb, 0x35, 0x41, 0xff, 0x83, 0x0b, 0xa7, 0x4d, + 0x53, 0xd2, 0xb4, 0x34, 0x69, 0x58, 0xbb, 0x07, 0x29, 0xb1, 0x27, 0x5a, 0x84, 0x72, 0xef, 0xf3, + 0x4e, 0x43, 0x7f, 0xd5, 0xee, 0x76, 0x1a, 0x5b, 0xad, 0x9d, 0x56, 0x63, 0xbb, 0x3c, 0x87, 0x72, + 0x90, 0xea, 0x36, 0xda, 0xbd, 0xb2, 0x86, 0x8a, 0x90, 0xc3, 0x8d, 0xad, 0x46, 0x6b, 0xbf, 0xb1, + 0x5d, 0x4e, 0x6c, 0x66, 0x7d, 0x89, 0x57, 0xff, 0x10, 0xa5, 0x24, 0xaa, 0xb9, 0x4d, 0x80, 0xa8, + 0x80, 0xfb, 0xb9, 0x7b, 0x6b, 0xea, 0xab, 0xc0, 0xf9, 0xb0, 0x7c, 0xa3, 0x47, 0xb0, 0x14, 0x66, + 0x69, 0xa8, 0x88, 0xc9, 0x34, 0xbd, 0x12, 0xa4, 0x69, 0xb4, 0x2e, 0xf3, 0x14, 0x3d, 0x83, 0xab, + 0x01, 0x76, 0x42, 0xad, 0x01, 0x3c, 0x29, 0xe1, 0x01, 0x7f, 0xfc, 0xfe, 0xfd, 0x44, 0xff, 0x29, + 0x01, 0x29, 0xf1, 0x1c, 0xcc, 0xd4, 0xbc, 0x3c, 0xf5, 0x85, 0x90, 0x94, 0x42, 0xb8, 0x35, 0xcd, + 0xb3, 0x13, 0x0f, 0xfb, 0xa4, 0x48, 0x53, 0xef, 0x23, 0xd2, 0xda, 0xee, 0x99, 0xc1, 0xbd, 0x04, + 0x0b, 0x5b, 0xcd, 0xd6, 0xde, 0xb6, 0xbe, 0xd7, 0x6a, 0xef, 0x36, 0xb6, 0xf5, 0x6e, 0x67, 0xa3, + 0x5d, 0xd6, 0xd0, 0x65, 0x40, 0x9d, 0x0d, 0xdc, 0x68, 0xf7, 0x26, 0xe6, 0x13, 0xd5, 0x6f, 0x20, + 0x2d, 0x9f, 0x48, 0xf4, 0x00, 0x52, 0xe2, 0x91, 0xf4, 0xc3, 0x7b, 0x63, 0x1a, 0x07, 0xb1, 0x44, + 0xa0, 0x3a, 0x5c, 0x0c, 0x02, 0x23, 0x9f, 0xd9, 0x89, 0x70, 0x2e, 0xf8, 0x4b, 0x72, 0x13, 0x19, + 0x87, 0xda, 0x53, 0xc8, 0x05, 0x7d, 0x12, 0x5a, 0x82, 0x4b, 0xe2, 0x20, 0xfa, 0x6e, 0xab, 0xbd, + 0x7d, 0xca, 0x11, 0x80, 0x4c, 0xb7, 0x81, 0xf7, 0x1b, 0xb8, 
0xac, 0x89, 0xf1, 0xd6, 0x5e, 0x4b, + 0x68, 0x36, 0x51, 0xbb, 0x0f, 0x19, 0xf5, 0x36, 0x23, 0x04, 0xa9, 0x81, 0x6d, 0xaa, 0xe4, 0x4c, + 0x63, 0x39, 0x46, 0x15, 0xc8, 0xfa, 0xea, 0xf0, 0x5f, 0xa4, 0xe0, 0xb3, 0xf6, 0xbb, 0x06, 0xa5, + 0xc9, 0xca, 0x8c, 0x5e, 0x42, 0x91, 0xc9, 0x8a, 0xa2, 0xab, 0xd2, 0x3e, 0x43, 0x2d, 0x6a, 0xce, + 0xe1, 0x82, 0xe2, 0x50, 0x94, 0xff, 0x86, 0xbc, 0x45, 0xb9, 0x1e, 0x3d, 0x15, 0xc9, 0xe6, 0x1c, + 0xce, 0x59, 0x94, 0xab, 0xe5, 0x6b, 0x00, 0x7d, 0xdb, 0x1e, 0xf9, 0xeb, 0x42, 0x4c, 0xb9, 0xe6, + 0x1c, 0xce, 0xf7, 0x83, 0x36, 0x01, 0x5d, 0x87, 0xa2, 0x69, 0x7b, 0xfd, 0x11, 0xf1, 0x4d, 0x84, + 0x54, 0x34, 0xb1, 0x89, 0x9a, 0x95, 0x46, 0x61, 0xa2, 0xd6, 0x7e, 0xcc, 0x00, 0x44, 0x5d, 0x17, + 0xea, 0x09, 0x7f, 0x44, 0xc7, 0x76, 0xe0, 0x1a, 0x63, 0xf9, 0xf0, 0x0b, 0x7f, 0xee, 0x4c, 0xd5, + 0xb2, 0xa9, 0xe1, 0x8e, 0x04, 0x62, 0xd5, 0xf8, 0xa9, 0x0f, 0xb4, 0x0a, 0x17, 0x63, 0x7d, 0xa0, + 0x7e, 0x68, 0xb0, 0x43, 0x3d, 0xac, 0x61, 0xe5, 0xa8, 0xd1, 0x6b, 0x1a, 0xec, 0xb0, 0x65, 0x56, + 0xff, 0x4c, 0xfa, 0x67, 0x92, 0x70, 0xf4, 0x12, 0xe6, 0x0f, 0x3c, 0x3a, 0x10, 0x89, 0xac, 0xcb, + 0x66, 0x7c, 0x96, 0x82, 0x5f, 0x0c, 0x28, 0xda, 0x82, 0xb2, 0x0f, 0x97, 0x6d, 0xd7, 0x1a, 0x5a, + 0xd4, 0x18, 0xe9, 0x93, 0xdc, 0x89, 0x19, 0xb8, 0x17, 0x03, 0xae, 0x9d, 0xf8, 0x1e, 0x2d, 0xc8, + 0x1f, 0x58, 0x23, 0xa2, 0x68, 0x93, 0x33, 0xd0, 0xe6, 0x04, 0x5c, 0x52, 0x5d, 0x83, 0xc2, 0xc8, + 0xa2, 0x44, 0xa7, 0xde, 0xb8, 0x4f, 0x5c, 0x19, 0xd1, 0x24, 0x06, 0x31, 0xd5, 0x96, 0x33, 0xe8, + 0x3a, 0xcc, 0x0f, 0xec, 0x91, 0x37, 0xa6, 0x81, 0x49, 0x5a, 0x9a, 0x14, 0xd5, 0xa4, 0x6f, 0xb4, + 0x09, 0x85, 0x91, 0x6d, 0x98, 0xfa, 0xd8, 0x36, 0xbd, 0x51, 0xf0, 0x3f, 0xc1, 0x59, 0x0d, 0xec, + 0x0b, 0x69, 0x88, 0x41, 0xa0, 0xd4, 0x18, 0x75, 0xa1, 0xc4, 0x6c, 0xcf, 0x1d, 0x10, 0xfd, 0x98, + 0xb8, 0x4c, 0xbc, 0xbe, 0xd9, 0x19, 0x3c, 0x9b, 0x57, 0x1c, 0xfb, 0x8a, 0xa2, 0xfa, 0xbd, 0x06, + 0x85, 0x98, 0x76, 0xd0, 0x0e, 0xa4, 0xa5, 0xfc, 0xa6, 0x69, 0x3b, 0xdf, 0xa6, 0x3e, 0xac, 0xe0, + 0xe8, 0x36, 0x2c, 0x06, 0x65, 0x45, 0xc9, 0x79, 0xa2, 0xae, 0x20, 0x7f, 0x4d, 0x6d, 0xaa, 0x0a, + 0xcb, 0x2f, 0x1a, 0x64, 0x7c, 0x4f, 0xb7, 0x21, 0xe3, 0x5f, 0xd4, 0x2c, 0x72, 0xf3, 0xb1, 0xe8, + 0x13, 0xc8, 0xf5, 0x3d, 0xd1, 0x9a, 0xfb, 0x72, 0xff, 0xa7, 0x3c, 0x59, 0x89, 0x6e, 0x99, 0xb5, + 0x2f, 0x61, 0xe1, 0x8d, 0xd5, 0xa8, 0x75, 0xd6, 0x62, 0xad, 0xb3, 0x70, 0x9b, 0x2b, 0x53, 0x62, + 0xea, 0xfd, 0x13, 0x4e, 0x26, 0xdd, 0x0e, 0xd7, 0x36, 0x4f, 0x38, 0x91, 0x6e, 0x6f, 0x3a, 0x70, + 0xd5, 0xb2, 0xdf, 0x7d, 0xae, 0x4d, 0xf5, 0x5f, 0x41, 0x47, 0x4c, 0x76, 0xb4, 0x2f, 0x36, 0x87, + 0x16, 0x3f, 0xf4, 0xfa, 0xf5, 0x81, 0x3d, 0x5e, 0x53, 0xf6, 0xab, 0x16, 0x65, 0xdc, 0xf5, 0xc6, + 0x84, 0xaa, 0xf7, 0x76, 0x2d, 0xa2, 0x5a, 0x55, 0xbf, 0x33, 0x0c, 0x09, 0x5d, 0x1d, 0x46, 0x3f, + 0x37, 0xf4, 0x33, 0x72, 0xfa, 0xee, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x5e, 0xa6, 0x6a, + 0x92, 0x10, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go new file mode 100644 index 000000000..929e8995b --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -0,0 +1,406 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: opencensus/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Global configuration of the trace service. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ProbabilitySampler + // *TraceConfig_ConstantSampler + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` + // The global default max number of message events per span. + MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` + // The global default max number of link entries per span. + MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{0} +} + +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ProbabilitySampler struct { + ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` +} + +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` +} + +func (*TraceConfig_ProbabilitySampler) 
isTraceConfig_Sampler() {} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { + if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { + return x.ProbabilitySampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { + if m != nil { + return m.MaxNumberOfAnnotations + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { + if m != nil { + return m.MaxNumberOfMessageEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TraceConfig_OneofMarshaler, _TraceConfig_OneofUnmarshaler, _TraceConfig_OneofSizer, []interface{}{ + (*TraceConfig_ProbabilitySampler)(nil), + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +func _TraceConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TraceConfig) + // sampler + switch x := m.Sampler.(type) { + case *TraceConfig_ProbabilitySampler: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProbabilitySampler); err != nil { + return err + } + case *TraceConfig_ConstantSampler: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConstantSampler); err != nil { + return err + } + case *TraceConfig_RateLimitingSampler: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RateLimitingSampler); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TraceConfig.Sampler has unexpected type %T", x) + } + return nil +} + +func _TraceConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TraceConfig) + switch tag { + case 1: // sampler.probability_sampler + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ProbabilitySampler) + err := b.DecodeMessage(msg) + m.Sampler = &TraceConfig_ProbabilitySampler{msg} + return true, err + case 2: // sampler.constant_sampler + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ConstantSampler) + err := b.DecodeMessage(msg) + m.Sampler = &TraceConfig_ConstantSampler{msg} + return true, err + case 3: // sampler.rate_limiting_sampler + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RateLimitingSampler) + err := b.DecodeMessage(msg) + m.Sampler = &TraceConfig_RateLimitingSampler{msg} + return true, err + default: + return false, nil + } +} + +func 
_TraceConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TraceConfig) + // sampler + switch x := m.Sampler.(type) { + case *TraceConfig_ProbabilitySampler: + s := proto.Size(x.ProbabilitySampler) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *TraceConfig_ConstantSampler: + s := proto.Size(x.ConstantSampler) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *TraceConfig_RateLimitingSampler: + s := proto.Size(x.RateLimitingSampler) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Sampler that tries to uniformly sample traces with a given probability. +// The probability of sampling a trace is equal to that of the specified probability. +type ProbabilitySampler struct { + // The desired probability of sampling. Must be within [0.0, 1.0]. + SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } +func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } +func (*ProbabilitySampler) ProtoMessage() {} +func (*ProbabilitySampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{1} +} + +func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) +} +func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) +} +func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbabilitySampler.Merge(m, src) +} +func (m *ProbabilitySampler) XXX_Size() int { + return xxx_messageInfo_ProbabilitySampler.Size(m) +} +func (m *ProbabilitySampler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo + +func (m *ProbabilitySampler) GetSamplingProbability() float64 { + if m != nil { + return m.SamplingProbability + } + return 0 +} + +// Sampler that makes a constant decision (either always "yes" or always "no") +// on span sampling. +type ConstantSampler struct { + // Whether spans should be always sampled, or never sampled. 
+ Decision bool `protobuf:"varint,1,opt,name=decision,proto3" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2} +} + +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() bool { + if m != nil { + return m.Decision + } + return false +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. + Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{3} +} + +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") + proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") + proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") + proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) +} + +var fileDescriptor_5359209b41ff50c5 = []byte{ + // 431 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x41, 0x6f, 0xd4, 0x30, + 0x10, 0x85, 0x09, 0x5b, 0xda, 0x32, 0x3d, 0x74, 0xe5, 0xa8, 0x28, 0x45, 0x3d, 0x54, 0x7b, 0xa1, + 0x42, 0x24, 0xa1, 0x70, 0x40, 0x5c, 0x90, 0xd8, 
0x0a, 0xc4, 0xa1, 0xc0, 0x2a, 0x20, 0x21, 0x71, + 0x09, 0x4e, 0xea, 0x0d, 0x16, 0xf1, 0x38, 0xd8, 0x93, 0xa8, 0xfc, 0x17, 0x7e, 0x6c, 0x15, 0x67, + 0x95, 0x64, 0xbb, 0xed, 0xde, 0xec, 0xf7, 0xde, 0xf7, 0x2c, 0x79, 0x6c, 0x78, 0xa1, 0x2b, 0x81, + 0xb9, 0x40, 0x5b, 0xdb, 0xb8, 0x32, 0x9a, 0x74, 0x4c, 0x86, 0xe7, 0x22, 0x6e, 0xce, 0xbb, 0x45, + 0x9a, 0x6b, 0x5c, 0xca, 0x22, 0x72, 0x1e, 0x3b, 0x1e, 0xd2, 0x9d, 0x12, 0xb9, 0x50, 0xd4, 0x9c, + 0xcf, 0xfe, 0xef, 0xc0, 0xc1, 0xf7, 0x76, 0x73, 0xe1, 0x00, 0xf6, 0x0b, 0xfc, 0xca, 0xe8, 0x8c, + 0x67, 0xb2, 0x94, 0xf4, 0x2f, 0xb5, 0x5c, 0x55, 0xa5, 0x30, 0x81, 0x77, 0xea, 0x9d, 0x1d, 0xbc, + 0x0a, 0xa3, 0x7b, 0x8b, 0xa2, 0xc5, 0x40, 0x7d, 0xeb, 0xa0, 0x4f, 0x0f, 0x12, 0x56, 0x6d, 0xa8, + 0xec, 0x07, 0x4c, 0x73, 0x8d, 0x96, 0x38, 0x52, 0x5f, 0xff, 0xd0, 0xd5, 0x3f, 0xdf, 0x52, 0x7f, + 0xb1, 0x42, 0x86, 0xee, 0xc3, 0x7c, 0x5d, 0x62, 0x57, 0x70, 0x64, 0x38, 0x89, 0xb4, 0x94, 0x4a, + 0x92, 0xc4, 0xa2, 0x6f, 0x9f, 0xb8, 0xf6, 0x68, 0x4b, 0x7b, 0xc2, 0x49, 0x5c, 0xae, 0xb0, 0xe1, + 0x04, 0xdf, 0x6c, 0xca, 0xec, 0x0d, 0x04, 0x8a, 0x5f, 0xa7, 0x58, 0xab, 0x4c, 0x98, 0x54, 0x2f, + 0x53, 0x4e, 0x64, 0x64, 0x56, 0x93, 0xb0, 0xc1, 0xce, 0xa9, 0x77, 0x36, 0x49, 0x8e, 0x14, 0xbf, + 0xfe, 0xe2, 0xec, 0xaf, 0xcb, 0xf7, 0xbd, 0xc9, 0xde, 0xc2, 0xf1, 0x2d, 0x10, 0x51, 0x13, 0x27, + 0xa9, 0xd1, 0x06, 0x8f, 0x1c, 0xf9, 0x64, 0x4c, 0x0e, 0x2e, 0x7b, 0x07, 0x27, 0xeb, 0xa8, 0x12, + 0xd6, 0xf2, 0x42, 0xa4, 0xa2, 0x11, 0x48, 0x36, 0xd8, 0x75, 0x74, 0x30, 0xa2, 0x3f, 0x77, 0x81, + 0x0f, 0xce, 0x67, 0x21, 0xf8, 0xeb, 0x7c, 0x29, 0xf1, 0x8f, 0x0d, 0xf6, 0x1c, 0x36, 0x1d, 0x61, + 0x97, 0xad, 0x3e, 0x7f, 0x0c, 0x7b, 0xab, 0xab, 0x9b, 0x7d, 0x04, 0xb6, 0x39, 0x58, 0xf6, 0x12, + 0x7c, 0x17, 0x90, 0x58, 0x8c, 0x5c, 0xf7, 0x48, 0xbc, 0xe4, 0x2e, 0x6b, 0x16, 0xc2, 0xe1, 0xad, + 0x09, 0xb2, 0xa7, 0xb0, 0x7f, 0x25, 0x72, 0x69, 0xa5, 0x46, 0x47, 0xee, 0x27, 0xfd, 0x7e, 0xf6, + 0x0c, 0xfc, 0x3b, 0x46, 0xc2, 0xa6, 0x30, 0xf9, 0x5b, 0x59, 0x97, 0x9e, 0x24, 0xed, 0x72, 0xde, + 0xc0, 0x89, 0xd4, 0xf7, 0x0f, 0x76, 0x3e, 0x1d, 0xbd, 0xed, 0x45, 0x6b, 0x2d, 0xbc, 0x9f, 0xf3, + 0x42, 0xd2, 0xef, 0x3a, 0x8b, 0x72, 0xad, 0xe2, 0x8e, 0x0a, 0x25, 0x5a, 0x32, 0xb5, 0x12, 0xd8, + 0xdd, 0x78, 0x3c, 0x14, 0x86, 0xdd, 0xef, 0x2a, 0x04, 0x86, 0xc5, 0xf0, 0xc9, 0xb2, 0x5d, 0x27, + 0xbf, 0xbe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x98, 0xa1, 0x9e, 0x88, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gradlew b/vendor/github.com/census-instrumentation/opencensus-proto/gradlew new file mode 100755 index 000000000..cccdd3d51 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + 
esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gradlew.bat b/vendor/github.com/census-instrumentation/opencensus-proto/gradlew.bat new file mode 100644 index 000000000..e95643d6a --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/settings.gradle b/vendor/github.com/census-instrumentation/opencensus-proto/settings.gradle new file mode 100644 index 000000000..52ecb4e13 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/settings.gradle @@ -0,0 +1 @@ +rootProject.name = "opencensus-proto" diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore deleted file mode 100644 index 1c3ae0a77..000000000 --- a/vendor/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. -*.sublime-project -*.sublime-workspace diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d99106019..000000000 --- a/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,18 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 252ff8aa2..000000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -a-palchikov -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Anusha Ragunathan -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. 
Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Matthew Green -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victor Vieux -Victoria Bialas -Vincent Batts -Vincent Demeester -Vincent Giersch -W. Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index c59828182..000000000 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... 
or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. 
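The deleted BUILDING.md above ends by noting that optional build tags are passed via the `DOCKER_BUILDTAGS` environment variable. As a purely illustrative sketch, the tag values below are the ones that appear in the Dockerfile and circle.yml removed later in this diff, not a recommendation; such a build could be invoked as:

```sh
# Illustrative only: enable the optional OSS and GCS storage drivers via build
# tags when building the registry binaries with the project Makefile.
DOCKER_BUILDTAGS="include_oss include_gcs" make binaries
```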
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index e7b16b3c2..000000000 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,108 +0,0 @@ -# Changelog - -## 2.6.0 (2017-01-18) - -#### Storage -- S3: fixed bug in delete due to read-after-write inconsistency -- S3: allow EC2 IAM roles to be used when authorizing region endpoints -- S3: add Object ACL Support -- S3: fix delete method's notion of subpaths -- S3: use multipart upload API in `Move` method for performance -- S3: add v2 signature signing for legacy S3 clones -- Swift: add simple heuristic to detect incomplete DLOs during read ops -- Swift: support different user and tenant domains -- Swift: bulk deletes in chunks -- Aliyun OSS: fix delete method's notion of subpaths -- Aliyun OSS: optimize data copy after upload finishes -- Azure: close leaking response body -- Fix storage drivers dropping non-EOF errors when listing repositories -- Compare path properly when listing repositories in catalog -- Add a foreign layer URL host whitelist -- Improve catalog enumerate runtime - -#### Registry -- Export `storage.CreateOptions` in top-level package -- Enable notifications to endpoints that use self-signed certificates -- Properly validate multi-URL foreign layers -- Add control over validation of URLs in pushed manifests -- Proxy mode: fix socket leak when pull is cancelled -- Tag service: properly handle error responses on HEAD request -- Support for custom authentication URL in proxying registry -- Add configuration option to disable access logging -- Add notification filtering by target media type -- Manifest: `References()` returns all children -- Honor `X-Forwarded-Port` and Forwarded headers -- Reference: Preserve tag and digest in With* functions -- Add policy configuration for enforcing repository classes - -#### Client -- Changes the client Tags `All()` method to follow links -- Allow registry clients to connect via HTTP2 -- Better handling of OAuth errors in client - -#### Spec -- Manifest: clarify relationship between urls and foreign layers -- Authorization: add support for repository classes - -#### Manifest -- Override media type returned from `Stat()` for existing manifests -- Add plugin mediatype to distribution manifest - -#### Docs -- Document `TOOMANYREQUESTS` error code -- Document required Let's Encrypt port -- Improve documentation around implementation of OAuth2 -- Improve documentation for configuration - -#### Auth -- Add support for registry type in scope -- Add support for using v2 ping challenges for v1 -- Add leeway to JWT `nbf` and `exp` checking -- htpasswd: dynamically parse htpasswd file -- Fix missing auth headers with PATCH HTTP request when pushing to default port - -#### Dockerfile -- Update to go1.7 -- Reorder Dockerfile steps for better layer caching - -#### Notes - -Documentation has moved to the documentation repository at -`github.com/docker/docker.github.io/tree/master/registry` - -The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
- - -## 2.5.0 (2016-06-14) - -#### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -#### API -- Support returning HTTP 429 (Too Many Requests) - -#### Documentation -- Update auth documentation examples to show "expires in" as int - -#### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 1f1441bee..000000000 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,148 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -### Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry -version` - 3. 
copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. 
All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index ac8dbca2f..000000000 --- a/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM golang:1.8-alpine - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs - -ARG GOOS=linux -ARG GOARCH=amd64 - -RUN set -ex \ - && apk add --no-cache make git - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries - -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index bda400150..000000000 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,58 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "dmcgowan", - "dmp42", - "richardscothern", - "shykes", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. 
-# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index 7c6f9c7a6..000000000 --- a/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,99 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet -.DEFAULT: all -all: fmt vet lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 - -# Go files -GOFILES=$(shell find . -type f -name '*.go') - -# Package list -PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) - -# Resolving binary dependencies for specific targets -GOLINT=$(shell which golint || echo '') -VNDR=$(shell which vndr || echo '') - -${PREFIX}/bin/registry: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry - -${PREFIX}/bin/digest: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest - -${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 
2>&1 | grep -v ^vendor/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) - -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-validate: - @echo "+ $@" - $(if $(VNDR), , \ - $(error Please install vndr: go get github.com/lk4d4/vndr)) - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @$(VNDR) - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false) - @rm -Rf vendor - @mv .vendor.bak vendor diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index 998878850..000000000 --- a/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. - - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. 
- -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) -may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
- IRC: #docker-distribution on FreeNode
- Issue Tracker: github.com/docker/distribution/issues
- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
- Mailing List: docker@dockerproject.org
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md deleted file mode 100644 index 73eba5a87..000000000 --- a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,44 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. - - Update the `CHANGELOG.md` file and create a PR to master with the updates. -Once that PR has been approved by maintainers the change may be cherry-picked -to the release branch (new release branches may be forked from this commit). - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - -``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format -`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added -to your Github account. The comment for the tag should include the release -notes, use previous tags as a guide for formatting consistently. Run -`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, -check comment and correct commit hash. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. - diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afe..000000000 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. 
-- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. - -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service. - -#### Extensibility - -The registry should provide extension points to add functionality. By keeping -the scope narrow, but providing the ability to add functionality. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. 
- -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigid design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to both -decouple search functionality from the registry. The makes the registry -simpler to deploy, especially in use cases where search is not needed, and -let's us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding on -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone attempts to address the problem with the existing tools -and propose it as a standard search API or uses it to inform a standardization -process. Once this has been explored, we integrate with the docker client. - -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deleting seems simple, there are a number -mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraph discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal but ideally, both are supported. Generally, according to this rule, we -err on holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. 
- -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read- after-write consistency, since this is the lower common denominator -among the storage drivers. This is mitigated by writing values in reverse- -dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we also can try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation. The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: 1. maintaining a consistent consensus - of reference counts across a set of Registries and 2. Building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second.. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination. 
-- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly effect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. - diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index 01d309029..000000000 --- a/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,257 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. 
-type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotators canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. 
- SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. - Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier. With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. - Stat *Descriptor - } -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. 
If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. -type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml deleted file mode 100644 index ddc76c86c..000000000 --- a/vendor/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,94 +0,0 @@ -# Pony-up! 
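Taken together, BlobIngester and BlobWriter describe a two-phase upload: Create (or Resume) hands back a writer, data is streamed into it, and Commit checks the result against a provisional descriptor, while a deferred Cancel stays a no-op once the commit has happened. A minimal sketch of that flow, written against the interfaces above (the `uploadBlob` helper is illustrative, not vendored code):

```go
package blobupload

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/opencontainers/go-digest"
)

// uploadBlob streams payload into a blob store and returns the canonical
// descriptor reported by Commit.
func uploadBlob(ctx context.Context, ingester distribution.BlobIngester, mediaType string, payload io.Reader) (distribution.Descriptor, error) {
	bw, err := ingester.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Cancel is documented above as tolerating calls after Commit, which is
	// what makes this defer pattern safe.
	defer bw.Cancel(ctx)

	// Hash the payload while copying it into the writer so the provisional
	// descriptor carries a digest for Commit to verify.
	digester := digest.Canonical.Digester()
	size, err := io.Copy(bw, io.TeeReader(payload, digester.Hash()))
	if err != nil {
		return distribution.Descriptor{}, err
	}

	return bw.Commit(ctx, distribution.Descriptor{
		MediaType: mediaType,
		Size:      size,
		Digest:    digester.Digest(),
	})
}
```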
-machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.8 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/lk4d4/vndr: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # Ensure validation of dependencies - - git fetch origin: - pwd: $BASE_STABLE - - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - - # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae8..000000000 --- a/vendor/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go deleted file mode 100644 index 71327dca7..000000000 --- a/vendor/github.com/docker/distribution/digestset/set.go +++ /dev/null @@ -1,247 +0,0 @@ -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
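The Add/Lookup semantics are easiest to see end to end. The sketch below is illustrative only (the digests are synthesized from throwaway strings) and exercises the exported NewSet, Add and Lookup shown above, resolving a digest from a short hex prefix:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()

	// Two synthetic digests; real values would come from digest.FromBytes
	// over actual blob contents.
	a := digest.FromString("first blob")
	b := digest.FromString("second blob")
	for _, d := range []digest.Digest{a, b} {
		if err := set.Add(d); err != nil {
			panic(err)
		}
	}

	// A short, unambiguous hex prefix resolves to the full digest; an
	// ambiguous prefix would return ErrDigestAmbiguous instead.
	resolved, err := set.Lookup(a.Hex()[:12])
	fmt.Println(resolved, err)
}
```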
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708..000000000 --- a/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 020d33258..000000000 --- a/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. 
-type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return "unverified manifest" -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go deleted file mode 100644 index 2c99f25d3..000000000 --- a/vendor/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,125 +0,0 @@ -package distribution - -import ( - "fmt" - "mime" - - "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the media type. - Payload() (mediaType string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. -type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. 
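All of these error values are concrete types rather than sentinel strings, so callers are expected to branch on the type. A brief, illustrative sketch (the `classify` helper is invented here) of how that usually looks:

```go
package registryerrors

import "github.com/docker/distribution"

// classify switches on the typed errors defined above rather than matching
// error strings, which is how the package intends them to be consumed.
func classify(err error) string {
	switch err.(type) {
	case distribution.ErrManifestUnknownRevision:
		return "manifest revision not found"
	case distribution.ErrManifestBlobUnknown:
		return "manifest references a missing blob"
	case distribution.ErrRepositoryUnknown:
		return "repository does not exist"
	default:
		return "unclassified error"
	}
}
```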
- References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc, 0) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediaType string - if ctHeader != "" { - var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediaType] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This -// should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) - } - mappings[mediaType] = u - return nil -} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go deleted file mode 100644 index 978df7eab..000000000 --- a/vendor/github.com/docker/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. 
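UnmarshalManifest keys off the Content-Type header, falling back to the default registration when the media type is unknown, so wiring it to an HTTP response is nearly a one-liner. A small sketch, with the `decodeManifest` helper invented for illustration:

```go
package manifestdecode

import (
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution"
)

// decodeManifest reads a registry HTTP response and hands the body to
// UnmarshalManifest, which selects the registered UnmarshalFunc based on the
// Content-Type header as described above.
func decodeManifest(resp *http.Response) (distribution.Manifest, distribution.Descriptor, error) {
	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	return distribution.UnmarshalManifest(resp.Header.Get("Content-Type"), payload)
}
```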
-func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go deleted file mode 100644 index 2d71fc5e9..000000000 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ /dev/null @@ -1,170 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. 
-func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. 
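The normalization helpers above are what turn a short, familiar name like "ubuntu" into the fully qualified "docker.io/library/ubuntu" and back again. A compact sketch using only the exported functions shown in this file (expected output noted in comments):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// "ubuntu" normalizes to "docker.io/library/ubuntu", per
	// splitDockerDomain above.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}

	// TagNameOnly adds the default ":latest" tag to a bare repository name.
	tagged := reference.TagNameOnly(named)

	fmt.Println(tagged.String())                  // docker.io/library/ubuntu:latest
	fmt.Println(reference.FamiliarString(tagged)) // ubuntu:latest
}
```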
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index 2f66cca87..000000000 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. 
-func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
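WithName, WithTag and WithDigest compose in layers: a validated repository name first, then an optional tag or digest on top, yielding NamedTagged or Canonical values. An illustrative sketch (the repository name, tag and digest contents are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	// WithName validates the repository name against anchoredNameRegexp.
	name, err := reference.WithName("docker.io/library/busybox")
	if err != nil {
		panic(err)
	}

	// Layer a tag and, separately, a digest over the same name. Errors are
	// ignored here only to keep the sketch short.
	tagged, _ := reference.WithTag(name, "1.29")
	canonical, _ := reference.WithDigest(name, digest.FromString("illustrative content"))

	fmt.Println(tagged.String())    // docker.io/library/busybox:1.29
	fmt.Println(canonical.String()) // docker.io/library/busybox@sha256:<hex>
}
```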
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 786034932..000000000 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. 
- DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. 
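All of these combinators ultimately feed ReferenceRegexp, an anchored expression with capture groups for name, tag and digest, in that order. A short sketch showing those groups on a made-up reference (the registry host and repository are invented):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Group 0 is the whole match; groups 1-3 are name, tag and digest.
	m := reference.ReferenceRegexp.FindStringSubmatch(
		"registry.example.com:5000/team/app:v1.2.3")
	if m == nil {
		panic("reference did not match")
	}
	fmt.Println(m[1]) // registry.example.com:5000/team/app
	fmt.Println(m[2]) // v1.2.3
	fmt.Println(m[3]) // "" (no digest component)
}
```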
-func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go deleted file mode 100644 index 1da1d533f..000000000 --- a/vendor/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,97 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. - Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. 
- Named() reference.Named - - // Manifests returns a reference to this repository's manifest service, - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repository's tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index 503056596..000000000 --- a/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest.
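Namespace, Repository and TagService compose naturally: a namespace hands out repositories, and each repository exposes tag, manifest and blob services. As a hedged sketch only (the `resolveTag` helper is invented, and it assumes some Namespace implementation is already available), resolving a tag to its manifest descriptor looks roughly like:

```go
package tagresolve

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
)

// resolveTag asks the namespace for a repository, then asks that repository's
// TagService for the descriptor the tag currently points at.
func resolveTag(ctx context.Context, ns distribution.Namespace, name reference.Named, tag string) (distribution.Descriptor, error) {
	repo, err := ns.Repository(ctx, name)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return repo.Tags(ctx).Get(ctx, tag)
}
```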
- Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index d67edd779..000000000 --- a/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,43 +0,0 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 -github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go index b59fa5fdc..d4ee7be81 100644 --- a/vendor/github.com/docker/spdystream/handlers.go +++ 
b/vendor/github.com/docker/spdystream/handlers.go @@ -30,9 +30,7 @@ func MirrorStreamHandler(stream *Stream) { }() } -// NoopStreamHandler does nothing when stream connects, most -// likely used with RejectAuthHandler which will not allow any -// streams to make it to the stream handler. +// NoopStreamHandler does nothing when stream connects. func NoOpStreamHandler(stream *Stream) { stream.SendReply(http.Header{}, false) } diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 000000000..2092c72c4 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.8 + - 1.7 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - go get github.com/jessevdk/go-flags + +script: + - go get + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 000000000..0eb9b72d8 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 000000000..a711d7961 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,292 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionallity for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). 
+ +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch +``` + +**Stable Versions**: +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +There is a single global configuration variable `jsonpatch.SupportNegativeIndices`. This +defaults to `true` and enables the non-standard practice of allowing negative indices +to mean indices starting at the end of an array. This functionality can be disabled +by setting `jsonpatch.SupportNegativeIndices = false`. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now let's apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When run, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document.
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When run, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences and key-value ordering. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "different"`) + } +} +``` + +When run, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "different" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both sets of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar to the result of merging each merge patch into the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individually against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When run, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the command-line program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and is fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000..6806c4c20 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,383 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000..f26b6824b --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,682 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var SupportNegativeIndices bool = true + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +type operation map[string]*json.RawMessage + +// Patch is an ordered collection of operations. +type Patch []operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, fmt.Errorf("Unknown type") + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document") + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array") + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := 
json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +func (o operation) kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) path() string { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) from() string { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +func (d *partialArray) set(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + sz := len(*d) + if idx+1 > sz { + sz = idx + 1 + } + + ary := make([]*lazyNode, sz) + + cur := *d + + copy(ary, cur) + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + ary[idx] = val + + *d 
= ary + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + ary := make([]*lazyNode, len(*d)+1) + + cur := *d + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) + } + + return con.add(key, op.value()) +} + +func (p Patch) remove(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) + } + + return con.remove(key) +} + +func (p Patch) replace(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) + } + + return con.set(key, op.value()) +} + +func (p Patch) move(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + err = con.remove(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +func (p Patch) test(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + + if err != nil { + return err + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return fmt.Errorf("Testing value %s failed", path) + } else if op.value() == nil { + return fmt.Errorf("Testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + 
return fmt.Errorf("Testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + for _, op := range p { + switch op.kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op) + default: + err = fmt.Errorf("Unexpected kind: %s", op.kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
+ +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc0..000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/golang/groupcache/.travis.yml b/vendor/github.com/golang/groupcache/.travis.yml index ef6e67633..674925795 100644 --- a/vendor/github.com/golang/groupcache/.travis.yml +++ b/vendor/github.com/golang/groupcache/.travis.yml @@ -11,6 +11,7 @@ script: go: - 1.9.x - 1.10.x + - 1.11.x - master cache: diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 000000000..0f0fa837f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. 
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. 
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. 
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. 
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} + +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto 
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 000000000..01947639a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 000000000..fc84cd79d --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 7f3643fe9..524404cc5 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All rights reserved. +// Copyright 2018 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -35,20 +35,43 @@ const ( var rander = rand.Reader // random function -// Parse decodes s into a UUID or returns an error. 
Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. func Parse(s string) (UUID, error) { var uuid UUID - if len(s) != 36 { - if len(s) != 36+9 { - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: if strings.ToLower(s[:9]) != "urn:uuid:" { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { return uuid, errors.New("invalid UUID format") } @@ -70,15 +93,29 @@ func Parse(s string) (UUID, error) { // ParseBytes is like Parse, except it parses a byte slice instead of a string. func ParseBytes(b []byte) (UUID, error) { var uuid UUID - if len(b) != 36 { - if len(b) != 36+9 { - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { return uuid, errors.New("invalid UUID format") } @@ -97,6 +134,16 @@ func ParseBytes(b []byte) (UUID, error) { return uuid, nil } +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + // FromBytes creates a new UUID from a byte slice. Returns an error if the slice // does not have a length of 16. The bytes are copied from the slice. 
func FromBytes(b []byte) (uuid UUID, err error) { @@ -130,7 +177,7 @@ func (uuid UUID) URN() string { } func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) + hex.Encode(dst, uuid[:4]) dst[8] = '-' hex.Encode(dst[9:13], uuid[4:6]) dst[13] = '-' diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml index 92fd9d482..8c31ea160 100644 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -22,6 +22,15 @@ global_env: OS_BRANCH: stable/queens +- job: + name: gophercloud-acceptance-test-rocky + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on rocky branch + vars: + global_env: + OS_BRANCH: stable/rocky + - job: name: gophercloud-acceptance-test-pike parent: gophercloud-acceptance-test @@ -80,6 +89,9 @@ recheck-queens: jobs: - gophercloud-acceptance-test-queens + recheck-rocky: + jobs: + - gophercloud-acceptance-test-rocky periodic: jobs: - gophercloud-unittest diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go index 19b8cf7bf..b9986660c 100644 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -120,6 +120,22 @@ func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, continue } + if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { + sliceValue := v + if sliceValue.Kind() == reflect.Ptr { + sliceValue = sliceValue.Elem() + } + + for i := 0; i < sliceValue.Len(); i++ { + element := sliceValue.Index(i) + if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { + _, err := BuildRequestBody(element.Interface(), "") + if err != nil { + return nil, err + } + } + } + } if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { if zero { //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go index 30d3b1559..94a16bff0 100644 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -90,38 +90,40 @@ func (r Result) extractIntoPtr(to interface{}, label string) error { if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) - for _, v := range m[label].([]interface{}) { - // For each iteration of the slice, we create a new struct. - // This is to work around a bug where elements of a slice - // are reused and not overwritten when the same copy of the - // struct is used: - // - // https://github.com/golang/go/issues/21092 - // https://github.com/golang/go/issues/24155 - // https://play.golang.org/p/NHo3ywlPZli - newType := reflect.New(typeOfV).Elem() + if mSlice, ok := m[label].([]interface{}); ok { + for _, v := range mSlice { + // For each iteration of the slice, we create a new struct. 
+ // This is to work around a bug where elements of a slice + // are reused and not overwritten when the same copy of the + // struct is used: + // + // https://github.com/golang/go/issues/21092 + // https://github.com/golang/go/issues/24155 + // https://play.golang.org/p/NHo3ywlPZli + newType := reflect.New(typeOfV).Elem() - b, err := json.Marshal(v) - if err != nil { - return err - } - - // This is needed for structs with an UnmarshalJSON method. - // Technically this is just unmarshalling the response into - // a struct that is never used, but it's good enough to - // trigger the UnmarshalJSON method. - for i := 0; i < newType.NumField(); i++ { - s := newType.Field(i).Addr().Interface() - - // Unmarshal is used rather than NewDecoder to also work - // around the above-mentioned bug. - err = json.Unmarshal(b, s) + b, err := json.Marshal(v) if err != nil { return err } - } - newSlice = reflect.Append(newSlice, newType) + // This is needed for structs with an UnmarshalJSON method. + // Technically this is just unmarshalling the response into + // a struct that is never used, but it's good enough to + // trigger the UnmarshalJSON method. + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + + // Unmarshal is used rather than NewDecoder to also work + // around the above-mentioned bug. + err = json.Unmarshal(b, s) + if err != nil { + return err + } + } + + newSlice = reflect.Append(newSlice, newType) + } } // "to" should now be properly modeled to receive the diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore new file mode 100644 index 000000000..529c3412b --- /dev/null +++ b/vendor/github.com/imdario/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/license b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE similarity index 100% rename from vendor/github.com/konsorten/go-windows-terminal-sequences/license rename to vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod new file mode 100644 index 000000000..716c61312 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod @@ -0,0 +1 @@ +module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml index d9deadb86..1689c7d73 100644 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.9.x + - "1.11.x" - tip script: diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 000000000..3b3cb723f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,21 @@ +## 
1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 2a727575a..1f0abc65a 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -2,6 +2,8 @@ package mapstructure import ( "errors" + "fmt" + "net" "reflect" "strconv" "strings" @@ -115,6 +117,50 @@ func StringToTimeDurationHookFunc() DecodeHookFunc { } } +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + // StringToTimeHookFunc returns a DecodeHookFunc that converts // strings to time.Time. func StringToTimeHookFunc(layout string) DecodeHookFunc { diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index d3222b8f4..256ee63fb 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -224,6 +224,17 @@ func (d *Decoder) Decode(input interface{}) error { // Decodes an unknown data type into a specific reflection value. func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + if input == nil { // If the data is nil, then we don't set anything, unless ZeroFields is set // to true. @@ -237,7 +248,6 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e return nil } - inputVal := reflect.ValueOf(input) if !inputVal.IsValid() { // If the input value is invalid, then we just set the value // to be the zero value. 
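The StringToIPHookFunc and StringToIPNetHookFunc hooks introduced above are meant to be wired in through DecoderConfig.DecodeHook. A rough sketch, with a made-up listenerConfig type and address:

```go
package main

import (
	"fmt"
	"net"

	"github.com/mitchellh/mapstructure"
)

// listenerConfig is a made-up type used only to demonstrate the hook.
type listenerConfig struct {
	Addr net.IP `mapstructure:"addr"`
}

func main() {
	input := map[string]interface{}{"addr": "192.0.2.10"}

	var out listenerConfig
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Turn the string "192.0.2.10" into a net.IP during decoding.
		DecodeHook: mapstructure.StringToIPHookFunc(),
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(out.Addr) // 192.0.2.10
}
```

If both hooks are needed they can be combined with ComposeDecodeHookFunc.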
@@ -260,8 +270,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e } var err error - inputKind := getKind(outVal) - switch inputKind { + outputKind := getKind(outVal) + switch outputKind { case reflect.Bool: err = d.decodeBool(name, input, outVal) case reflect.Interface: @@ -288,7 +298,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e err = d.decodeFunc(name, input, outVal) default: // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, inputKind) + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) } // If we reached here, then we successfully decoded SOMETHING, so @@ -306,7 +316,16 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) if val.IsValid() && val.Elem().IsValid() { return d.decode(name, data, val.Elem()) } + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. + // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + if !dataVal.IsValid() { dataVal = reflect.Zero(val.Type()) } @@ -323,7 +342,7 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) } func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) converted := true @@ -375,7 +394,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) } func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) dataType := dataVal.Type() @@ -417,7 +436,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er } func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) switch { @@ -460,7 +479,7 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e } func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) switch { @@ -491,7 +510,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) dataType := dataVal.Type() @@ -595,6 +614,20 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // Accumulate errors errors := make([]string, 0) + // If the input data is empty, then we just match what the input data is. 
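The typed-nil check added at the top of Decode above guards against a common Go pitfall: a nil *T stored in an interface{} is not equal to nil. A standalone illustration, independent of mapstructure's API:

```go
package main

import "fmt"

func main() {
	var p *string         // a nil pointer
	var i interface{} = p // an interface holding a typed nil

	fmt.Println(p == nil) // true
	fmt.Println(i == nil) // false: i carries the type *string even though the value is nil
}
```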
+ if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + for _, k := range dataVal.MapKeys() { fieldName := fmt.Sprintf("%s[%s]", name, k) @@ -706,11 +739,33 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return nil + } + // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. valType := val.Type() valElemType := valType.Elem() - if val.CanSet() { realVal := val if realVal.IsNil() || d.config.ZeroFields { @@ -752,33 +807,44 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valSlice := val if valSlice.IsNil() || d.config.ZeroFields { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + // Check input type if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } return fmt.Errorf( "'%s': source data must be an array or slice, got %s", name, dataValKind) } + // If the input value is empty, then don't allocate since non-nil != nil + if dataVal.Len() == 0 { + return nil + } + // Make a new slice to hold our result, same size as the original data. 
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) } @@ -888,10 +954,29 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) } dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) - } + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + m := make(map[string]interface{}) + mval := reflect.Indirect(reflect.ValueOf(&m)) + if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, mval, val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { dataValType := dataVal.Type() if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { return fmt.Errorf( diff --git a/vendor/github.com/ncabatoff/go-seq/.travis.yml b/vendor/github.com/ncabatoff/go-seq/.travis.yml new file mode 100644 index 000000000..7e25e8454 --- /dev/null +++ b/vendor/github.com/ncabatoff/go-seq/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +before_install: + - go get github.com/mattn/goveralls +matrix: + include: + - go: 1.6.x + script: go test -v -race ./... + - go: 1.x + script: + - diff -u <(echo -n) <(gofmt -d .) + - go install -v ./... + - go vet -v ./... + - $GOPATH/bin/goveralls -v -race -service=travis-ci + - go: master + script: go test -v -race ./... + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/github.com/ncabatoff/go-seq/LICENSE b/vendor/github.com/ncabatoff/go-seq/LICENSE new file mode 100644 index 000000000..6d87186a4 --- /dev/null +++ b/vendor/github.com/ncabatoff/go-seq/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 ncabatoff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
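Among the behaviours this mapstructure bump brings in (per the changelog) is struct-to-struct decoding, implemented above by flattening the source struct to a map first. A small sketch with two made-up types:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// wireUser and appUser are made-up types that share field names but are
// otherwise unrelated.
type wireUser struct {
	Name  string
	Admin bool
}

type appUser struct {
	Name  string
	Admin bool
}

func main() {
	src := wireUser{Name: "alice", Admin: true}

	var dst appUser
	// The source struct is flattened to a map internally, then decoded
	// into the destination struct.
	if err := mapstructure.Decode(src, &dst); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Name:alice Admin:true}
}
```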
diff --git a/vendor/github.com/ncabatoff/go-seq/README.md b/vendor/github.com/ncabatoff/go-seq/README.md new file mode 100644 index 000000000..47fef9596 --- /dev/null +++ b/vendor/github.com/ncabatoff/go-seq/README.md @@ -0,0 +1,44 @@ +# Package for ordering of Go values + +[![GoDoc](https://godoc.org/github.com/ncabatoff/go-seq/seq?status.svg)][godoc] +[![Build Status](https://travis-ci.org/ncabatoff/go-seq.svg?branch=master)][travis] +[![Coverage Status](https://coveralls.io/repos/github/ncabatoff/go-seq/badge.svg?branch=master)](https://coveralls.io/github/ncabatoff/go-seq?branch=master) + +This package is intended to allow ordering (most) values via reflection, +much like go-cmp allows comparing values for equality. + +This is helpful when trying to write `Less()` methods for sorting structures. +Provided all the types in question are supported, you can simply define it +as follows: + +```go +import "github.com/ncabatoff/go-seq/seq" + +type ( + MyType struct { + a int + b string + } + MyTypeSlice []MyType +) +func (m MyTypeSlice) Less(i, j int) bool { return seq.Compare(m[i], m[j]) < 0 } +``` + +Noteable unsupported types include the builtin `complex` type, channels, +functions, and maps with non-string keys. Pointers can be ordered if their +underlying types are orderable. + +[godoc]: https://godoc.org/github.com/ncabatoff/go-seq/seq +[travis]: https://travis-ci.org/ncabatoff/go-seq + +## Install + +``` +go get -u github.com/ncabatoff/go-seq/seq +``` + +## License + +MIT - See [LICENSE][license] file + +[license]: https://github.com/ncabatoff/go-seq/blob/master/LICENSE diff --git a/vendor/github.com/ncabatoff/go-seq/seq/compare.go b/vendor/github.com/ncabatoff/go-seq/seq/compare.go new file mode 100644 index 000000000..81b49ccbf --- /dev/null +++ b/vendor/github.com/ncabatoff/go-seq/seq/compare.go @@ -0,0 +1,176 @@ +package seq + +import ( + "fmt" + "reflect" + "sort" +) + +// Compare returns 0 if a and b are equal, -1 if a < b, or 1 if a > b. +// Panics if a and b are not of the same type, or are of a type not listed here. +// * Bools are compared assuming false < true. +// * Strings, integer and float values are compared as Go compares them. +// * Two nil pointers are equal; one nil pointer is treated as smaller than a non-nil pointer. +// * Non-nil pointers are compared by comparing the values they point to. +// * Structures are compared by comparing their fields in order. +// * Slices are compared by comparing elements sequentially. If the slices are of different +// length and all elements are the same up to the shorter length, the shorter slice is treated as +// smaller. +// * Maps can only be compared if they have string keys, in which case the ordered list of +// keys are first compared as string slices, and if they're equal then the values are compared +// sequentially in key order. 
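Beyond the Less() example in the README above, seq.Compare drops straight into sort.Slice; a short sketch with a hypothetical record type:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/ncabatoff/go-seq/seq"
)

// record is a hypothetical type; Compare orders structs field by field,
// so this sorts by Host first, then by Port.
type record struct {
	Host string
	Port int
}

func main() {
	rs := []record{
		{"db", 5432},
		{"app", 8080},
		{"app", 443},
	}
	sort.Slice(rs, func(i, j int) bool { return seq.Compare(rs[i], rs[j]) < 0 })
	fmt.Println(rs) // [{app 443} {app 8080} {db 5432}]
}
```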
+func Compare(a, b interface{}) int { + return compareValue(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +func boolToInt(b bool) int { + if b { + return 1 + } + return 0 +} + +func compareValue(ir1, ir2 reflect.Value) int { + var zerovalue reflect.Value + r1, r2 := reflect.Indirect(ir1), reflect.Indirect(ir2) + + if r1 == zerovalue { + if r2 == zerovalue { + return 0 + } + return -1 + } + if r2 == zerovalue { + return 1 + } + + switch r1.Kind() { + case reflect.Bool: + v1, v2 := boolToInt(r1.Bool()), boolToInt(r2.Bool()) + if v1 < v2 { + return -1 + } + if v1 > v2 { + return 1 + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v1, v2 := r1.Int(), r2.Int() + if v1 < v2 { + return -1 + } + if v1 > v2 { + return 1 + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + v1, v2 := r1.Uint(), r2.Uint() + if v1 < v2 { + return -1 + } + if v1 > v2 { + return 1 + } + case reflect.Float32, reflect.Float64: + v1, v2 := r1.Float(), r2.Float() + if v1 < v2 { + return -1 + } + if v1 > v2 { + return 1 + } + case reflect.Map: + return compareMap(r1, r2) + case reflect.Struct: + return compareStruct(r1, r2) + case reflect.Slice: + if r1.Type().Elem().Kind() == reflect.Uint8 { + // Not using bytes.Compare because that fails on unexported fields: + // return bytes.Compare(r1.Interface().([]byte), r2.Interface().([]byte)) + var s string + strtype := reflect.TypeOf(s) + v1, v2 := r1.Convert(strtype).String(), r2.Convert(strtype).String() + if v1 < v2 { + return -1 + } + if v1 > v2 { + return 1 + } + } + return compareSlice(r1, r2) + case reflect.String: + v1, v2 := r1.String(), r2.String() + if v1 < v2 { + return -1 + } + if v1 > v2 { + return 1 + } + default: + panic(fmt.Sprintf("don't know how to compare values of type %v", r1.Type())) + } + return 0 +} + +func compareStruct(r1, r2 reflect.Value) int { + if r1.Type() != r2.Type() { + panic(fmt.Sprintf("s1 and s2 are not of the same type: %v, %v", r1.Type(), r2.Type())) + } + + n := r1.NumField() + for i := 0; i < n; i++ { + c := compareValue(r1.Field(i), r2.Field(i)) + if c != 0 { + return c + } + } + return 0 +} + +func compareSlice(r1, r2 reflect.Value) int { + maxlen := r1.Len() + if r2.Len() < maxlen { + maxlen = r2.Len() + } + for i := 0; i < maxlen; i++ { + c := compareValue(r1.Index(i), r2.Index(i)) + if c != 0 { + return c + } + } + if r1.Len() > maxlen { + return 1 + } + if r2.Len() > maxlen { + return -1 + } + return 0 +} + +func sortedKeys(r1 reflect.Value) []string { + keys := make([]string, 0, r1.Len()) + for _, k := range r1.MapKeys() { + keys = append(keys, k.String()) + } + sort.Strings(keys) + return keys +} + +func compareMap(r1, r2 reflect.Value) int { + if r1.Type().Key().Kind() != reflect.String { + panic("can only compare maps with keys of type string") + } + + s1, s2 := sortedKeys(r1), sortedKeys(r2) + c := compareSlice(reflect.ValueOf(s1), reflect.ValueOf(s2)) + if c != 0 { + return c + } + + for _, k := range s1 { + vk := reflect.ValueOf(k) + c := compareValue(r1.MapIndex(vk), r2.MapIndex(vk)) + if c != 0 { + return c + } + } + return 0 +} diff --git a/vendor/github.com/ncabatoff/process-exporter/.gitignore b/vendor/github.com/ncabatoff/process-exporter/.gitignore index 77ed2b599..ce877f7b1 100644 --- a/vendor/github.com/ncabatoff/process-exporter/.gitignore +++ b/vendor/github.com/ncabatoff/process-exporter/.gitignore @@ -1,4 +1,5 @@ .*.sw? 
process-exporter -.tarballs -process-exporter-*.tar.gz +load-generator +integration-tester +dist diff --git a/vendor/github.com/ncabatoff/process-exporter/.goreleaser.yml b/vendor/github.com/ncabatoff/process-exporter/.goreleaser.yml new file mode 100644 index 000000000..d543e2059 --- /dev/null +++ b/vendor/github.com/ncabatoff/process-exporter/.goreleaser.yml @@ -0,0 +1,39 @@ +builds: + - main: cmd/process-exporter/main.go + binary: process-exporter + flags: -tags netgo + goos: + - linux + goarch: + - amd64 + - 386 + - arm + - arm64 + - ppc64 + - ppc64le +archive: + name_template: "process-exporter-{{ .Version }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + wrap_in_directory: true +nfpm: + homepage: https://github.com/ncabatoff/process-exporter + maintainer: nick.cabatoff+procexp@gmail.com + description: Prometheus exporter to report on processes running + license: MIT + formats: + - deb + - rpm + bindir: /usr/bin + files: + "packaging/process-exporter.service": "/lib/systemd/system/process-exporter.service" + config_files: + "packaging/conf/all.yaml": "/etc/process-exporter/all.yaml" + scripts: + postinstall: "packaging/scripts/postinstall.sh" + postremove: "packaging/scripts/postremove.sh" + preremove: "packaging/scripts/preremove.sh" +release: + github: + owner: ncabatoff + name: process-exporter + draft: false + prerelease: true diff --git a/vendor/github.com/ncabatoff/process-exporter/.promu.yml b/vendor/github.com/ncabatoff/process-exporter/.promu.yml deleted file mode 100644 index ae8ab4ebc..000000000 --- a/vendor/github.com/ncabatoff/process-exporter/.promu.yml +++ /dev/null @@ -1,35 +0,0 @@ -repository: - path: github.com/ncabatoff/process-exporter -build: - binaries: - - name: process-exporter - path: ./cmd/process-exporter - flags: -a -tags netgo -tarball: - files: - - LICENSE -crossbuild: - platforms: - - linux/amd64 - - linux/386 - - darwin/amd64 - - darwin/386 - - freebsd/amd64 - - freebsd/386 - - openbsd/amd64 - - openbsd/386 - - netbsd/amd64 - - netbsd/386 - - dragonfly/amd64 - - linux/arm - - linux/arm64 - - freebsd/arm - # Temporarily deactivated as golang.org/x/sys does not have syscalls - # implemented for that os/platform combination. 
- #- openbsd/arm - #- linux/mips64 - #- linux/mips64le - - netbsd/arm - - linux/ppc64 - - linux/ppc64le - diff --git a/vendor/github.com/ncabatoff/process-exporter/.travis.yml b/vendor/github.com/ncabatoff/process-exporter/.travis.yml new file mode 100644 index 000000000..d3a80be80 --- /dev/null +++ b/vendor/github.com/ncabatoff/process-exporter/.travis.yml @@ -0,0 +1,29 @@ +services: + - docker + +language: go + +env: + - IMAGE_TAG=`echo $TRAVIS_TAG|sed s/v//` + +go: + - 1.10.x + +before_install: + - sudo apt-get -qq update + - sudo apt-get install -y rpm + +go_import_path: github.com/ncabatoff/process-exporter + +script: + - make style vet test build smoke docker + - if [ -n "$IMAGE_TAG" ]; then make docker DOCKER_IMAGE_TAG=$IMAGE_TAG; fi + +after_success: + - docker login -u $DOCKER_USER -p "$DOCKER_PASSWORD" + - > + test -n "$TRAVIS_TAG" && + docker tag ncabatoff/process-exporter:$IMAGE_TAG ncabatoff/process-exporter:latest && + docker push ncabatoff/process-exporter:$IMAGE_TAG && + docker push ncabatoff/process-exporter:latest && + curl -sL http://git.io/goreleaser | bash diff --git a/vendor/github.com/ncabatoff/process-exporter/Dockerfile b/vendor/github.com/ncabatoff/process-exporter/Dockerfile index 77abaff65..926d34a84 100644 --- a/vendor/github.com/ncabatoff/process-exporter/Dockerfile +++ b/vendor/github.com/ncabatoff/process-exporter/Dockerfile @@ -1,17 +1,21 @@ # Start from a Debian image with the latest version of Go installed # and a workspace (GOPATH) configured at /go. -FROM golang - -# Copy the local package files to the container's workspace. -ADD . /go/src/github.com/ncabatoff/process-exporter +FROM golang:1.10 AS build +#RUN curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $GOPATH/bin/dep +#RUN chmod +x $GOPATH/bin/dep +WORKDIR /go/src/github.com/ncabatoff/process-exporter +ADD . . +#RUN dep ensure # Build the process-exporter command inside the container. -RUN make -C /go/src/github.com/ncabatoff/process-exporter +RUN make -USER root +FROM scratch + +COPY --from=build /go/src/github.com/ncabatoff/process-exporter/process-exporter /bin/process-exporter # Run the process-exporter command by default when the container starts. -ENTRYPOINT ["/go/src/github.com/ncabatoff/process-exporter/process-exporter"] +ENTRYPOINT ["/bin/process-exporter"] # Document that the service listens on port 9256. 
EXPOSE 9256 diff --git a/vendor/github.com/ncabatoff/process-exporter/Dockerfile.cloudbuild b/vendor/github.com/ncabatoff/process-exporter/Dockerfile.cloudbuild new file mode 100644 index 000000000..cb42b831b --- /dev/null +++ b/vendor/github.com/ncabatoff/process-exporter/Dockerfile.cloudbuild @@ -0,0 +1,4 @@ +FROM scratch +COPY gopath/bin/process-exporter /process-exporter +ENTRYPOINT ["/process-exporter"] +EXPOSE 9256 diff --git a/vendor/github.com/ncabatoff/process-exporter/Gopkg.lock b/vendor/github.com/ncabatoff/process-exporter/Gopkg.lock index 757df73e1..1b0c977be 100644 --- a/vendor/github.com/ncabatoff/process-exporter/Gopkg.lock +++ b/vendor/github.com/ncabatoff/process-exporter/Gopkg.lock @@ -3,73 +3,156 @@ [[projects]] branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] - revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + pruneopts = "UT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - branch = "master" + digest = "1:15042ad3498153684d09f393bbaec6b216c8eec6d61f63dff711de7d64ed8861" name = "github.com/golang/protobuf" packages = ["proto"] - revision = "17ce1425424ab154092bbb43af630bd647f3bb0d" + pruneopts = "UT" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" [[projects]] - branch = "master" - name = "github.com/kylelemons/godebug" - packages = ["diff","pretty"] - revision = "d65d576e9348f5982d7f6d83682b694e731a45c6" + digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/cmpopts", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "UT" + revision = "3af367b6b30c263d47e8895973edcca9a49cf029" + version = "v0.2.0" [[projects]] + digest = "1:ca955a9cd5b50b0f43d2cc3aeb35c951473eeca41b34eb67507f1dbcc0542394" + name = "github.com/kr/pretty" + packages = ["."] + pruneopts = "UT" + revision = "73f6ac0b30a98e433b289500d779f50c1a6f0712" + version = "v0.1.0" + +[[projects]] + digest = "1:15b5cc79aad436d47019f814fde81a10221c740dc8ddf769221a65097fb6c2e9" + name = "github.com/kr/text" + packages = ["."] + pruneopts = "UT" + revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f" + version = "v0.1.0" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" - version = "v1.0.0" + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" [[projects]] branch = "master" + digest = "1:71520363c3acc43c35a2a53f79f6c61f110a026326c8b16dbdd351164765feac" name = "github.com/ncabatoff/fakescraper" packages = ["."] + pruneopts = "UT" revision = "15938421d91a82d197de7fc59aebcac65c43407d" [[projects]] + branch = "master" + digest = "1:9e33629d4ec9e9344715a54fa0a107f23ce800deb13999b0190df04c3540ccb5" + name = "github.com/ncabatoff/go-seq" + packages = ["seq"] + pruneopts = "UT" + revision = "b08ef85ed83364cba413c98a94bbd4169a0ce70b" + +[[projects]] + branch = "add-proc-status" + digest = "1:df5079557e0fa0fe9fb973f84fffd52e32ef26ada655900fdeea9b0848766c74" + name = "github.com/ncabatoff/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "UT" + revision = "e1a38cb53622f65e073c5e750e6498a44ebfbd2a" + +[[projects]] + digest = 
"1:b6221ec0f8903b556e127c449e7106b63e6867170c2d10a7c058623d086f2081" name = "github.com/prometheus/client_golang" packages = ["prometheus"] + pruneopts = "UT" revision = "c5b7fccd204277076155f10851dad72b76a49317" version = "v0.8.0" [[projects]] branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] - revision = "6f3806018612930941127f2a7c6c453ba2c527d2" + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" name = "github.com/prometheus/common" - packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"] - revision = "2f17f4a9d485bf34b4bfaccc273805040e4f86c8" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" name = "github.com/prometheus/procfs" - packages = [".","xfs"] - revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "UT" + revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" [[projects]] branch = "v1" + digest = "1:af715ae33cc1f5695c4b2a4e4b21d008add8802a99e15bb467ac7c32edb5000d" name = "gopkg.in/check.v1" packages = ["."] - revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec" + pruneopts = "UT" + revision = "788fd78401277ebd861206a03c884797c6ec5541" [[projects]] - branch = "v2" + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" name = "gopkg.in/yaml.v2" packages = ["."] - revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" + pruneopts = "UT" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "abd920f891c3e5fe2ee27ce40acbdde66e0799704d160b01f22530df003adfe1" + input-imports = [ + "github.com/google/go-cmp/cmp", + "github.com/google/go-cmp/cmp/cmpopts", + "github.com/ncabatoff/fakescraper", + "github.com/ncabatoff/go-seq/seq", + "github.com/ncabatoff/procfs", + "github.com/prometheus/client_golang/prometheus", + "gopkg.in/check.v1", + "gopkg.in/yaml.v2", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/ncabatoff/process-exporter/Gopkg.toml b/vendor/github.com/ncabatoff/process-exporter/Gopkg.toml index a5be94709..e800ee3c2 100644 --- a/vendor/github.com/ncabatoff/process-exporter/Gopkg.toml +++ b/vendor/github.com/ncabatoff/process-exporter/Gopkg.toml @@ -1,4 +1,3 @@ - # Gopkg.toml example # # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md @@ -17,30 +16,39 @@ # source = "github.com/myfork/project2" # # [[override]] -# name = "github.com/x/y" -# version = "2.4.0" +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true [[constraint]] - branch = "master" - name = "github.com/kylelemons/godebug" + name = "github.com/google/go-cmp" + version = "0.2.0" [[constraint]] branch = "master" name = "github.com/ncabatoff/fakescraper" [[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" + branch = "add-proc-status" + name = "github.com/ncabatoff/procfs" [[constraint]] - branch = "master" - name = "github.com/prometheus/procfs" + name = "github.com/prometheus/client_golang" + version = 
"0.8.0" [[constraint]] branch = "v1" name = "gopkg.in/check.v1" [[constraint]] - branch = "v2" name = "gopkg.in/yaml.v2" + version = "2.2.1" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/ncabatoff/process-exporter/Makefile b/vendor/github.com/ncabatoff/process-exporter/Makefile index 7ce4e28e8..0a6e70782 100644 --- a/vendor/github.com/ncabatoff/process-exporter/Makefile +++ b/vendor/github.com/ncabatoff/process-exporter/Makefile @@ -1,32 +1,12 @@ -# Copyright 2015 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -GO := GO15VENDOREXPERIMENT=1 go -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) +pkgs = $(shell go list ./... | grep -v /vendor/) PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_NAME ?= process-exporter +DOCKER_IMAGE_NAME ?= ncabatoff/process-exporter DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +SMOKE_TEST = -config.path packaging/conf/all.yaml -once-to-stdout-delay 1s |grep -q 'namedprocess_namegroup_memory_bytes{groupname="process-exporte",memtype="virtual"}' -ifdef DEBUG - bindata_flags = -debug -endif - - -all: format vet build test +all: format vet test build smoke style: @echo ">> checking code style" @@ -34,38 +14,37 @@ style: test: @echo ">> running short tests" - @$(GO) test -short $(pkgs) + go test -short $(pkgs) format: @echo ">> formatting code" - @$(GO) fmt $(pkgs) + go fmt $(pkgs) vet: @echo ">> vetting code" - @$(GO) vet $(pkgs) + go vet $(pkgs) -build: promu - @echo ">> building binaries" - @$(PROMU) build --prefix $(PREFIX) +build: + @echo ">> building code" + cd cmd/process-exporter; CGO_ENABLED=0 go build -o ../../process-exporter -a -tags netgo -tarball: promu - @echo ">> building release tarball" - @$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +smoke: + @echo ">> smoke testing process-exporter" + ./process-exporter $(SMOKE_TEST) -crossbuild: promu - @echo ">> cross-building" - @$(PROMU) crossbuild - @$(PROMU) crossbuild tarballs +integ: + @echo ">> integration testing process-exporter" + go build -o integration-tester cmd/integration-tester/main.go + go build -o load-generator cmd/load-generator/main.go + ./integration-tester -write-size-bytes 65536 + +install: + @echo ">> installing binary" + cd cmd/process-exporter; CGO_ENABLED=0 go install -a -tags netgo docker: @echo ">> building docker image" - @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . + docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . 
+ docker run --rm -v `pwd`/packaging:/packaging "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(SMOKE_TEST) -promu: - @echo ">> fetching promu" - @GOOS=$(shell uname -s | tr A-Z a-z) \ - GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(patsubst arm%,arm,$(shell uname -m)))) \ - $(GO) get -u github.com/prometheus/promu - - -.PHONY: all style format build test vet tarball crossbuild docker promu +.PHONY: all style format test vet build integ docker diff --git a/vendor/github.com/ncabatoff/process-exporter/README.md b/vendor/github.com/ncabatoff/process-exporter/README.md index 01cb32186..c14f17549 100644 --- a/vendor/github.com/ncabatoff/process-exporter/README.md +++ b/vendor/github.com/ncabatoff/process-exporter/README.md @@ -1,64 +1,111 @@ # process-exporter Prometheus exporter that mines /proc to report on selected processes. -The premise for this exporter is that sometimes you have apps that are -impractical to instrument directly, either because you don't control the code -or they're written in a language that isn't easy to instrument with Prometheus. -A fair bit of information can be gleaned from /proc, especially for -long-running programs. +[release]: https://github.com/ncabatoff/process-exporter/releases/latest -For most systems it won't be beneficial to create metrics for every process by -name: there are just too many of them and most don't do enough to merit it. -Various command-line options are provided to control how processes are grouped -and the groups are named. Run "process-exporter -man" to see a help page -giving details. +[![Release](https://img.shields.io/github/release/ncabatoff/process-exporter.svg?style=flat-square")][release] +[![Build Status](https://travis-ci.org/ncabatoff/process-exporter.svg?branch=master)](https://travis-ci.org/ncabatoff/process-exporter) +[![Powered By: GoReleaser](https://img.shields.io/badge/powered%20by-goreleaser-green.svg?branch=master)](https://github.com/goreleaser) -Metrics available currently include CPU usage, bytes written and read, and -number of processes in each group. +Some apps are impractical to instrument directly, either because you +don't control the code or they're written in a language that isn't easy to +instrument with Prometheus. We must instead resort to mining /proc. -Bytes read and written come from /proc/[pid]/io in recent enough kernels. -These correspond to the fields `read_bytes` and `write_bytes` respectively. -These IO stats come with plenty of caveats, see either the Linux kernel -documentation or man 5 proc. +## Installation -CPU usage comes from /proc/[pid]/stat fields utime (user time) and stime (system -time.) It has been translated into fractional seconds of CPU consumed. Since -it is a counter, using rate() will tell you how many fractional cores were running -code from this process during the interval given. +Either grab a package for your OS from the [Releases][release] page, or +install via [docker](https://hub.docker.com/r/ncabatoff/process-exporter/). -An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249 +## Running -## Instrumentation cost +Usage: -process-exporter will consume CPU in proportion to the number of processes in -the system and the rate at which new ones are created. The most expensive -parts - applying regexps and executing templates - are only applied once per -process seen. 
If you have mostly long-running processes process-exporter -should be lightweight: each time a scrape occurs, parsing of /proc/$pid/stat -and /proc/$pid/cmdline for every process being monitored and adding a few -numbers. +``` + process-exporter [options] -config.path filename.yml +``` -## Config +or via docker: + +``` + docker run -d --rm -p 9256:9256 --privileged -v /proc:/host/proc -v `pwd`:/config ncabatoff/process-exporter --procfs /host/proc -config.path /config/filename.yml + +``` + +Important options (run process-exporter --help for full list): + +-children (default:true) makes it so that any process that otherwise +isn't part of its own group becomes part of the first group found (if any) when +walking the process tree upwards. In other words, resource usage of +subprocesses is added to their parent's usage unless the subprocess identifies +as a different group name. + +-recheck (default:false) means that on each scrape the process names are +re-evaluated. This is disabled by default as an optimization, but since +processes can choose to change their names, this may result in a process +falling into the wrong group if we happen to see it for the first time before +it's assumed its proper name. + +-procnames is intended as a quick alternative to using a config file. Details +in the following section. + +## Configuration and group naming To select and group the processes to monitor, either provide command-line arguments or use a YAML configuration file. -To avoid confusion with the cmdline YAML element, we'll refer to the -null-delimited contents of `/proc//cmdline` as the array `argv[]`. +The recommended option is to use a config file via -config.path, but for +convenience and backwards compatability the -procnames/-namemapping options +exist as an alternative. + +### Using a config file + +The general format of the -config.path YAML file is a top-level +`process_names` section, containing a list of name matchers: + +``` +process_names: + - matcher1 + - matcher2 + ... + - matcherN +``` + +The default config shipped with the deb/rpm packages is: + +``` +process_names: + - name: "{{.Comm}}" + cmdline: + - '.+' +``` + +A process may only belong to one group: even if multiple items would match, the +first one listed in the file wins. + +(Side note: to avoid confusion with the cmdline YAML element, we'll refer to +the command-line arguments of a process `/proc//cmdline` as the array +`argv[]`.) + +#### Using a config file: group name Each item in `process_names` gives a recipe for identifying and naming processes. The optional `name` tag defines a template to use to name matching processes; if not specified, `name` defaults to `{{.ExeBase}}`. Template variables available: +- `{{.Comm}}` contains the basename of the original executable, i.e. 2nd field in `/proc//stat` - `{{.ExeBase}}` contains the basename of the executable - `{{.ExeFull}}` contains the fully qualified path of the executable +- `{{.Username}}` contains the username of the effective user - `{{.Matches}}` map contains all the matches resulting from applying cmdline regexps +#### Using a config file: process selectors + Each item in `process_names` must contain one or more selectors (`comm`, `exe` or `cmdline`); if more than one selector is present, they must all match. Each selector is a list of strings to match against a process's `comm`, `argv[0]`, -or in the case of `cmdline`, a regexp to apply to the command line. +or in the case of `cmdline`, a regexp to apply to the command line. 
The cmdline +regexp uses the [Go syntax](https://golang.org/pkg/regexp). For `comm` and `exe`, the list of strings is an OR, meaning any process matching any of the strings will be added to the item's group. @@ -67,10 +114,7 @@ For `cmdline`, the list of regexes is an AND, meaning they all must match. Any capturing groups in a regexp must use the `?P` option to assign a name to the capture, which is used to populate `.Matches`. -A process may only belong to one group: even if multiple items would match, the -first one listed in the file wins. - -Other performance tips: give an exe or comm clause in addition to any cmdline +Performance tip: give an exe or comm clause in addition to any cmdline clause, so you avoid executing the regexp when the executable name doesn't match. @@ -95,8 +139,7 @@ process_names: exe: - /usr/local/bin/process-exporter cmdline: - - -config.path\\s+(?P\\S+) - + - -config.path\s+(?P\S+) ``` @@ -118,43 +161,195 @@ process_names: ``` -## Docker +### Using -procnames/-namemapping instead of config.path -A docker image can be created with +Every name in the procnames list becomes a process group. The default name of +a process is the value found in the second field of /proc//stat +("comm"), which is truncated at 15 chars. Usually this is the same as the +name of the executable. + +If -namemapping isn't provided, every process with a comm value present +in -procnames is assigned to a group based on that name, and any other +processes are ignored. + +The -namemapping option is a comma-separated list of alternating +name,regexp values. It allows assigning a name to a process based on a +combination of the process name and command line. For example, using + + -namemapping "python2,([^/]+)\.py,java,-jar\s+([^/]+).jar" + +will make it so that each different python2 and java -jar invocation will be +tracked with distinct metrics. Processes whose remapped name is absent from +the procnames list will be ignored. On a Ubuntu Xenian machine being used as +a workstation, here's a good way of tracking resource usage for a few +different key user apps: + + process-exporter -namemapping "upstart,(--user)" \ + -procnames chromium-browse,bash,gvim,prometheus,process-exporter,upstart:-user + +Since upstart --user is the parent process of the X11 session, this will +make all apps started by the user fall into the group named "upstart:-user", +unless they're one of the others named explicitly with -procnames, like gvim. + +## Group Metrics + +There's no meaningful way to name a process that will only ever name a single process, so process-exporter assumes that every metric will be attached +to a group of processes - not a +[process group](https://en.wikipedia.org/wiki/Process_group) in the technical +sense, just one or more processes that meet a configuration's specification +of what should be monitored and how to name it. + +All these metrics start with `namedprocess_namegroup_` and have at minimum +the label `groupname`. + +### num_procs gauge + +Number of processes in this group. + +### cpu_user_seconds_total counter + +CPU usage based on /proc/[pid]/stat field utime(14) i.e. user time. +A value of 1 indicates that the processes in this group have been scheduled +in user mode for a total of 1 second on a single virtual CPU. + +### cpu_system_seconds_total counter + +CPU usage based on /proc/[pid]/stat field stime(15) i.e. system time. + +### read_bytes_total counter + +Bytes read based on /proc/[pid]/io field read_bytes. 
The man page +says + +> Attempt to count the number of bytes which this process really did cause to be fetched from the storage layer. This is accurate for block-backed filesystems. + +but I would take it with a grain of salt. + +### write_bytes_total counter + +Bytes written based on /proc/[pid]/io field write_bytes. As with +read_bytes, somewhat dubious. May be useful for isolating which processes +are doing the most I/O, but probably not measuring just how much I/O is happening. + +### major_page_faults_total counter + +Number of major page faults based on /proc/[pid]/stat field majflt(12). + +### minor_page_faults_total counter + +Number of minor page faults based on /proc/[pid]/stat field minflt(10). + +### context_switches_total counter + +Number of context switches based on /proc/[pid]/status fields voluntary_ctxt_switches +and nonvoluntary_ctxt_switches. The extra label `ctxswitchtype` can have two values: +`voluntary` and `nonvoluntary`. + +### memory_bytes gauge + +Number of bytes of memory used. The extra label `memtype` can have two values: + +*resident*: Field rss(24) from /proc/[pid]/stat, whose doc says: + +> This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out. + +*virtual*: Field vsize(23) from /proc/[pid]/stat, virtual memory size. + +*swapped*: Field VmSwap from /proc/[pid]/status, translated from KB to bytes. + +### open_filedesc gauge + +Number of file descriptors, based on counting how many entries are in the directory +/proc/[pid]/fd. + +### worst_fd_ratio gauge + +Worst ratio of open filedescs to filedesc limit, amongst all the procs in the +group. The limit is the fd soft limit based on /proc/[pid]/limits. + +Normally Prometheus metrics ought to be as "basic" as possible (i.e. the raw +values rather than a derived ratio), but we use a ratio here because nothing +else makes sense. Suppose there are 10 procs in a given group, each with a +soft limit of 4096, and one of them has 4000 open fds and the others all have +40, their total fdcount is 4360 and total soft limit is 40960, so the ratio +is 1:10, but in fact one of the procs is about to run out of fds. With +worst_fd_ratio we're able to know this: in the above example it would be +0.97, rather than the 0.10 you'd see if you computed sum(open_filedesc) / +sum(limit_filedesc). + +### oldest_start_time_seconds gauge + +Epoch time (seconds since 1970/1/1) at which the oldest process in the group +started. This is derived from field starttime(22) from /proc/[pid]/stat, added +to boot time to make it relative to epoch. + +### num_threads gauge + +Sum of number of threads of all process in the group. Based on field num_threads(20) +from /proc/[pid]/stat. + +### states gauge + +Number of threads in the group in each of various states, based on the field +state(3) from /proc/[pid]/stat. + +The extra label `state` can have these values: `Running`, `Sleeping`, `Waiting`, `Zombie`, `Other`. + +## Group Thread Metrics + +All these metrics start with `namedprocess_namegroup_` and have at minimum +the labels `groupname` and `threadname`. `threadname` is field comm(2) from +/proc/[pid]/stat. Just as groupname breaks the set of processes down into +groups, threadname breaks a given process group down into subgroups. + +### thread_count gauge + +Number of threads in this thread subgroup. 
+ +### thread_cpu_seconds_total counter + +Same as cpu_user_seconds_total and cpu_system_seconds_total, but broken down +per-thread subgroup. Unlike cpu_user_seconds_total/cpu_system_seconds_total, +the label `cpumode` is used to distinguish between `user` and `system` time. + +### thread_io_bytes_total counter + +Same as read_bytes_total and write_bytes_total, but broken down +per-thread subgroup. Unlike read_bytes_total/write_bytes_total, +the label `iomode` is used to distinguish between `read` and `write` bytes. + +### thread_major_page_faults_total counter + +Same as major_page_faults_total, but broken down per-thread subgroup. + +### thread_minor_page_faults_total counter + +Same as minor_page_faults_total, but broken down per-thread subgroup. + +### thread_context_switches_total counter + +Same as context_switches_total, but broken down per-thread subgroup. + +## Instrumentation cost + +process-exporter will consume CPU in proportion to the number of processes in +the system and the rate at which new ones are created. The most expensive +parts - applying regexps and executing templates - are only applied once per +process seen, unless the command-line option -recheck is provided. + +If you have mostly long-running processes process-exporter overhead should be +minimal: each time a scrape occurs, it will parse of /proc/$pid/stat and +/proc/$pid/cmdline for every process being monitored and add a few numbers. + +## Dashboards + +An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249 + +## Building + +Install [dep](https://github.com/golang/dep), then: ``` -make docker +dep ensure +make ``` - -Then run the docker, e.g. - -``` -docker run --privileged --name pexporter -d -v /proc:/host/proc -p 127.0.0.1:9256:9256 process-exporter:master -procfs /host/proc -procnames chromium-browse,bash,prometheus,gvim,upstart:-user -namemapping "upstart,(-user)" -``` - -This will expose metrics on http://localhost:9256/metrics. Leave off the -`127.0.0.1:` to publish on all interfaces. Leave off the --priviliged and -add the --user docker run argument if you only need to monitor processes -belonging to a single user. - -## History - -An earlier version of this exporter had options to enable auto-discovery of -which processes were consuming resources. This functionality has been removed. -These options were based on a percentage of resource usage, e.g. if an -untracked process consumed X% of CPU during a scrape, start tracking processes -with that name. However during any given scrape it's likely that most -processes are idle, so we could add a process that consumes minimal resources -but which happened to be active during the interval preceding the current -scrape. Over time this means that a great many processes wind up being -scraped, which becomes unmanageable to visualize. This could be mitigated by -looking at resource usage over longer intervals, but ultimately I didn't feel -this feature was important enough to invest more time in at this point. It may -re-appear at some point in the future, but no promises. - -Another lost feature: the "other" group was used to count usage by non-tracked -procs. This was useful to get an idea of what wasn't being monitored. But it -comes at a high cost: if you know what processes you care about, you're wasting -a lot of CPU to compute the usage of everything else that you don't care about. 
-The new approach is to minimize resources expended on non-tracked processes and -to require the user to whitelist the processes to track. diff --git a/vendor/github.com/ncabatoff/process-exporter/VERSION b/vendor/github.com/ncabatoff/process-exporter/VERSION deleted file mode 100644 index 6e8bf73aa..000000000 --- a/vendor/github.com/ncabatoff/process-exporter/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1.0 diff --git a/vendor/github.com/ncabatoff/process-exporter/cloudbuild.release.yaml b/vendor/github.com/ncabatoff/process-exporter/cloudbuild.release.yaml new file mode 100644 index 000000000..f1b200edb --- /dev/null +++ b/vendor/github.com/ncabatoff/process-exporter/cloudbuild.release.yaml @@ -0,0 +1,49 @@ +steps: +# - name: string +# args: string +# env: string +# dir: string +# id: string +# waitFor: string +# entrypoint: string +# secretEnv: string + + # Setup the workspace + - name: gcr.io/cloud-builders/go + env: ['PROJECT_ROOT=github.com/ncabatoff/process-exporter'] + args: ['env'] + + # Build project + - name: gcr.io/cloud-builders/docker + entrypoint: 'bash' + args: ['-c', 'docker build -t ncabatoff/process-exporter:`echo $TAG_NAME|sed s/^v//` .'] + + # Login to docker hub + - name: gcr.io/cloud-builders/docker + entrypoint: 'bash' + args: ['-c', 'docker login --username=ncabatoff --password=$$DOCKER_PASSWORD'] + secretEnv: ['DOCKER_PASSWORD'] + + # Push to docker hub + - name: gcr.io/cloud-builders/docker + entrypoint: 'bash' + args: ['-c', 'docker push ncabatoff/process-exporter:`echo $TAG_NAME|sed s/^v//`'] + + # Create github release + - name: goreleaser/goreleaser + entrypoint: /bin/sh + dir: gopath/src/github.com + env: ['GOPATH=/workspace/gopath'] + args: ['-c', 'cd ncabatoff/process-exporter && git tag $TAG_NAME && /goreleaser' ] + secretEnv: ['GITHUB_TOKEN'] + +secrets: + - kmsKeyName: projects/process-exporter/locations/global/keyRings/cloudbuild/cryptoKeys/mykey + secretEnv: + DOCKER_PASSWORD: | + CiQAeHUuEinm1h2j9mp8r0NjPw1l1bBwzDG+JHPUPf3GvtmdjXESMAD3wUauaxWrxid/zPunG67x + 5+1CYedV5exh0XwQ32eu4UkniS7HHJNWBudklaG0JA== + GITHUB_TOKEN: | + CiQAeHUuEhEKAvfIHlUZrCgHNScm0mDKI8Z1w/N3OzDk8Ql6kAUSUQD3wUau7qRc+H7OnTUo6b2Z + DKA1eMKHNg729KfHj2ZMqZXinrJloYMbZcZRXP9xv91xCq6QJB5UoFoyYDnXGdvgXC08YUstR6UB + H0bwHhe1GQ== diff --git a/vendor/github.com/ncabatoff/process-exporter/cloudbuild.yaml b/vendor/github.com/ncabatoff/process-exporter/cloudbuild.yaml new file mode 100644 index 000000000..a100a02af --- /dev/null +++ b/vendor/github.com/ncabatoff/process-exporter/cloudbuild.yaml @@ -0,0 +1,32 @@ +steps: +# - name: string +# args: string +# env: string +# dir: string +# id: string +# waitFor: string +# entrypoint: string +# secretEnv: string +# - name: gcr.io/cloud-builders/curl +# args: ['-L', '-s', '-o', 'dep', 'https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64'] +# - name: ubuntu +# args: ['chmod', '+x', 'dep'] + # Setup the workspace + - name: gcr.io/cloud-builders/go + env: ['PROJECT_ROOT=github.com/ncabatoff/process-exporter'] + args: ['env'] + # Run dep in the workspace created in previous step +# - name: gcr.io/cloud-builders/go +# entrypoint: /bin/sh +# dir: gopath/src/github.com +# env: ['GOPATH=/workspace/gopath'] +# args: ['-c', 'cd ncabatoff/process-exporter && /workspace/dep ensure -vendor-only' ] + - name: gcr.io/cloud-builders/go + entrypoint: /bin/sh + dir: gopath/src/github.com + env: ['GOPATH=/workspace/gopath'] + args: ['-c', 'make -C ncabatoff/process-exporter style vet test build integ install' ] + - name: gcr.io/cloud-builders/docker + args: 
['build', '--tag=gcr.io/$PROJECT_ID/process-exporter', '.', '-f', 'Dockerfile.cloudbuild'] +images: ['gcr.io/$PROJECT_ID/process-exporter'] + \ No newline at end of file diff --git a/vendor/github.com/ncabatoff/process-exporter/common.go b/vendor/github.com/ncabatoff/process-exporter/common.go index ae5e82a37..c8502f14c 100644 --- a/vendor/github.com/ncabatoff/process-exporter/common.go +++ b/vendor/github.com/ncabatoff/process-exporter/common.go @@ -1,14 +1,18 @@ package common +import "fmt" + type ( - NameAndCmdline struct { - Name string - Cmdline []string + ProcAttributes struct { + Name string + Cmdline []string + Username string } MatchNamer interface { // MatchAndName returns false if the match failed, otherwise // true and the resulting name. - MatchAndName(NameAndCmdline) (bool, string) + MatchAndName(ProcAttributes) (bool, string) + fmt.Stringer } ) diff --git a/vendor/github.com/ncabatoff/process-exporter/proc/grouper.go b/vendor/github.com/ncabatoff/process-exporter/proc/grouper.go index 6ec3067ac..949c0070c 100644 --- a/vendor/github.com/ncabatoff/process-exporter/proc/grouper.go +++ b/vendor/github.com/ncabatoff/process-exporter/proc/grouper.go @@ -1,173 +1,179 @@ package proc import ( - common "github.com/ncabatoff/process-exporter" "time" + + seq "github.com/ncabatoff/go-seq/seq" + common "github.com/ncabatoff/process-exporter" ) type ( + // Grouper is the top-level interface to the process metrics. All tracked + // procs sharing the same group name are aggregated. Grouper struct { - namer common.MatchNamer - trackChildren bool - // track how much was seen last time so we can report the delta - GroupStats map[string]Counts - tracker *Tracker + // groupAccum records the historical accumulation of a group so that + // we can avoid ever decreasing the counts we return. + groupAccum map[string]Counts + tracker *Tracker + threadAccum map[string]map[string]Threads + debug bool } - GroupCountMap map[string]GroupCounts + // GroupByName maps group name to group metrics. + GroupByName map[string]Group - GroupCounts struct { + // Threads collects metrics for threads in a group sharing a thread name. + Threads struct { + Name string + NumThreads int Counts - Procs int - Memresident uint64 - Memvirtual uint64 + } + + // Group describes the metrics of a single group. + Group struct { + Counts + States + Wchans map[string]int + Procs int + Memory OldestStartTime time.Time OpenFDs uint64 WorstFDratio float64 + NumThreads uint64 + Threads []Threads } ) -func NewGrouper(trackChildren bool, namer common.MatchNamer) *Grouper { +// Returns true if x < y. Test designers should ensure they always have +// a unique name/numthreads combination for each group. +func lessThreads(x, y Threads) bool { return seq.Compare(x, y) < 0 } + +// NewGrouper creates a grouper. +func NewGrouper(namer common.MatchNamer, trackChildren, alwaysRecheck, debug bool) *Grouper { g := Grouper{ - trackChildren: trackChildren, - namer: namer, - GroupStats: make(map[string]Counts), - tracker: NewTracker(), + groupAccum: make(map[string]Counts), + threadAccum: make(map[string]map[string]Threads), + tracker: NewTracker(namer, trackChildren, alwaysRecheck, debug), + debug: debug, } return &g } -func (g *Grouper) checkAncestry(idinfo ProcIdInfo, newprocs map[ProcId]ProcIdInfo) string { - ppid := idinfo.ParentPid - pProcId := g.tracker.ProcIds[ppid] - if pProcId.Pid < 1 { - // Reached root of process tree without finding a tracked parent. 
- g.tracker.Ignore(idinfo.ProcId) - return "" - } - - // Is the parent already known to the tracker? - if ptproc, ok := g.tracker.Tracked[pProcId]; ok { - if ptproc != nil { - // We've found a tracked parent. - g.tracker.Track(ptproc.GroupName, idinfo) - return ptproc.GroupName - } else { - // We've found an untracked parent. - g.tracker.Ignore(idinfo.ProcId) - return "" - } - } - - // Is the parent another new process? - if pinfoid, ok := newprocs[pProcId]; ok { - if name := g.checkAncestry(pinfoid, newprocs); name != "" { - // We've found a tracked parent, which implies this entire lineage should be tracked. - g.tracker.Track(name, idinfo) - return name - } - } - - // Parent is dead, i.e. we never saw it, or there's no tracked proc in our ancestry. - g.tracker.Ignore(idinfo.ProcId) - return "" - -} - -// Update tracks any new procs that should be according to policy, and updates -// the metrics for already tracked procs. Permission errors are returned as a -// count, and will not affect the error return value. -func (g *Grouper) Update(iter ProcIter) (int, error) { - newProcs, permErrs, err := g.tracker.Update(iter) - if err != nil { - return permErrs, err - } - - // Step 1: track any new proc that should be tracked based on its name and cmdline. - untracked := make(map[ProcId]ProcIdInfo) - for _, idinfo := range newProcs { - wanted, gname := g.namer.MatchAndName(common.NameAndCmdline{Name: idinfo.Name, Cmdline: idinfo.Cmdline}) - if !wanted { - untracked[idinfo.ProcId] = idinfo - continue - } - - g.tracker.Track(gname, idinfo) - } - - // Step 2: track any untracked new proc that should be tracked because its parent is tracked. - if !g.trackChildren { - return permErrs, nil - } - - for _, idinfo := range untracked { - if _, ok := g.tracker.Tracked[idinfo.ProcId]; ok { - // Already tracked or ignored - continue - } - - g.checkAncestry(idinfo, untracked) - } - return permErrs, nil -} - -// groups returns the aggregate metrics for all groups tracked. This reflects -// solely what's currently running. 
-func (g *Grouper) groups() GroupCountMap { - gcounts := make(GroupCountMap) - +func groupadd(grp Group, ts Update) Group { var zeroTime time.Time - for _, tinfo := range g.tracker.Tracked { - if tinfo == nil { - continue - } - cur := gcounts[tinfo.GroupName] - cur.Procs++ - tstats := tinfo.GetStats() - cur.Memresident += tstats.Memory.Resident - cur.Memvirtual += tstats.Memory.Virtual - cur.OpenFDs += tstats.Filedesc.Open - openratio := float64(tstats.Filedesc.Open) / float64(tstats.Filedesc.Limit) - if cur.WorstFDratio < openratio { - cur.WorstFDratio = openratio - } - cur.Counts.Cpu += tstats.latest.Cpu - cur.Counts.ReadBytes += tstats.latest.ReadBytes - cur.Counts.WriteBytes += tstats.latest.WriteBytes - if cur.OldestStartTime == zeroTime || tstats.start.Before(cur.OldestStartTime) { - cur.OldestStartTime = tstats.start - } - gcounts[tinfo.GroupName] = cur + + grp.Procs++ + grp.Memory.ResidentBytes += ts.Memory.ResidentBytes + grp.Memory.VirtualBytes += ts.Memory.VirtualBytes + grp.Memory.VmSwapBytes += ts.Memory.VmSwapBytes + if ts.Filedesc.Open != -1 { + grp.OpenFDs += uint64(ts.Filedesc.Open) + } + openratio := float64(ts.Filedesc.Open) / float64(ts.Filedesc.Limit) + if grp.WorstFDratio < openratio { + grp.WorstFDratio = openratio + } + grp.NumThreads += ts.NumThreads + grp.Counts.Add(ts.Latest) + grp.States.Add(ts.States) + if grp.OldestStartTime == zeroTime || ts.Start.Before(grp.OldestStartTime) { + grp.OldestStartTime = ts.Start } - return gcounts + if grp.Wchans == nil { + grp.Wchans = make(map[string]int) + } + for wchan, count := range ts.Wchans { + grp.Wchans[wchan] += count + } + + return grp } -// Groups returns GroupCounts with Counts that never decrease in value from one -// call to the next. Even if processes exit, their CPU and IO contributions up -// to that point are included in the results. Even if no processes remain -// in a group it will still be included in the results. -func (g *Grouper) Groups() GroupCountMap { - groups := g.groups() +// Update asks the tracker to report on each tracked process by name. +// These are aggregated by groupname, augmented by accumulated counts +// from the past, and returned. Note that while the Tracker reports +// only what counts have changed since last cycle, Grouper.Update +// returns counts that never decrease. Even once the last process +// with name X disappears, name X will still appear in the results +// with the same counts as before; of course, all non-count metrics +// will be zero. +func (g *Grouper) Update(iter Iter) (CollectErrors, GroupByName, error) { + cerrs, tracked, err := g.tracker.Update(iter) + if err != nil { + return cerrs, nil, err + } + return cerrs, g.groups(tracked), nil +} - // First add any accumulated counts to what was just observed, +// Translate the updates into a new GroupByName and update internal history. +func (g *Grouper) groups(tracked []Update) GroupByName { + groups := make(GroupByName) + threadsByGroup := make(map[string][]ThreadUpdate) + + for _, update := range tracked { + groups[update.GroupName] = groupadd(groups[update.GroupName], update) + if update.Threads != nil { + threadsByGroup[update.GroupName] = + append(threadsByGroup[update.GroupName], update.Threads...) + } + } + + // Add any accumulated counts to what was just observed, // and update the accumulators. 
for gname, group := range groups { - if oldcounts, ok := g.GroupStats[gname]; ok { - group.Counts.Cpu += oldcounts.Cpu - group.Counts.ReadBytes += oldcounts.ReadBytes - group.Counts.WriteBytes += oldcounts.WriteBytes + if oldcounts, ok := g.groupAccum[gname]; ok { + group.Counts.Add(Delta(oldcounts)) } - g.GroupStats[gname] = group.Counts + g.groupAccum[gname] = group.Counts + group.Threads = g.threads(gname, threadsByGroup[gname]) groups[gname] = group } // Now add any groups that were observed in the past but aren't running now. - for gname, gcounts := range g.GroupStats { + for gname, gcounts := range g.groupAccum { if _, ok := groups[gname]; !ok { - groups[gname] = GroupCounts{Counts: gcounts} + groups[gname] = Group{Counts: gcounts} } } return groups } + +func (g *Grouper) threads(gname string, tracked []ThreadUpdate) []Threads { + if len(tracked) == 0 { + delete(g.threadAccum, gname) + return nil + } + + ret := make([]Threads, 0, len(tracked)) + threads := make(map[string]Threads) + + // First aggregate the thread metrics by thread name. + for _, nc := range tracked { + curthr := threads[nc.ThreadName] + curthr.NumThreads++ + curthr.Counts.Add(nc.Latest) + curthr.Name = nc.ThreadName + threads[nc.ThreadName] = curthr + } + + // Add any accumulated counts to what was just observed, + // and update the accumulators. + if history := g.threadAccum[gname]; history != nil { + for tname := range threads { + if oldcounts, ok := history[tname]; ok { + counts := threads[tname] + counts.Add(Delta(oldcounts.Counts)) + threads[tname] = counts + } + } + } + + g.threadAccum[gname] = threads + + for _, thr := range threads { + ret = append(ret, thr) + } + return ret +} diff --git a/vendor/github.com/ncabatoff/process-exporter/proc/read.go b/vendor/github.com/ncabatoff/process-exporter/proc/read.go index 290503641..d5e5b8aac 100644 --- a/vendor/github.com/ncabatoff/process-exporter/proc/read.go +++ b/vendor/github.com/ncabatoff/process-exporter/proc/read.go @@ -2,18 +2,21 @@ package proc import ( "fmt" + "os" + "path/filepath" + "strconv" "time" - "github.com/prometheus/procfs" + "github.com/ncabatoff/procfs" ) -func newProcIdStatic(pid, ppid int, startTime uint64, name string, cmdline []string) ProcIdStatic { - return ProcIdStatic{ProcId{pid, startTime}, ProcStatic{name, cmdline, ppid, time.Time{}}} -} +// ErrProcNotExist indicates a process couldn't be read because it doesn't exist, +// typically because it disappeared while we were reading it. +var ErrProcNotExist = fmt.Errorf("process does not exist") type ( - // ProcId uniquely identifies a process. - ProcId struct { + // ID uniquely identifies a process. + ID struct { // UNIX process id Pid int // The time the process started after system boot, the value is expressed @@ -21,82 +24,138 @@ type ( StartTimeRel uint64 } - // ProcStatic contains data read from /proc/pid/* - ProcStatic struct { - Name string - Cmdline []string - ParentPid int - StartTime time.Time + ThreadID ID + + // Static contains data read from /proc/pid/* + Static struct { + Name string + Cmdline []string + ParentPid int + StartTime time.Time + EffectiveUID int } - // ProcMetrics contains data read from /proc/pid/* - ProcMetrics struct { - CpuTime float64 - ReadBytes uint64 - WriteBytes uint64 + // Counts are metric counters common to threads and processes and groups. 
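+	// CPU times are expressed in seconds (clock ticks divided by userHZ);
+	// the byte, page-fault and context-switch fields are cumulative totals
+	// taken from /proc/[pid]/stat, /proc/[pid]/status and /proc/[pid]/io.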
+ Counts struct { + CPUUserTime float64 + CPUSystemTime float64 + ReadBytes uint64 + WriteBytes uint64 + MajorPageFaults uint64 + MinorPageFaults uint64 + CtxSwitchVoluntary uint64 + CtxSwitchNonvoluntary uint64 + } + + // Memory describes a proc's memory usage. + Memory struct { ResidentBytes uint64 VirtualBytes uint64 - OpenFDs uint64 - MaxFDs uint64 + VmSwapBytes uint64 } - ProcIdStatic struct { - ProcId - ProcStatic + // Filedesc describes a proc's file descriptor usage and soft limit. + Filedesc struct { + // Open is the count of open file descriptors, -1 if unknown. + Open int64 + // Limit is the fd soft limit for the process. + Limit uint64 } - ProcInfo struct { - ProcStatic - ProcMetrics + // States counts how many threads are in each state. + States struct { + Running int + Sleeping int + Waiting int + Zombie int + Other int } - ProcIdInfo struct { - ProcId - ProcStatic - ProcMetrics + // Metrics contains data read from /proc/pid/* + Metrics struct { + Counts + Memory + Filedesc + NumThreads uint64 + States + Wchan string } + // Thread contains per-thread data. + Thread struct { + ThreadID + ThreadName string + Counts + Wchan string + States + } + + // IDInfo groups all info for a single process. + IDInfo struct { + ID + Static + Metrics + Threads []Thread + } + + // ProcIdInfoThreads struct { + // ProcIdInfo + // Threads []ProcThread + // } + // Proc wraps the details of the underlying procfs-reading library. + // Any of these methods may fail if the process has disapeared. + // We try to return as much as possible rather than an error, e.g. + // if some /proc files are unreadable. Proc interface { // GetPid() returns the POSIX PID (process id). They may be reused over time. GetPid() int - // GetProcId() returns (pid,starttime), which can be considered a unique process id. - // It may fail if the caller doesn't have permission to read /proc//stat, or if - // the process has disapeared. - GetProcId() (ProcId, error) + // GetProcID() returns (pid,starttime), which can be considered a unique process id. + GetProcID() (ID, error) // GetStatic() returns various details read from files under /proc//. Technically // name may not be static, but we'll pretend it is. - // It may fail if the caller doesn't have permission to read those files, or if - // the process has disapeared. - GetStatic() (ProcStatic, error) + GetStatic() (Static, error) // GetMetrics() returns various metrics read from files under /proc//. - // It may fail if the caller doesn't have permission to read those files, or if - // the process has disapeared. - GetMetrics() (ProcMetrics, error) + // It returns an error on complete failure. Otherwise, it returns metrics + // and 0 on complete success, 1 if some (like I/O) couldn't be read. + GetMetrics() (Metrics, int, error) + GetStates() (States, error) + GetWchan() (string, error) + GetCounts() (Counts, int, error) + GetThreads() ([]Thread, error) } - // proc is a wrapper for procfs.Proc that caches results of some reads and implements Proc. - proc struct { + // proccache implements the Proc interface by acting as wrapper for procfs.Proc + // that caches results of some reads. + proccache struct { procfs.Proc - procid *ProcId - stat *procfs.ProcStat - cmdline []string - io *procfs.ProcIO - bootTime uint64 + procid *ID + stat *procfs.ProcStat + status *procfs.ProcStatus + cmdline []string + io *procfs.ProcIO + fs *FS + wchan *string } + proc struct { + proccache + } + + // procs is a fancier []Proc that saves on some copying. 
procs interface { get(int) Proc length() int } + // procfsprocs implements procs using procfs. procfsprocs struct { - Procs []procfs.Proc - bootTime uint64 + Procs []procfs.Proc + fs *FS } - // ProcIter is an iterator over a sequence of procs. - ProcIter interface { + // Iter is an iterator over a sequence of procs. + Iter interface { // Next returns true if the iterator is not exhausted. Next() bool // Close releases any resources the iterator uses. @@ -105,7 +164,7 @@ type ( Proc } - // procIterator implements the ProcIter interface using procfs. + // procIterator implements the Iter interface procIterator struct { // procs is the list of Proc we're iterating over. procs @@ -119,66 +178,101 @@ type ( Proc } - procIdInfos []ProcIdInfo + // Source is a source of procs. + Source interface { + // AllProcs returns all the processes in this source at this moment in time. + AllProcs() Iter + } + + // FS implements Source. + FS struct { + procfs.FS + BootTime uint64 + MountPoint string + debug bool + } ) -func procInfoIter(ps ...ProcIdInfo) ProcIter { - return &procIterator{procs: procIdInfos(ps), idx: -1} +func (ii IDInfo) String() string { + return fmt.Sprintf("%+v:%+v", ii.ID, ii.Static) } -func Info(p Proc) (ProcIdInfo, error) { - id, err := p.GetProcId() - if err != nil { - return ProcIdInfo{}, err - } - static, err := p.GetStatic() - if err != nil { - return ProcIdInfo{}, err - } - metrics, err := p.GetMetrics() - if err != nil { - return ProcIdInfo{}, err - } - return ProcIdInfo{id, static, metrics}, nil +// Add adds c2 to the counts. +func (c *Counts) Add(c2 Delta) { + c.CPUUserTime += c2.CPUUserTime + c.CPUSystemTime += c2.CPUSystemTime + c.ReadBytes += c2.ReadBytes + c.WriteBytes += c2.WriteBytes + c.MajorPageFaults += c2.MajorPageFaults + c.MinorPageFaults += c2.MinorPageFaults + c.CtxSwitchVoluntary += c2.CtxSwitchVoluntary + c.CtxSwitchNonvoluntary += c2.CtxSwitchNonvoluntary } -func (p procIdInfos) get(i int) Proc { - return &p[i] +// Sub subtracts c2 from the counts. +func (c Counts) Sub(c2 Counts) Delta { + c.CPUUserTime -= c2.CPUUserTime + c.CPUSystemTime -= c2.CPUSystemTime + c.ReadBytes -= c2.ReadBytes + c.WriteBytes -= c2.WriteBytes + c.MajorPageFaults -= c2.MajorPageFaults + c.MinorPageFaults -= c2.MinorPageFaults + c.CtxSwitchVoluntary -= c2.CtxSwitchVoluntary + c.CtxSwitchNonvoluntary -= c2.CtxSwitchNonvoluntary + return Delta(c) } -func (p procIdInfos) length() int { - return len(p) +func (s *States) Add(s2 States) { + s.Other += s2.Other + s.Running += s2.Running + s.Sleeping += s2.Sleeping + s.Waiting += s2.Waiting + s.Zombie += s2.Zombie } -func (p ProcIdInfo) GetPid() int { - return p.ProcId.Pid +func (p IDInfo) GetThreads() ([]Thread, error) { + return p.Threads, nil } -func (p ProcIdInfo) GetProcId() (ProcId, error) { - return p.ProcId, nil +// GetPid implements Proc. +func (p IDInfo) GetPid() int { + return p.ID.Pid } -func (p ProcIdInfo) GetStatic() (ProcStatic, error) { - return p.ProcStatic, nil +// GetProcID implements Proc. +func (p IDInfo) GetProcID() (ID, error) { + return p.ID, nil } -func (p ProcIdInfo) GetMetrics() (ProcMetrics, error) { - return p.ProcMetrics, nil +// GetStatic implements Proc. +func (p IDInfo) GetStatic() (Static, error) { + return p.Static, nil } -func (p procfsprocs) get(i int) Proc { - return &proc{Proc: p.Procs[i], bootTime: p.bootTime} +// GetCounts implements Proc. 
+func (p IDInfo) GetCounts() (Counts, int, error) { + return p.Metrics.Counts, 0, nil } -func (p procfsprocs) length() int { - return len(p.Procs) +// GetMetrics implements Proc. +func (p IDInfo) GetMetrics() (Metrics, int, error) { + return p.Metrics, 0, nil } -func (p *proc) GetPid() int { +// GetStates implements Proc. +func (p IDInfo) GetStates() (States, error) { + return p.States, nil +} + +func (p IDInfo) GetWchan() (string, error) { + return p.Wchan, nil +} + +func (p *proccache) GetPid() int { return p.Proc.PID } -func (p *proc) GetStat() (procfs.ProcStat, error) { +func (p *proccache) getStat() (procfs.ProcStat, error) { if p.stat == nil { stat, err := p.Proc.NewStat() if err != nil { @@ -190,19 +284,32 @@ func (p *proc) GetStat() (procfs.ProcStat, error) { return *p.stat, nil } -func (p *proc) GetProcId() (ProcId, error) { - if p.procid == nil { - stat, err := p.GetStat() +func (p *proccache) getStatus() (procfs.ProcStatus, error) { + if p.status == nil { + status, err := p.Proc.NewStatus() if err != nil { - return ProcId{}, err + return procfs.ProcStatus{}, err } - p.procid = &ProcId{Pid: p.GetPid(), StartTimeRel: stat.Starttime} + p.status = &status + } + + return *p.status, nil +} + +// GetProcID implements Proc. +func (p *proccache) GetProcID() (ID, error) { + if p.procid == nil { + stat, err := p.getStat() + if err != nil { + return ID{}, err + } + p.procid = &ID{Pid: p.GetPid(), StartTimeRel: stat.Starttime} } return *p.procid, nil } -func (p *proc) GetCmdLine() ([]string, error) { +func (p *proccache) getCmdLine() ([]string, error) { if p.cmdline == nil { cmdline, err := p.Proc.CmdLine() if err != nil { @@ -213,7 +320,18 @@ func (p *proc) GetCmdLine() ([]string, error) { return p.cmdline, nil } -func (p *proc) GetIo() (procfs.ProcIO, error) { +func (p *proccache) getWchan() (string, error) { + if p.wchan == nil { + wchan, err := p.Proc.Wchan() + if err != nil { + return "", err + } + p.wchan = &wchan + } + return *p.wchan, nil +} + +func (p *proccache) getIo() (procfs.ProcIO, error) { if p.io == nil { io, err := p.Proc.NewIO() if err != nil { @@ -224,56 +342,199 @@ func (p *proc) GetIo() (procfs.ProcIO, error) { return *p.io, nil } -func (p proc) GetStatic() (ProcStatic, error) { - cmdline, err := p.GetCmdLine() +// GetStatic returns the ProcStatic corresponding to this proc. +func (p *proccache) GetStatic() (Static, error) { + // /proc//cmdline is normally world-readable. + cmdline, err := p.getCmdLine() if err != nil { - return ProcStatic{}, err + return Static{}, err } - stat, err := p.GetStat() + + // /proc//stat is normally world-readable. + stat, err := p.getStat() if err != nil { - return ProcStatic{}, err + return Static{}, err } - startTime := time.Unix(int64(p.bootTime), 0) + startTime := time.Unix(int64(p.fs.BootTime), 0).UTC() startTime = startTime.Add(time.Second / userHZ * time.Duration(stat.Starttime)) - return ProcStatic{ - Name: stat.Comm, - Cmdline: cmdline, - ParentPid: stat.PPID, - StartTime: startTime, + + // /proc//status is normally world-readable. 
+ status, err := p.getStatus() + if err != nil { + return Static{}, err + } + + return Static{ + Name: stat.Comm, + Cmdline: cmdline, + ParentPid: stat.PPID, + StartTime: startTime, + EffectiveUID: status.UIDEffective, }, nil } -func (p proc) GetMetrics() (ProcMetrics, error) { - io, err := p.GetIo() +func (p proc) GetCounts() (Counts, int, error) { + stat, err := p.getStat() if err != nil { - return ProcMetrics{}, err + if err == os.ErrNotExist { + err = ErrProcNotExist + } + return Counts{}, 0, err } - stat, err := p.GetStat() + + status, err := p.getStatus() if err != nil { - return ProcMetrics{}, err + if err == os.ErrNotExist { + err = ErrProcNotExist + } + return Counts{}, 0, err } + + io, err := p.getIo() + softerrors := 0 + if err != nil { + softerrors++ + } + return Counts{ + CPUUserTime: float64(stat.UTime) / userHZ, + CPUSystemTime: float64(stat.STime) / userHZ, + ReadBytes: io.ReadBytes, + WriteBytes: io.WriteBytes, + MajorPageFaults: uint64(stat.MajFlt), + MinorPageFaults: uint64(stat.MinFlt), + CtxSwitchVoluntary: uint64(status.VoluntaryCtxtSwitches), + CtxSwitchNonvoluntary: uint64(status.NonvoluntaryCtxtSwitches), + }, softerrors, nil +} + +func (p proc) GetWchan() (string, error) { + return p.getWchan() +} + +func (p proc) GetStates() (States, error) { + stat, err := p.getStat() + if err != nil { + return States{}, err + } + + var s States + switch stat.State { + case "R": + s.Running++ + case "S": + s.Sleeping++ + case "D": + s.Waiting++ + case "Z": + s.Zombie++ + default: + s.Other++ + } + return s, nil +} + +// GetMetrics returns the current metrics for the proc. The results are +// not cached. +func (p proc) GetMetrics() (Metrics, int, error) { + counts, softerrors, err := p.GetCounts() + if err != nil { + return Metrics{}, 0, err + } + + // We don't need to check for error here because p will have cached + // the successful result of calling getStat in GetCounts. + // Since GetMetrics isn't a pointer receiver method, our callers + // won't see the effect of the caching between calls. 
+ stat, _ := p.getStat() + + // Ditto for states + states, _ := p.GetStates() + + status, err := p.getStatus() + if err != nil { + return Metrics{}, 0, err + } + numfds, err := p.Proc.FileDescriptorsLen() if err != nil { - return ProcMetrics{}, err + numfds = -1 + softerrors |= 1 } - limits, err := p.NewLimits() + + limits, err := p.Proc.NewLimits() if err != nil { - return ProcMetrics{}, err + return Metrics{}, 0, err } - return ProcMetrics{ - CpuTime: stat.CPUTime(), - ReadBytes: io.ReadBytes, - WriteBytes: io.WriteBytes, - ResidentBytes: uint64(stat.ResidentMemory()), - VirtualBytes: uint64(stat.VirtualMemory()), - OpenFDs: uint64(numfds), - MaxFDs: uint64(limits.OpenFiles), - }, nil + + wchan, err := p.getWchan() + if err != nil { + softerrors |= 1 + } + + return Metrics{ + Counts: counts, + Memory: Memory{ + ResidentBytes: uint64(stat.ResidentMemory()), + VirtualBytes: uint64(stat.VirtualMemory()), + VmSwapBytes: uint64(status.VmSwapKB * 1024), + }, + Filedesc: Filedesc{ + Open: int64(numfds), + Limit: uint64(limits.OpenFiles), + }, + NumThreads: uint64(stat.NumThreads), + States: states, + Wchan: wchan, + }, softerrors, nil } -type FS struct { - procfs.FS - BootTime uint64 +func (p proc) GetThreads() ([]Thread, error) { + fs, err := p.fs.threadFs(p.PID) + if err != nil { + return nil, err + } + + threads := []Thread{} + iter := fs.AllProcs() + for iter.Next() { + var id ID + id, err = iter.GetProcID() + if err != nil { + continue + } + + var static Static + static, err = iter.GetStatic() + if err != nil { + continue + } + + var counts Counts + counts, _, err = iter.GetCounts() + if err != nil { + continue + } + + wchan, _ := iter.GetWchan() + states, _ := iter.GetStates() + + threads = append(threads, Thread{ + ThreadID: ThreadID(id), + ThreadName: static.Name, + Counts: counts, + Wchan: wchan, + States: states, + }) + } + err = iter.Close() + if err != nil { + return nil, err + } + if len(threads) < 2 { + return nil, nil + } + + return threads, nil } // See https://github.com/prometheus/procfs/blob/master/proc_stat.go for details on userHZ. @@ -281,7 +542,7 @@ const userHZ = 100 // NewFS returns a new FS mounted under the given mountPoint. It will error // if the mount point can't be read. -func NewFS(mountPoint string) (*FS, error) { +func NewFS(mountPoint string, debug bool) (*FS, error) { fs, err := procfs.NewFS(mountPoint) if err != nil { return nil, err @@ -290,17 +551,38 @@ func NewFS(mountPoint string) (*FS, error) { if err != nil { return nil, err } - return &FS{fs, stat.BootTime}, nil + return &FS{fs, stat.BootTime, mountPoint, debug}, nil } -func (fs *FS) AllProcs() ProcIter { +func (fs *FS) threadFs(pid int) (*FS, error) { + mountPoint := filepath.Join(fs.MountPoint, strconv.Itoa(pid), "task") + tfs, err := procfs.NewFS(mountPoint) + if err != nil { + return nil, err + } + return &FS{tfs, fs.BootTime, mountPoint, false}, nil +} + +// AllProcs implements Source. +func (fs *FS) AllProcs() Iter { procs, err := fs.FS.AllProcs() if err != nil { err = fmt.Errorf("Error reading procs: %v", err) } - return &procIterator{procs: procfsprocs{procs, fs.BootTime}, err: err, idx: -1} + return &procIterator{procs: procfsprocs{procs, fs}, err: err, idx: -1} } +// get implements procs. +func (p procfsprocs) get(i int) Proc { + return &proc{proccache{Proc: p.Procs[i], fs: p.fs}} +} + +// length implements procs. +func (p procfsprocs) length() int { + return len(p.Procs) +} + +// Next implements Iter. 
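+// Callers typically loop while Next returns true, read the current proc
+// through the embedded Proc methods, and finally call Close to pick up any
+// error hit while listing procs. A rough sketch (error handling elided):
+//
+//	for iter.Next() {
+//		id, _ := iter.GetProcID()
+//		// ...
+//	}
+//	err := iter.Close()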
func (pi *procIterator) Next() bool { pi.idx++ if pi.idx < pi.procs.length() { @@ -311,6 +593,7 @@ func (pi *procIterator) Next() bool { return pi.idx < pi.procs.length() } +// Close implements Iter. func (pi *procIterator) Close() error { pi.Next() pi.procs = nil diff --git a/vendor/github.com/ncabatoff/process-exporter/proc/tracker.go b/vendor/github.com/ncabatoff/process-exporter/proc/tracker.go index 30458a16a..126bc6596 100644 --- a/vendor/github.com/ncabatoff/process-exporter/proc/tracker.go +++ b/vendor/github.com/ncabatoff/process-exporter/proc/tracker.go @@ -2,179 +2,432 @@ package proc import ( "fmt" - "os" + "log" + "os/user" + "strconv" "time" + + seq "github.com/ncabatoff/go-seq/seq" + common "github.com/ncabatoff/process-exporter" ) type ( - Counts struct { - Cpu float64 - ReadBytes uint64 - WriteBytes uint64 - } - - Memory struct { - Resident uint64 - Virtual uint64 - } - - Filedesc struct { - Open uint64 - Limit uint64 - } - // Tracker tracks processes and records metrics. Tracker struct { - // Tracked holds the processes are being monitored. Processes + // namer determines what processes to track and names them + namer common.MatchNamer + // tracked holds the processes are being monitored. Processes // may be blacklisted such that they no longer get tracked by - // setting their value in the Tracked map to nil. - Tracked map[ProcId]*TrackedProc - // ProcIds is a map from pid to ProcId. This is a convenience + // setting their value in the tracked map to nil. + tracked map[ID]*trackedProc + // procIds is a map from pid to ProcId. This is a convenience // to allow finding the Tracked entry of a parent process. - ProcIds map[int]ProcId + procIds map[int]ID + // trackChildren makes Tracker track descendants of procs the + // namer wanted tracked. + trackChildren bool + // never ignore processes, i.e. always re-check untracked processes in case comm has changed + alwaysRecheck bool + username map[int]string + debug bool } - // TrackedProc accumulates metrics for a process, as well as + // Delta is an alias of Counts used to signal that its contents are not + // totals, but rather the result of subtracting two totals. + Delta Counts + + trackedThread struct { + name string + accum Counts + latest Delta + lastUpdate time.Time + wchan string + } + + // trackedProc accumulates metrics for a process, as well as // remembering an optional GroupName tag associated with it. - TrackedProc struct { + trackedProc struct { // lastUpdate is used internally during the update cycle to find which procs have exited lastUpdate time.Time - // info is the most recently obtained info for this proc - info ProcInfo - // accum is the total CPU and IO accrued since we started tracking this proc - accum Counts - // lastaccum is the CPU and IO accrued in the last Update() - lastaccum Counts - // GroupName is an optional tag for this proc. - GroupName string + // static + static Static + metrics Metrics + // lastaccum is the increment to the counters seen in the last update. + lastaccum Delta + // groupName is the tag for this proc given by the namer. + groupName string + threads map[ThreadID]trackedThread } - trackedStats struct { - aggregate, latest Counts + // ThreadUpdate describes what's changed for a thread since the last cycle. + ThreadUpdate struct { + // ThreadName is the name of the thread based on field of stat. + ThreadName string + // Latest is how much the counts increased since last cycle. + Latest Delta + } + + // Update reports on the latest stats for a process. 
+ Update struct { + // GroupName is the name given by the namer to the process. + GroupName string + // Latest is how much the counts increased since last cycle. + Latest Delta + // Memory is the current memory usage. Memory + // Filedesc is the current fd usage/limit. Filedesc - start time.Time + // Start is the time the process started. + Start time.Time + // NumThreads is the number of threads. + NumThreads uint64 + // States is how many processes are in which run state. + States + // Wchans is how many threads are in each non-zero wchan. + Wchans map[string]int + // Threads are the thread updates for this process. + Threads []ThreadUpdate + } + + // CollectErrors describes non-fatal errors found while collecting proc + // metrics. + CollectErrors struct { + // Read is incremented every time GetMetrics() returns an error. + // This means we failed to load even the basics for the process, + // and not just because it disappeared on us. + Read int + // Partial is incremented every time we're unable to collect + // some metrics (e.g. I/O) for a tracked proc, but we're still able + // to get the basic stuff like cmdline and core stats. + Partial int } ) -func (tp *TrackedProc) GetName() string { - return tp.info.Name +func lessUpdateGroupName(x, y Update) bool { return x.GroupName < y.GroupName } + +func lessThreadUpdate(x, y ThreadUpdate) bool { return seq.Compare(x, y) < 0 } + +func lessCounts(x, y Counts) bool { return seq.Compare(x, y) < 0 } + +func (tp *trackedProc) getUpdate() Update { + u := Update{ + GroupName: tp.groupName, + Latest: tp.lastaccum, + Memory: tp.metrics.Memory, + Filedesc: tp.metrics.Filedesc, + Start: tp.static.StartTime, + NumThreads: tp.metrics.NumThreads, + States: tp.metrics.States, + Wchans: make(map[string]int), + } + if tp.metrics.Wchan != "" { + u.Wchans[tp.metrics.Wchan] = 1 + } + if len(tp.threads) > 1 { + for _, tt := range tp.threads { + u.Threads = append(u.Threads, ThreadUpdate{tt.name, tt.latest}) + if tt.wchan != "" { + u.Wchans[tt.wchan]++ + } + } + } + return u } -func (tp *TrackedProc) GetCmdLine() []string { - return tp.info.Cmdline -} - -func (tp *TrackedProc) GetStats() trackedStats { - mem := Memory{Resident: tp.info.ResidentBytes, Virtual: tp.info.VirtualBytes} - fd := Filedesc{Open: tp.info.OpenFDs, Limit: tp.info.MaxFDs} - return trackedStats{ - aggregate: tp.accum, - latest: tp.lastaccum, - Memory: mem, - Filedesc: fd, - start: tp.info.StartTime, +// NewTracker creates a Tracker. 
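+//
+// A minimal usage sketch, where nm is a hypothetical value implementing
+// common.MatchNamer (error handling elided):
+//
+//	t := NewTracker(nm, false, false, false)
+//	fs, _ := NewFS("/proc", false)
+//	_, updates, _ := t.Update(fs.AllProcs())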
+func NewTracker(namer common.MatchNamer, trackChildren, alwaysRecheck, debug bool) *Tracker { + return &Tracker{ + namer: namer, + tracked: make(map[ID]*trackedProc), + procIds: make(map[int]ID), + trackChildren: trackChildren, + alwaysRecheck: alwaysRecheck, + username: make(map[int]string), + debug: debug, } } -func NewTracker() *Tracker { - return &Tracker{Tracked: make(map[ProcId]*TrackedProc), ProcIds: make(map[int]ProcId)} +func (t *Tracker) track(groupName string, idinfo IDInfo) { + tproc := trackedProc{ + groupName: groupName, + static: idinfo.Static, + metrics: idinfo.Metrics, + } + if len(idinfo.Threads) > 0 { + tproc.threads = make(map[ThreadID]trackedThread) + for _, thr := range idinfo.Threads { + tproc.threads[thr.ThreadID] = trackedThread{ + thr.ThreadName, thr.Counts, Delta{}, time.Time{}, thr.Wchan} + } + } + t.tracked[idinfo.ID] = &tproc } -func (t *Tracker) Track(groupName string, idinfo ProcIdInfo) { - info := ProcInfo{idinfo.ProcStatic, idinfo.ProcMetrics} - t.Tracked[idinfo.ProcId] = &TrackedProc{GroupName: groupName, info: info} +func (t *Tracker) ignore(id ID) { + // only ignore ID if we didn't set recheck to true + if t.alwaysRecheck == false { + t.tracked[id] = nil + } } -func (t *Tracker) Ignore(id ProcId) { - t.Tracked[id] = nil +func (tp *trackedProc) update(metrics Metrics, now time.Time, cerrs *CollectErrors, threads []Thread) { + // newcounts: resource consumption since last cycle + newcounts := metrics.Counts + tp.lastaccum = newcounts.Sub(tp.metrics.Counts) + tp.metrics = metrics + tp.lastUpdate = now + if len(threads) > 1 { + if tp.threads == nil { + tp.threads = make(map[ThreadID]trackedThread) + } + for _, thr := range threads { + tt := trackedThread{thr.ThreadName, thr.Counts, Delta{}, now, thr.Wchan} + if old, ok := tp.threads[thr.ThreadID]; ok { + tt.latest, tt.accum = thr.Counts.Sub(old.accum), thr.Counts + } + tp.threads[thr.ThreadID] = tt + } + for id, tt := range tp.threads { + if tt.lastUpdate != now { + delete(tp.threads, id) + } + } + } else { + tp.threads = nil + } } -// Scan procs and update metrics for those which are tracked. Processes that have gone -// away get removed from the Tracked map. New processes are returned, along with the count -// of permission errors. -func (t *Tracker) Update(procs ProcIter) ([]ProcIdInfo, int, error) { - now := time.Now() - var newProcs []ProcIdInfo - var permissionErrors int +// handleProc updates the tracker if it's a known and not ignored proc. +// If it's neither known nor ignored, newProc will be non-nil. +// It is not an error if the process disappears while we are reading +// its info out of /proc, it just means nothing will be returned and +// the tracker will be unchanged. +func (t *Tracker) handleProc(proc Proc, updateTime time.Time) (*IDInfo, CollectErrors) { + var cerrs CollectErrors + procID, err := proc.GetProcID() + if err != nil { + return nil, cerrs + } + + // Do nothing if we're ignoring this proc. + last, known := t.tracked[procID] + if known && last == nil { + return nil, cerrs + } + + metrics, softerrors, err := proc.GetMetrics() + if err != nil { + if t.debug { + log.Printf("error reading metrics for %+v: %v", procID, err) + } + // This usually happens due to the proc having exited, i.e. + // we lost the race. We don't count that as an error. 
+ if err != ErrProcNotExist { + cerrs.Read++ + } + return nil, cerrs + } + + var threads []Thread + threads, err = proc.GetThreads() + if err != nil { + softerrors |= 1 + } + cerrs.Partial += softerrors + + if len(threads) > 0 { + metrics.Counts.CtxSwitchNonvoluntary, metrics.Counts.CtxSwitchVoluntary = 0, 0 + for _, thread := range threads { + metrics.Counts.CtxSwitchNonvoluntary += thread.Counts.CtxSwitchNonvoluntary + metrics.Counts.CtxSwitchVoluntary += thread.Counts.CtxSwitchVoluntary + metrics.States.Add(thread.States) + } + } + + var newProc *IDInfo + if known { + last.update(metrics, updateTime, &cerrs, threads) + } else { + static, err := proc.GetStatic() + if err != nil { + if t.debug { + log.Printf("error reading static details for %+v: %v", procID, err) + } + return nil, cerrs + } + newProc = &IDInfo{procID, static, metrics, threads} + if t.debug { + log.Printf("found new proc: %s", newProc) + } + + // Is this a new process with the same pid as one we already know? + // Then delete it from the known map, otherwise the cleanup in Update() + // will remove the ProcIds entry we're creating here. + if oldProcID, ok := t.procIds[procID.Pid]; ok { + delete(t.tracked, oldProcID) + } + t.procIds[procID.Pid] = procID + } + return newProc, cerrs +} + +// update scans procs and updates metrics for those which are tracked. Processes +// that have gone away get removed from the Tracked map. New processes are +// returned, along with the count of nonfatal errors. +func (t *Tracker) update(procs Iter) ([]IDInfo, CollectErrors, error) { + var newProcs []IDInfo + var colErrs CollectErrors + var now = time.Now() for procs.Next() { - procId, err := procs.GetProcId() - if err != nil { - continue + newProc, cerrs := t.handleProc(procs, now) + if newProc != nil { + newProcs = append(newProcs, *newProc) } - - last, known := t.Tracked[procId] - - // Are we ignoring this proc? - if known && last == nil { - continue - } - - // TODO if just the io file is unreadable, should we still return the other metrics? - metrics, err := procs.GetMetrics() - if err != nil { - if os.IsPermission(err) { - permissionErrors++ - t.Ignore(procId) - } - continue - } - - if known { - var newaccum, lastaccum Counts - dcpu := metrics.CpuTime - last.info.CpuTime - drbytes := metrics.ReadBytes - last.info.ReadBytes - dwbytes := metrics.WriteBytes - last.info.WriteBytes - - lastaccum = Counts{Cpu: dcpu, ReadBytes: drbytes, WriteBytes: dwbytes} - newaccum = Counts{ - Cpu: last.accum.Cpu + lastaccum.Cpu, - ReadBytes: last.accum.ReadBytes + lastaccum.ReadBytes, - WriteBytes: last.accum.WriteBytes + lastaccum.WriteBytes, - } - - last.info.ProcMetrics = metrics - last.lastUpdate = now - last.accum = newaccum - last.lastaccum = lastaccum - } else { - static, err := procs.GetStatic() - if err != nil { - continue - } - newProcs = append(newProcs, ProcIdInfo{procId, static, metrics}) - - // Is this a new process with the same pid as one we already know? 
- if oldProcId, ok := t.ProcIds[procId.Pid]; ok { - // Delete it from known, otherwise the cleanup below will remove the - // ProcIds entry we're about to create - delete(t.Tracked, oldProcId) - } - t.ProcIds[procId.Pid] = procId - } - + colErrs.Read += cerrs.Read + colErrs.Partial += cerrs.Partial } + err := procs.Close() if err != nil { - return nil, permissionErrors, fmt.Errorf("Error reading procs: %v", err) + return nil, colErrs, fmt.Errorf("Error reading procs: %v", err) } // Rather than allocating a new map each time to detect procs that have // disappeared, we bump the last update time on those that are still // present. Then as a second pass we traverse the map looking for // stale procs and removing them. - for procId, pinfo := range t.Tracked { + for procID, pinfo := range t.tracked { if pinfo == nil { // TODO is this a bug? we're not tracking the proc so we don't see it go away so ProcIds // and Tracked are leaking? continue } if pinfo.lastUpdate != now { - delete(t.Tracked, procId) - delete(t.ProcIds, procId.Pid) + delete(t.tracked, procID) + delete(t.procIds, procID.Pid) } } - return newProcs, permissionErrors, nil + return newProcs, colErrs, nil +} + +// checkAncestry walks the process tree recursively towards the root, +// stopping at pid 1 or upon finding a parent that's already tracked +// or ignored. If we find a tracked parent track this one too; if not, +// ignore this one. +func (t *Tracker) checkAncestry(idinfo IDInfo, newprocs map[ID]IDInfo) string { + ppid := idinfo.ParentPid + pProcID := t.procIds[ppid] + if pProcID.Pid < 1 { + if t.debug { + log.Printf("ignoring unmatched proc with no matched parent: %+v", idinfo) + } + // Reached root of process tree without finding a tracked parent. + t.ignore(idinfo.ID) + return "" + } + + // Is the parent already known to the tracker? + if ptproc, ok := t.tracked[pProcID]; ok { + if ptproc != nil { + if t.debug { + log.Printf("matched as %q because child of %+v: %+v", + ptproc.groupName, pProcID, idinfo) + } + // We've found a tracked parent. + t.track(ptproc.groupName, idinfo) + return ptproc.groupName + } + // We've found an untracked parent. + t.ignore(idinfo.ID) + return "" + } + + // Is the parent another new process? + if pinfoid, ok := newprocs[pProcID]; ok { + if name := t.checkAncestry(pinfoid, newprocs); name != "" { + if t.debug { + log.Printf("matched as %q because child of %+v: %+v", + name, pProcID, idinfo) + } + // We've found a tracked parent, which implies this entire lineage should be tracked. + t.track(name, idinfo) + return name + } + } + + // Parent is dead, i.e. we never saw it, or there's no tracked proc in our ancestry. + if t.debug { + log.Printf("ignoring unmatched proc with no matched parent: %+v", idinfo) + } + t.ignore(idinfo.ID) + return "" +} + +func (t *Tracker) lookupUid(uid int) string { + if name, ok := t.username[uid]; ok { + return name + } + + var name string + uidstr := strconv.Itoa(uid) + u, err := user.LookupId(uidstr) + if err != nil { + name = uidstr + } else { + name = u.Username + } + t.username[uid] = name + return name +} + +// Update modifies the tracker's internal state based on what it reads from +// iter. Tracks any new procs the namer wants tracked, and updates +// its metrics for existing tracked procs. Returns nonfatal errors +// and the status of all tracked procs, or an error if fatal. 
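+// The counter values in each returned Update (Latest) are deltas accrued
+// since the previous call, not running totals; accumulation across cycles
+// is handled by Grouper.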
+func (t *Tracker) Update(iter Iter) (CollectErrors, []Update, error) { + newProcs, colErrs, err := t.update(iter) + if err != nil { + return colErrs, nil, err + } + + // Step 1: track any new proc that should be tracked based on its name and cmdline. + untracked := make(map[ID]IDInfo) + for _, idinfo := range newProcs { + nacl := common.ProcAttributes{ + Name: idinfo.Name, + Cmdline: idinfo.Cmdline, + Username: t.lookupUid(idinfo.EffectiveUID), + } + wanted, gname := t.namer.MatchAndName(nacl) + if wanted { + if t.debug { + log.Printf("matched as %q: %+v", gname, idinfo) + } + t.track(gname, idinfo) + } else { + untracked[idinfo.ID] = idinfo + } + } + + // Step 2: track any untracked new proc that should be tracked because its parent is tracked. + if t.trackChildren { + for _, idinfo := range untracked { + if _, ok := t.tracked[idinfo.ID]; ok { + // Already tracked or ignored in an earlier iteration + continue + } + + t.checkAncestry(idinfo, untracked) + } + } + + tp := []Update{} + for _, tproc := range t.tracked { + if tproc != nil { + tp = append(tp, tproc.getUpdate()) + } + } + return colErrs, tp, nil } diff --git a/vendor/github.com/ncabatoff/procfs/.gitignore b/vendor/github.com/ncabatoff/procfs/.gitignore new file mode 100644 index 000000000..25e3659ab --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/.gitignore @@ -0,0 +1 @@ +/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/ncabatoff/procfs/.travis.yml similarity index 100% rename from vendor/github.com/prometheus/procfs/.travis.yml rename to vendor/github.com/ncabatoff/procfs/.travis.yml diff --git a/vendor/github.com/ncabatoff/procfs/CONTRIBUTING.md b/vendor/github.com/ncabatoff/procfs/CONTRIBUTING.md new file mode 100644 index 000000000..40503edbf --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/ncabatoff/procfs/LICENSE b/vendor/github.com/ncabatoff/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/ncabatoff/procfs/MAINTAINERS.md b/vendor/github.com/ncabatoff/procfs/MAINTAINERS.md new file mode 100644 index 000000000..35993c41c --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/MAINTAINERS.md @@ -0,0 +1 @@ +* Tobias Schmidt diff --git a/vendor/github.com/ncabatoff/procfs/Makefile b/vendor/github.com/ncabatoff/procfs/Makefile new file mode 100644 index 000000000..4d1098394 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/Makefile @@ -0,0 +1,77 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +pkgs = $(shell $(GO) list ./... | grep -v /vendor/) + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) + +ifdef DEBUG + bindata_flags = -debug +endif + +STATICCHECK_IGNORE = + +all: format staticcheck build test + +style: + @echo ">> checking code style" + @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' + +check_license: + @echo ">> checking license header" + @./scripts/check_license.sh + +test: fixtures/.unpacked sysfs/fixtures/.unpacked + @echo ">> running all tests" + @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) + +format: + @echo ">> formatting code" + @$(GO) fmt $(pkgs) + +vet: + @echo ">> vetting code" + @$(GO) vet $(pkgs) + +staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" + @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + +%/.unpacked: %.ttar + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +update_fixtures: fixtures.ttar sysfs/fixtures.ttar + +%fixtures.ttar: %/fixtures + rm -v $(dir $*)fixtures/.unpacked + ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ + +$(FIRST_GOPATH)/bin/staticcheck: + @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck + +.PHONY: all style check_license format test vet staticcheck + +# Declaring the binaries at their default locations as PHONY targets is a hack +# to ensure the latest version is downloaded on every make execution. +# If this is not desired, copy/symlink these binaries to a different path and +# set the respective environment variables. +.PHONY: $(GOPATH)/bin/staticcheck diff --git a/vendor/github.com/ncabatoff/procfs/NOTICE b/vendor/github.com/ncabatoff/procfs/NOTICE new file mode 100644 index 000000000..53c5e9aa1 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). 
diff --git a/vendor/github.com/ncabatoff/procfs/README.md b/vendor/github.com/ncabatoff/procfs/README.md new file mode 100644 index 000000000..209549471 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/README.md @@ -0,0 +1,11 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/ncabatoff/procfs/buddyinfo.go b/vendor/github.com/ncabatoff/procfs/buddyinfo.go new file mode 100644 index 000000000..d3a826807 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
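+//
+// A minimal usage sketch (assuming this vendored copy is imported as
+// github.com/ncabatoff/procfs and that fmt and log are imported):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	infos, err := fs.NewBuddyInfo()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, bi := range infos {
+//		// Sizes[n] holds the count of free fragments of size 2^n*PAGE_SIZE.
+//		fmt.Printf("node %s, zone %s: %v\n", bi.Node, bi.Zone, bi.Sizes)
+//	}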
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/ncabatoff/procfs/doc.go b/vendor/github.com/ncabatoff/procfs/doc.go new file mode 100644 index 000000000..e2acd6d40 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/ncabatoff/procfs/fixtures.ttar b/vendor/github.com/ncabatoff/procfs/fixtures.ttar new file mode 100644 index 000000000..3e27c145c --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/fixtures.ttar @@ -0,0 +1,500 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/mountstats +Lines: 19 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys 
with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/status +Lines: 50 +Name: gvim +State: S (sleeping) +Tgid: 26231 +Ngid: 0 +Pid: 26231 +PPid: 6429 +TracerPid: 0 +Uid: 1000 1000 1000 1000 +Gid: 1000 1000 1000 1000 +FDSize: 64 +Groups: 999 1000 1001 +NStgid: 26231 +NSpid: 26231 +NSpgid: 26231 +NSsid: 26231 +VmPeak: 768040 kB +VmSize: 713068 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 30652 kB +VmRSS: 30632 kB +VmData: 377304 kB +VmStk: 132 kB +VmExe: 2712 kB +VmLib: 69672 kB +VmPTE: 632 kB +VmPMD: 16 kB +VmSwap: 0 kB +HugetlbPages: 0 kB +Threads: 4 +SigQ: 1/128288 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 0000000000000000 +SigIgn: 0000000000003001 +SigCgt: 00000001ed824efe +CapInh: 0000000000000000 +CapPrm: 0000000000000000 +CapEff: 0000000000000000 +CapBnd: 0000003fffffffff +CapAmb: 0000000000000000 +Seccomp: 0 +Speculation_Store_Bypass: thread vulnerable +Cpus_allowed: ffff +Cpus_allowed_list: 0-15 +Mems_allowed: 00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 6340 +nonvoluntary_ctxt_switches: 361 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: fixtures/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26233 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 
140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/short +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/short/buddyinfo +Lines: 3 +Node 0, zone +Node 0, zone +Node 0, zone +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/sizemismatch +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/valid +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/valid/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/mdstat +Lines: 26 +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0] sdb3[1] + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2] sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] + 195310144 blocks [2/2] [UU] + [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/dev +Lines: 6 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 
+XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 +XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
+They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/ncabatoff/procfs/fs.go b/vendor/github.com/ncabatoff/procfs/fs.go new file mode 100644 index 000000000..90076ffd4 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/fs.go @@ -0,0 +1,82 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/ncabatoff/procfs/nfs" + "github.com/ncabatoff/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSClientRPCStats retrieves NFS client RPC statistics. +func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. 
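+//
+// A minimal usage sketch (assuming the vendored import path
+// github.com/ncabatoff/procfs; NewFS and the accessors are defined in this file):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	stats, err := fs.NFSdServerRPCStats()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// stats is an *nfs.ServerRPCStats parsed from /proc/net/rpc/nfsd.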
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/ncabatoff/procfs/internal/util/parse.go b/vendor/github.com/ncabatoff/procfs/internal/util/parse.go new file mode 100644 index 000000000..1ad21c91a --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/internal/util/parse.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import "strconv" + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/ipvs.go b/vendor/github.com/ncabatoff/procfs/ipvs.go new file mode 100644 index 000000000..e36d4a3bd --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/ipvs.go @@ -0,0 +1,259 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. 
+ RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
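+//
+// A minimal usage sketch (assuming the vendored import path
+// github.com/ncabatoff/procfs and that fmt and log are imported):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	backends, err := fs.NewIPVSBackendStatus()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, b := range backends {
+//		fmt.Printf("%s %s:%d -> %s:%d weight=%d active=%d\n",
+//			b.Proto, b.LocalAddress, b.LocalPort,
+//			b.RemoteAddress, b.RemotePort, b.Weight, b.ActiveConn)
+//	}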
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/ncabatoff/procfs/mdstat.go b/vendor/github.com/ncabatoff/procfs/mdstat.go new file mode 100644 index 000000000..9dc19583d --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/mdstat.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. +func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/mountstats.go b/vendor/github.com/ncabatoff/procfs/mountstats.go new file mode 100644 index 000000000..7a8a1e099 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/mountstats.go @@ -0,0 +1,606 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. 
+ Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. 
+ SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. + Requests uint64 + // Number of times an actual RPC request has been transmitted for this operation. + Transmissions uint64 + // Number of times a request has had a major timeout. + MajorTimeouts uint64 + // Number of bytes sent for this operation, including RPC headers and payload. + BytesSent uint64 + // Number of bytes received for this operation, including RPC headers and payload. + BytesReceived uint64 + // Duration all requests spent queued for transmission before they were sent. + CumulativeQueueTime time.Duration + // Duration it took to get a reply back after the request was transmitted. + CumulativeTotalResponseTime time.Duration + // Duration from when a request was enqueued to when it was completely handled. + CumulativeTotalRequestTime time.Duration +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string + // The local port used for the NFS mount. + Port uint64 + // Number of times the client has had to establish a connection from scratch + // to the NFS server. + Bind uint64 + // Number of times the client has made a TCP connection to the NFS server. + Connect uint64 + // Duration (in jiffies, a kernel internal unit of time) the NFS mount has + // spent waiting for connections to the server to be established. + ConnectIdleTime uint64 + // Duration since the NFS mount last saw any RPC traffic. + IdleTime time.Duration + // Number of RPC requests for this mount sent to the NFS server. + Sends uint64 + // Number of RPC responses for this mount received from the NFS server. + Receives uint64 + // Number of times the NFS server sent a response with a transaction ID + // unknown to this client. + BadTransactionIDs uint64 + // A running counter, incremented on each request as the current difference + // ebetween sends and receives. + CumulativeActiveRequests uint64 + // A running counter, incremented on each request by the current backlog + // queue size. + CumulativeBacklog uint64 + + // Stats below only available with stat version 1.1. + + // Maximum number of simultaneously active RPC requests ever used. + MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. 
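+//
+// A minimal sketch using two device lines taken from the test fixtures above
+// (real input would come from an open /proc/[pid]/mountstats file):
+//
+//	r := strings.NewReader(
+//		"device rootfs mounted on / with fstype rootfs\n" +
+//			"device proc mounted on /proc with fstype proc\n")
+//	mounts, err := parseMountStats(r)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, m := range mounts {
+//		fmt.Println(m.Device, m.Mount, m.Type) // e.g. "proc /proc proc"
+//	}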
+func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
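+//
+// A minimal sketch using fixture-style input; the scanner is normally positioned
+// just after a "device ... with fstype nfs4 statvers=1.1" line:
+//
+//	s := bufio.NewScanner(strings.NewReader(
+//		"age: 13968\n" +
+//			"per-op statistics\n" +
+//			"NULL: 0 0 0 0 0 0 0 0\n" +
+//			"\n"))
+//	stats, err := parseMountStatsNFS(s, statVersion11)
+//	// On success, stats.Age is 13968s and stats.Operations holds the NULL entry.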
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[1:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + + switch statVersion { + case statVersion10: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11TCPLen) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + + return &NFSTransportStats{ + Protocol: protocol, + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/net_dev.go b/vendor/github.com/ncabatoff/procfs/net_dev.go new file mode 100644 index 000000000..3f2523371 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/net_dev.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. 
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := nd.parseLine(s.Text()) + if err != nil { + return nd, err + } + + nd[line.Name] = *line + } + + return nd, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
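The exported entry points above (NewNetDev, FS.NewNetDev, Proc.NewNetDev) can be exercised as in the following sketch, which assumes the vendored import path github.com/ncabatoff/procfs and a readable /proc mount on the host:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncabatoff/procfs"
    )

    func main() {
    	// Read the system-wide /proc/net/dev.
    	nd, err := procfs.NewNetDev()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Per-interface counters, keyed by interface name.
    	for name, line := range nd {
    		fmt.Printf("%s: rx=%d bytes tx=%d bytes\n", name, line.RxBytes, line.TxBytes)
    	}

    	// Aggregate across all interfaces.
    	total := nd.Total()
    	fmt.Printf("total (%s): rx=%d tx=%d\n", total.Name, total.RxBytes, total.TxBytes)
    }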
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	parts := strings.SplitN(rawLine, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(parts[0])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(nd))
+	for _, ifc := range nd {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
diff --git a/vendor/github.com/ncabatoff/procfs/nfs/nfs.go b/vendor/github.com/ncabatoff/procfs/nfs/nfs.go
new file mode 100644
index 000000000..651bf6819
--- /dev/null
+++ b/vendor/github.com/ncabatoff/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfs implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. +type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. +type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. +type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. 
+type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientID uint64 + SetClientIDConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetACL uint64 + SetACL uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeID uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateID uint64 + FreeStateID uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientID uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// ClientRPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. +type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/ncabatoff/procfs/nfs/parse.go b/vendor/github.com/ncabatoff/procfs/nfs/parse.go new file mode 100644 index 000000000..95a83cc5b --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, 
error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. + if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientID: v[13], + SetClientIDConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetACL: v[33], + SetACL: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeID: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateID: v[50], + FreeStateID: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientID: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/nfs/parse_nfs.go b/vendor/github.com/ncabatoff/procfs/nfs/parse_nfs.go new file mode 100644 index 000000000..94e572feb --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/ncabatoff/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFS metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFS file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/nfs/parse_nfsd.go b/vendor/github.com/ncabatoff/procfs/nfs/parse_nfsd.go new file mode 100644 index 000000000..3dbfffe40 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
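ParseClientRPCStats, defined above, accepts any io.Reader, so it can be driven either from /proc/net/rpc/nfs or from a captured snapshot. A minimal sketch assuming the vendored import path github.com/ncabatoff/procfs/nfs (the sample counters are illustrative only; lines the parser does not recognize are rejected, and absent lines simply stay zero):

    package main

    import (
    	"fmt"
    	"log"
    	"strings"

    	"github.com/ncabatoff/procfs/nfs"
    )

    func main() {
    	// A tiny synthetic /proc/net/rpc/nfs excerpt.
    	sample := "net 18628 0 18628 6\n" +
    		"rpc 4329785 0 4338291\n"

    	stats, err := nfs.ParseClientRPCStats(strings.NewReader(sample))
    	if err != nil {
    		log.Fatal(err)
    	}

    	fmt.Printf("RPC calls=%d retransmissions=%d\n",
    		stats.ClientRPC.RPCCount, stats.ClientRPC.Retransmissions)
    }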
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/ncabatoff/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/proc.go b/vendor/github.com/ncabatoff/procfs/proc.go new file mode 100644 index 000000000..af9719143 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/proc.go @@ -0,0 +1,259 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. 
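The server-side counterpart defined just above, ParseServerRPCStats, is typically fed directly from /proc/net/rpc/nfsd. A sketch under the same import-path assumption (the file only exists where the kernel NFS server is active):

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"github.com/ncabatoff/procfs/nfs"
    )

    func main() {
    	f, err := os.Open("/proc/net/rpc/nfsd")
    	if err != nil {
    		log.Fatal(err) // most likely no kernel NFS server on this machine
    	}
    	defer f.Close()

    	stats, err := nfs.ParseServerRPCStats(f)
    	if err != nil {
    		log.Fatal(err)
    	}

    	fmt.Printf("reply cache hits=%d misses=%d, threads=%d\n",
    		stats.ReplyCache.Hits, stats.ReplyCache.Misses, stats.Threads.Threads)
    }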
+func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. 
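The per-process accessors above combine naturally with AllProcs. A minimal sketch, again assuming the vendored import path github.com/ncabatoff/procfs and a standard /proc mount:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncabatoff/procfs"
    )

    func main() {
    	procs, err := procfs.AllProcs()
    	if err != nil {
    		log.Fatal(err)
    	}

    	for _, p := range procs {
    		comm, err := p.Comm()
    		if err != nil {
    			// The process may have exited between listing and reading.
    			continue
    		}
    		fmt.Printf("%d %s\n", p.PID, comm)
    	}
    }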
+func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} diff --git a/vendor/github.com/ncabatoff/procfs/proc_io.go b/vendor/github.com/ncabatoff/procfs/proc_io.go new file mode 100644 index 000000000..0251c83bf --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/proc_io.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. 
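Taken together with the descriptor helpers above, NewIO gives a quick per-process I/O picture. A sketch for the current process (same import-path assumption; reading /proc/<pid>/io requires appropriate permissions):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncabatoff/procfs"
    )

    func main() {
    	p, err := procfs.Self()
    	if err != nil {
    		log.Fatal(err)
    	}

    	nfds, err := p.FileDescriptorsLen()
    	if err != nil {
    		log.Fatal(err)
    	}

    	io, err := p.NewIO()
    	if err != nil {
    		log.Fatal(err)
    	}

    	fmt.Printf("open fds=%d read=%d bytes written=%d bytes\n",
    		nfds, io.ReadBytes, io.WriteBytes)
    }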
+func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/ncabatoff/procfs/proc_limits.go b/vendor/github.com/ncabatoff/procfs/proc_limits.go new file mode 100644 index 000000000..f04ba6fda --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/proc_limits.go @@ -0,0 +1,150 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
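NewLimits reports each soft limit as an int64, with -1 standing in for "unlimited" (see parseInt further down). A usage sketch under the same import-path assumption:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncabatoff/procfs"
    )

    func main() {
    	p, err := procfs.Self()
    	if err != nil {
    		log.Fatal(err)
    	}

    	limits, err := p.NewLimits()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// A value of -1 means the limit is "unlimited".
    	fmt.Printf("max open files=%d, max address space=%d\n",
    		limits.OpenFiles, limits.AddressSpace)
    }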
+func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int64, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return i, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/proc_ns.go b/vendor/github.com/ncabatoff/procfs/proc_ns.go new file mode 100644 index 000000000..d06c26eba --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// process is a member. 
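A sketch of NewNamespaces in use (import-path assumption as before); two processes share a namespace exactly when the corresponding Inode values match:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ncabatoff/procfs"
    )

    func main() {
    	p, err := procfs.Self()
    	if err != nil {
    		log.Fatal(err)
    	}

    	ns, err := p.NewNamespaces()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Typical keys on a modern kernel include "net", "mnt", "pid", "uts", "ipc", "user".
    	for name, n := range ns {
    		fmt.Printf("%s: type=%s inode=%d\n", name, n.Type, n.Inode)
    	}
    }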
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/proc_stat.go b/vendor/github.com/ncabatoff/procfs/proc_stat.go new file mode 100644 index 000000000..3cf2a9f18 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/proc_stat.go @@ -0,0 +1,188 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. 
+ MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/ncabatoff/procfs/proc_status.go b/vendor/github.com/ncabatoff/procfs/proc_status.go new file mode 100644 index 000000000..d88b8bfc8 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/proc_status.go @@ -0,0 +1,203 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/status. +type ( + ProcStatus struct { + TID int + TracerPid int + UIDReal int + UIDEffective int + UIDSavedSet int + UIDFileSystem int + GIDReal int + GIDEffective int + GIDSavedSet int + GIDFileSystem int + FDSize int + VmPeakKB int + VmSizeKB int + VmLckKB int + VmHWMKB int + VmRSSKB int + VmDataKB int + VmStkKB int + VmExeKB int + VmLibKB int + VmPTEKB int + VmSwapKB int + VoluntaryCtxtSwitches int + NonvoluntaryCtxtSwitches int + } + + procStatusFiller func(*ProcStatus, string) error + procStatusBuilder struct { + scanners map[string]procStatusFiller + } +) + +func (ps *ProcStatus) refTID() []interface{} { + return []interface{}{&ps.TID} +} +func (ps *ProcStatus) refTracerPid() []interface{} { + return []interface{}{&ps.TracerPid} +} +func (ps *ProcStatus) refUID() []interface{} { + return []interface{}{&ps.UIDReal, &ps.UIDEffective, &ps.UIDSavedSet, &ps.UIDFileSystem} +} +func (ps *ProcStatus) refGID() []interface{} { + return []interface{}{&ps.GIDReal, &ps.GIDEffective, &ps.GIDSavedSet, &ps.GIDFileSystem} +} +func (ps *ProcStatus) refFDSize() []interface{} { + return []interface{}{&ps.FDSize} +} +func (ps *ProcStatus) refVmPeakKB() []interface{} { + return []interface{}{&ps.VmPeakKB} +} +func (ps *ProcStatus) refVmSizeKB() []interface{} { + return []interface{}{&ps.VmSizeKB} +} +func (ps *ProcStatus) refVmLckKB() []interface{} { + return []interface{}{&ps.VmLckKB} +} +func (ps *ProcStatus) refVmHWMKB() []interface{} { + return []interface{}{&ps.VmHWMKB} +} +func (ps *ProcStatus) refVmRSSKB() []interface{} { + return []interface{}{&ps.VmRSSKB} +} +func (ps *ProcStatus) refVmDataKB() []interface{} { + return []interface{}{&ps.VmDataKB} +} +func (ps *ProcStatus) refVmStkKB() []interface{} { + return []interface{}{&ps.VmStkKB} +} +func (ps *ProcStatus) refVmExeKB() []interface{} { + return []interface{}{&ps.VmExeKB} +} +func (ps *ProcStatus) refVmLibKB() []interface{} { + return []interface{}{&ps.VmLibKB} +} +func (ps *ProcStatus) refVmPTEKB() []interface{} { + return []interface{}{&ps.VmPTEKB} +} +func (ps *ProcStatus) refVmSwapKB() []interface{} { + return []interface{}{&ps.VmSwapKB} +} +func (ps *ProcStatus) refVoluntaryCtxtSwitches() []interface{} { + return []interface{}{&ps.VoluntaryCtxtSwitches} +} +func (ps *ProcStatus) refNonvoluntaryCtxtSwitches() []interface{} { + return []interface{}{&ps.NonvoluntaryCtxtSwitches} +} + +func newFiller(format string, ref func(ps *ProcStatus) []interface{}) procStatusFiller { + return procStatusFiller(func(ps *ProcStatus, s string) error { + _, err := fmt.Sscanf(s, format, ref(ps)...) 
+ return err + }) +} + +func newProcStatusBuilder() *procStatusBuilder { + return &procStatusBuilder{ + scanners: map[string]procStatusFiller{ + "Pid": newFiller("%d", (*ProcStatus).refTID), + "TracerPid": newFiller("%d", (*ProcStatus).refTracerPid), + "Uid": newFiller("%d %d %d %d", (*ProcStatus).refUID), + "Gid": newFiller("%d %d %d %d", (*ProcStatus).refGID), + "FDSize": newFiller("%d", (*ProcStatus).refFDSize), + "VmPeak": newFiller("%d kB", (*ProcStatus).refVmPeakKB), + "VmSize": newFiller("%d kB", (*ProcStatus).refVmSizeKB), + "VmLck": newFiller("%d kB", (*ProcStatus).refVmLckKB), + "VmHWM": newFiller("%d kB", (*ProcStatus).refVmHWMKB), + "VmRSS": newFiller("%d kB", (*ProcStatus).refVmRSSKB), + "VmData": newFiller("%d kB", (*ProcStatus).refVmDataKB), + "VmStk": newFiller("%d kB", (*ProcStatus).refVmStkKB), + "VmExe": newFiller("%d kB", (*ProcStatus).refVmExeKB), + "VmLib": newFiller("%d kB", (*ProcStatus).refVmLibKB), + "VmPTE": newFiller("%d kB", (*ProcStatus).refVmPTEKB), + "VmSwap": newFiller("%d kB", (*ProcStatus).refVmSwapKB), + "voluntary_ctxt_switches": newFiller("%d", (*ProcStatus).refVoluntaryCtxtSwitches), + "nonvoluntary_ctxt_switches": newFiller("%d", (*ProcStatus).refNonvoluntaryCtxtSwitches), + }, + } +} + +func (b *procStatusBuilder) readStatus(r io.Reader) (ProcStatus, error) { + contents, err := ioutil.ReadAll(r) + if err != nil { + return ProcStatus{}, err + } + s := string(contents) + + var ps ProcStatus + for lineno := 0; s != ""; lineno++ { + crpos := strings.IndexByte(s, '\n') + if crpos == -1 { + return ProcStatus{}, fmt.Errorf("line %d from status file without newline: %s", lineno, s) + } + line := strings.TrimSpace(s[:crpos]) + s = s[crpos+1:] + if line == "" { + if s == "" { + break + } + continue + } + + pos := strings.IndexByte(line, ':') + if pos == -1 { + return ProcStatus{}, fmt.Errorf("line %d from status file without ':': %s", lineno, line) + } + + field := line[:pos] + scanner, ok := b.scanners[field] + if !ok { + continue + } + + err = scanner(&ps, line[pos+1:]) + // TODO: flag parse errors with some kind of "warning" error. + if err != nil { + // Be lenient about parse errors, because otherwise we miss out on some interesting + // procs. For example, my Ubuntu kernel (4.4.0-130-generic #156-Ubuntu) is showing + // a Chromium status file with "VmLib: 18446744073709442944 kB". + continue + } + } + return ps, nil +} + +var psb = newProcStatusBuilder() + +// NewStatus returns the current status information of the process. +func (p Proc) NewStatus() (ProcStatus, error) { + f, err := os.Open(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + defer f.Close() + + return psb.readStatus(f) +} diff --git a/vendor/github.com/ncabatoff/procfs/stat.go b/vendor/github.com/ncabatoff/procfs/stat.go new file mode 100644 index 000000000..61eb6b0e3 --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/stat.go @@ -0,0 +1,232 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. 
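Because parseCPUStat above divides every tick counter by userHZ (100), the CPUStat fields surface as seconds. A usage sketch via the exported NewStat, assuming the vendored import path github.com/ncabatoff/procfs:

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/ncabatoff/procfs"
    )

    func main() {
    	stat, err := procfs.NewStat()
    	if err != nil {
    		log.Fatal(err)
    	}

    	fmt.Printf("booted at %s\n", time.Unix(int64(stat.BootTime), 0))
    	// e.g. a raw "cpu" user field of 12345 ticks is reported as 123.45 seconds.
    	fmt.Printf("cpu user=%.2fs system=%.2fs idle=%.2fs (across %d cpus)\n",
    		stat.CPUTotal.User, stat.CPUTotal.System, stat.CPUTotal.Idle, len(stat.CPU))
    }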
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+    softIRQStat := SoftIRQStat{}
+    var total uint64
+    var prefix string
+
+    _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+        &prefix, &total,
+        &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+        &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+        &softIRQStat.Tasklet, &softIRQStat.Sched,
+        &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+    if err != nil {
+        return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+    }
+
+    return softIRQStat, total, nil
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+    // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+    f, err := os.Open(fs.Path("stat"))
+    if err != nil {
+        return Stat{}, err
+    }
+    defer f.Close()
+
+    stat := Stat{}
+
+    scanner := bufio.NewScanner(f)
+    for scanner.Scan() {
+        line := scanner.Text()
+        parts := strings.Fields(scanner.Text())
+        // require at least <key> <value>
+        if len(parts) < 2 {
+            continue
+        }
+        switch {
+        case parts[0] == "btime":
+            if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+                return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+            }
+        case parts[0] == "intr":
+            if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+                return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+            }
+            numberedIRQs := parts[2:]
+            stat.IRQ = make([]uint64, len(numberedIRQs))
+            for i, count := range numberedIRQs {
+                if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+                    return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+                }
+            }
+        case parts[0] == "ctxt":
+            if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+                return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+            }
+        case parts[0] == "processes":
+            if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+                return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+            }
+        case parts[0] == "procs_running":
+            if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+                return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+            }
+        case parts[0] == "procs_blocked":
+            if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+                return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+            }
+        case parts[0] == "softirq":
+            softIRQStats, total, err := parseSoftIRQStat(line)
+            if err != nil {
+                return Stat{}, err
+            }
+            stat.SoftIRQTotal = total
+            stat.SoftIRQ = softIRQStats
+        case strings.HasPrefix(parts[0], "cpu"):
+            cpuStat, cpuID, err := parseCPUStat(line)
+            if err != nil {
+                return Stat{}, err
+            }
+            if cpuID == -1 {
+                stat.CPUTotal = cpuStat
+            } else {
+                for int64(len(stat.CPU)) <= cpuID {
+                    stat.CPU = append(stat.CPU, CPUStat{})
+                }
+                stat.CPU[cpuID] = cpuStat
+            }
+        }
+    }
+
+    if err := scanner.Err(); err != nil {
+        return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+    }
+
+    return stat, nil
+}
diff --git a/vendor/github.com/ncabatoff/procfs/ttar b/vendor/github.com/ncabatoff/procfs/ttar
new file mode 100755
index 000000000..b0171a12b
--- /dev/null
+++ b/vendor/github.com/ncabatoff/procfs/ttar
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+#              - stores only filename, content, and mode
+#              - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'EOF', r'\EOF', line)
+    line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+    line = re.sub('\x00', r'NULLBYTE', line)
+    sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+    line = re.sub(r'(?<!\\)EOF', r'', line)
+    line = re.sub(r'\\EOF', 'EOF', line)
+    sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+    if [ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]; then
+        echo "WARNING sed unable to handle null bytes, using Python (slow)."
+        if ! which python >/dev/null; then
+            echo "ERROR Python not found. Aborting."
+            exit 2
+        fi
+        USE_PYTHON=1
+    fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+    bname=$(basename "$0")
+    cat << USAGE
+Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+       $bname            -t -f <ARCHIVE>           (list archive contents)
+       $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
+
+Options:
+  -C <DIR>  (change directory)
+  -v        (verbose)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+  $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+    if [ "${VERBOSE:-}" == "yes" ]; then
+        echo >&7 "$@"
+    fi
+}
+
+function set_cmd {
+    if [ -n "$CMD" ]; then
+        echo "ERROR: more than one command given"
+        echo
+        usage 2
+    fi
+    CMD=$1
+}
+
+unset VERBOSE
+
+while getopts :cf:htxvC: opt; do
+    case $opt in
+        c)
+            set_cmd "create"
+            ;;
+        f)
+            ARCHIVE=$OPTARG
+            ;;
+        h)
+            usage 0
+            ;;
+        t)
+            set_cmd "list"
+            ;;
+        x)
+            set_cmd "extract"
+            ;;
+        v)
+            VERBOSE=yes
+            exec 7>&1
+            ;;
+        C)
+            CDIR=$OPTARG
+            ;;
+        *)
+            echo >&2 "ERROR: invalid option -$OPTARG"
+            echo
+            usage 1
+            ;;
+    esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+    echo >&2 "ERROR: no command given"
+    echo
+    usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+    echo >&2 "ERROR: no archive name given"
+    echo
+    usage 1
+fi
+
+function list {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ !
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceeded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceeded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceeded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). + echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -e "$path" ] || [ -L "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. 
+ touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo "SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." + echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/ncabatoff/procfs/xfrm.go b/vendor/github.com/ncabatoff/procfs/xfrm.go new file mode 100644 index 000000000..8f1508f0f --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldn't parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/ncabatoff/procfs/xfs/parse.go b/vendor/github.com/ncabatoff/procfs/xfs/parse.go new file mode 100644 index 000000000..aeaf3302a --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/ncabatoff/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. 
+ fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. 
+func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. 
+ l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/ncabatoff/procfs/xfs/xfs.go b/vendor/github.com/ncabatoff/procfs/xfs/xfs.go new file mode 100644 index 000000000..d86794b7c --- /dev/null +++ b/vendor/github.com/ncabatoff/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. 
+type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore index 18793c248..b9f9659d2 100644 --- a/vendor/github.com/onsi/ginkgo/.gitignore +++ b/vendor/github.com/onsi/ginkgo/.gitignore @@ -4,4 +4,4 @@ tmp/**/* *.coverprofile .vscode .idea/ -*.log \ No newline at end of file +*.log diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index 7ad39b78f..3900878bd 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -5,6 +5,7 @@ go: - 1.8.x - 1.9.x - 1.10.x + - 1.11.x install: - go get -v -t ./... 
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index 32370206b..d7d797017 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,3 +1,12 @@ +## 1.7.0 + +### New Features +- Add JustAfterEach (#484) [0d4f080] + +### Fixes +- Correctly round suite time in junit reporter [2445fc1] +- Avoid using -i argument to go test for Golang 1.10+ [46bbc26] + ## 1.6.0 ### New Features diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index d4ed1fa57..5e509313c 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.6.0" +const VERSION = "1.7.0" type GinkgoConfigType struct { RandomSeed int64 diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 158acdd5e..5aa96b4d9 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -590,6 +590,16 @@ func JustBeforeEach(body interface{}, timeout ...float64) bool { return true } +//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustAfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + //AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested //Describe and Context blocks the innermost AfterEach blocks are run first. 
// diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go index b4654cd29..e3e9cb7c5 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -40,3 +40,9 @@ func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, ti runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), } } + +func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go index 77b23a4c7..7fd68ee8e 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -161,6 +161,18 @@ func (spec *Spec) runSample(sample int, writer io.Writer) { innerMostContainerIndexToUnwind := -1 defer func() { + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) { + spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach) + justAfterEachState, justAfterEachFailure := justAfterEach.Run() + if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { + spec.state = justAfterEachState + spec.failure = justAfterEachFailure + } + } + } + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { container := spec.containers[i] for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index f311e9a0d..3104bbc88 100644 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -175,6 +175,13 @@ func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types. 
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) } +func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { if suite.running { suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 65b8964e5..2c9f3c792 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -121,7 +121,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun - reporter.suite.Time = math.Trunc(summary.RunTime.Seconds() * 1000 / 1000) + reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 reporter.suite.Failures = summary.NumberOfFailedSpecs reporter.suite.Errors = 0 file, err := os.Create(reporter.filename) diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go index baf1bd1c4..0e89521be 100644 --- a/vendor/github.com/onsi/ginkgo/types/types.go +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -159,6 +159,7 @@ const ( SpecComponentTypeAfterSuite SpecComponentTypeBeforeEach SpecComponentTypeJustBeforeEach + SpecComponentTypeJustAfterEach SpecComponentTypeAfterEach SpecComponentTypeIt SpecComponentTypeMeasure diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index 783fb3b48..4d71367f6 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,4 +1,5 @@ language: go + go: - 1.6.x - 1.7.x @@ -7,10 +8,16 @@ go: - 1.10.x - 1.11.x +env: + - GO111MODULE=on + install: - - env GO111MODULE=on go get -v ./... - - env GO111MODULE=on go build ./... + - go get -v ./... + - go build ./... 
- go get github.com/onsi/ginkgo - go install github.com/onsi/ginkgo/ginkgo -script: env GO111MODULE=on $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race && env GO111MODULE=on go vet +script: | + $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race && + go vet && + [ -z "`gofmt -l -e -s -w .`" ] diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index f2aaea4ee..9153294f7 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,10 @@ +## 1.4.3 + +### Fixes: + +- ensure file name and line numbers are correctly reported for XUnit [6fff58f] +- Fixed matcher for content-type (#305) [69d9b43] + ## 1.4.2 ### Fixes: diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 51d7872fc..471f691a6 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.4.2" +const GOMEGA_VERSION = "1.4.3" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -46,12 +46,25 @@ func RegisterFailHandler(handler types.GomegaFailHandler) { globalFailWrapper = nil return } + globalFailWrapper = &types.GomegaFailWrapper{ Fail: handler, TWithHelper: testingtsupport.EmptyTWithHelper{}, } } +func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) { + if handler == nil { + globalFailWrapper = nil + return + } + + globalFailWrapper = &types.GomegaFailWrapper{ + Fail: handler, + TWithHelper: t, + } +} + //RegisterTestingT connects Gomega to Golang's XUnit style //Testing.T tests. It is now deprecated and you should use NewGomegaWithT() instead. // @@ -74,7 +87,12 @@ func RegisterFailHandler(handler types.GomegaFailHandler) { // // (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). 
func RegisterTestingT(t types.GomegaTestingT) { - RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) + tWithHelper, hasHelper := t.(types.TWithHelper) + if !hasHelper { + RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) + return + } + RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) } //InterceptGomegaHandlers runs a given callback and returns an array of diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go index ed6f69288..80c9c8bb1 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -23,8 +23,8 @@ func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err err } winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ - reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, - reflect.SelectCase{Dir: reflect.SelectDefault}, + {Dir: reflect.SelectRecv, Chan: channelValue}, + {Dir: reflect.SelectDefault}, }) var closed bool diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go index d7c32233e..302dd1a0a 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -42,8 +42,8 @@ func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error }() winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ - reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, - reflect.SelectCase{Dir: reflect.SelectDefault}, + {Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + {Dir: reflect.SelectDefault}, }) var didSend bool diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 070d60260..0c83c2b63 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -53,11 +53,11 @@ func normalise(input string) string { var val interface{} err := yaml.Unmarshal([]byte(input), &val) if err != nil { - panic(err) // guarded by Match + panic(err) // unreachable since Match already calls Unmarshal } output, err := yaml.Marshal(val) if err != nil { - panic(err) // guarded by Unmarshal + panic(err) // untested section, unreachable since we Unmarshal above } return strings.TrimSpace(string(output)) } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index fc179916d..2018a6128 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -39,8 +39,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ - reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, - reflect.SelectCase{Dir: reflect.SelectDefault}, + {Dir: reflect.SelectRecv, Chan: channelValue}, + {Dir: reflect.SelectDefault}, }) var closed bool diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go index 81b377111..8aaf8759d 100644 --- 
a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -14,12 +14,12 @@ type BipartiteGraph struct { func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { left := NodeOrderedSet{} - for i, _ := range leftValues { + for i := range leftValues { left = append(left, Node{Id: i}) } right := NodeOrderedSet{} - for j, _ := range rightValues { + for j := range rightValues { right = append(right, Node{Id: j + len(left)}) } diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap deleted file mode 100644 index ba611cb21..000000000 --- a/vendor/github.com/opencontainers/go-digest/.mailmap +++ /dev/null @@ -1 +0,0 @@ -Stephen J Day diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml deleted file mode 100644 index 45fa4b9ec..000000000 --- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml +++ /dev/null @@ -1,12 +0,0 @@ -approve_by_comment: true -approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' -reject_regex: ^Rejected -reset_on_push: true -author_approval: ignored -signed_off_by: - required: true -reviewers: - teams: - - go-digest-maintainers - name: default - required: 2 diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml deleted file mode 100644 index 7ea4ed1d2..000000000 --- a/vendor/github.com/opencontainers/go-digest/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go -go: - - 1.7 - - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md deleted file mode 100644 index e4d962ac1..000000000 --- a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md +++ /dev/null @@ -1,72 +0,0 @@ -# Contributing to Docker open source projects - -Want to hack on this project? Awesome! Here are instructions to get you started. - -This project is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs deleted file mode 100644 index e26cd4fc8..000000000 --- a/vendor/github.com/opencontainers/go-digest/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. 
Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. 
Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. 
Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. 
- - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. 
However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. 
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS deleted file mode 100644 index 42a29795d..000000000 --- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ /dev/null @@ -1,9 +0,0 @@ -Aaron Lehmann (@aaronlehmann) -Brandon Philips (@philips) -Brendan Burns (@brendandburns) -Derek McGowan (@dmcgowan) -Jason Bouzane (@jbouzane) -John Starks (@jstarks) -Jonathan Boulle (@jonboulle) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md deleted file mode 100644 index 0f5a04092..000000000 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ /dev/null @@ -1,104 +0,0 @@ -# go-digest - -[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) - -Common digest package used across the container ecosystem. - -Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. - -# What is a digest? - -A digest is just a hash. - -The most common use case for a digest is to create a content -identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) -systems: - -```go -id := digest.FromBytes([]byte("my content")) -``` - -In the example above, the id can be used to uniquely identify -the byte slice "my content". This allows two disparate applications -to agree on a verifiable identifier without having to trust one -another. - -An identifying digest can be verified, as follows: - -```go -if id != digest.FromBytes([]byte("my content")) { - return errors.New("the content has changed!") -} -``` - -A `Verifier` type can be used to handle cases where an `io.Reader` -makes more sense: - -```go -rd := getContent() -verifier := id.Verifier() -io.Copy(verifier, rd) - -if !verifier.Verified() { - return errors.New("the content has changed!") -} -``` - -Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this -can power a rich, safe, content distribution system. - -# Usage - -While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is -considered the best resource, a few important items need to be called -out when using this package. - -1. Make sure to import the hash implementations into your application - or the package will panic. You should have something like the - following in the main (or other entrypoint) of your application: - - ```go - import ( - _ "crypto/sha256" - _ "crypto/sha512" - ) - ``` - This may seem inconvenient but it allows you replace the hash - implementations with others, such as https://github.com/stevvooe/resumable. - -2. Even though `digest.Digest` may be assemable as a string, _always_ - verify your input with `digest.Parse` or use `Digest.Validate` - when accepting untrusted input. While there are measures to - avoid common problems, this will ensure you have valid digests - in the rest of your application. - -# Stability - -The Go API, at this stage, is considered stable, unless otherwise noted. - -As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). 
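For reference while this vendored copy is removed, here is a minimal sketch of the go-digest API the README above describes (`digest.FromBytes`, `digest.Parse`, and the `Verifier` type); the content string is purely illustrative:

```go
package main

import (
	_ "crypto/sha256" // register the hash implementation, as the README requires

	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Content-addressable identifier for a byte slice.
	id := digest.FromBytes([]byte("my content"))
	fmt.Println(id) // prints something like sha256:<64 hex characters>

	// Always validate digests received as strings before trusting them.
	parsed, err := digest.Parse(id.String())
	if err != nil {
		panic(err)
	}

	// Verify a stream of bytes against the digest.
	verifier := parsed.Verifier()
	if _, err := io.Copy(verifier, strings.NewReader("my content")); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true
}
```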
- -# Contributing - -This package is considered fairly complete. It has been in production -in thousands (millions?) of deployments and is fairly battle-hardened. -New additions will be met with skepticism. If you think there is a -missing feature, please file a bug clearly describing the problem and -the alternatives you tried before submitting a PR. - -# Reporting security issues - -Please DO NOT file a public issue, instead send your report privately to -security@opencontainers.org. - -The maintainers take security seriously. If you discover a security issue, -please bring it to their attention right away! - -If you are reporting a security issue, do not create an issue or file a pull -request on GitHub. Instead, disclose the issue responsibly by sending an email -to security@opencontainers.org (which is inhabited only by the maintainers of -the various OCI projects). - -# Copyright and license - -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go deleted file mode 100644 index 8813bd26f..000000000 --- a/vendor/github.com/opencontainers/go-digest/algorithm.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "crypto" - "fmt" - "hash" - "io" - "regexp" -) - -// Algorithm identifies and implementation of a digester by an identifier. -// Note the that this defines both the hash algorithm used and the string -// encoding. -type Algorithm string - -// supported digest types -const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) - SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) - SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) - - // Canonical is the primary digest algorithm used with the distribution - // project. Other digests may be used but this one is the primary storage - // digest. - Canonical = SHA256 -) - -var ( - // TODO(stevvooe): Follow the pattern of the standard crypto package for - // registration of digests. Effectively, we are a registerable set and - // common symbol access. - - // algorithms maps values to hash.Hash implementations. Other algorithms - // may be available but they cannot be calculated by the digest package. - algorithms = map[Algorithm]crypto.Hash{ - SHA256: crypto.SHA256, - SHA384: crypto.SHA384, - SHA512: crypto.SHA512, - } - - // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. 
- // Note that /A-F/ disallowed. - anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ - SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), - SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), - SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), - } -) - -// Available returns true if the digest type is available for use. If this -// returns false, Digester and Hash will return nil. -func (a Algorithm) Available() bool { - h, ok := algorithms[a] - if !ok { - return false - } - - // check availability of the hash, as well - return h.Available() -} - -func (a Algorithm) String() string { - return string(a) -} - -// Size returns number of bytes returned by the hash. -func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - if !a.Available() { - return ErrDigestUnsupported - } - - return nil -} - -// Digester returns a new digester for the specified algorithm. If the algorithm -// does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling Digester. -func (a Algorithm) Digester() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. -func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // Empty algorithm string is invalid - if a == "" { - panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) - } - - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package). - // - // Applications that may want to resolve the hash at runtime should - // call Algorithm.Available before call Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into -// the encoded portion of the digest. -func (a Algorithm) Encode(d []byte) string { - // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we - // add support for back registration, we can modify this accordingly. - return fmt.Sprintf("%x", d) -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.Digester() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.Digester() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. 
- panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// FromString digests the string input and returns a Digest. -func (a Algorithm) FromString(s string) Digest { - return a.FromBytes([]byte(s)) -} - -// Validate validates the encoded portion string -func (a Algorithm) Validate(encoded string) error { - r, ok := anchoredEncodedRegexps[a] - if !ok { - return ErrDigestUnsupported - } - // Digests much always be hex-encoded, ensuring that their hex portion will - // always be size*2 - if a.Size()*2 != len(encoded) { - return ErrDigestInvalidLength - } - if r.MatchString(encoded) { - return nil - } - return ErrDigestInvalidFormat -} diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go deleted file mode 100644 index ad398cba2..000000000 --- a/vendor/github.com/opencontainers/go-digest/digest.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return NewDigestFromEncoded(alg, alg.Encode(p)) -} - -// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. -func NewDigestFromHex(alg, hex string) Digest { - return NewDigestFromEncoded(Algorithm(alg), hex) -} - -// NewDigestFromEncoded returns a Digest from alg and the encoded digest. -func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, encoded)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. 
- ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength returned when digest has invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// Parse parses s and returns the validated digest object. An error will -// be returned if the format is invalid. -func Parse(s string) (Digest, error) { - d := Digest(s) - return d, d.Validate() -} - -// FromReader consumes the content of rd until io.EOF, returning canonical digest. -func FromReader(rd io.Reader) (Digest, error) { - return Canonical.FromReader(rd) -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) Digest { - return Canonical.FromBytes(p) -} - -// FromString digests the input and returns a Digest. -func FromString(s string) Digest { - return Canonical.FromString(s) -} - -// Validate checks that the contents of d is a valid digest, returning an -// error if not. -func (d Digest) Validate() error { - s := string(d) - i := strings.Index(s, ":") - if i <= 0 || i+1 == len(s) { - return ErrDigestInvalidFormat - } - algorithm, encoded := Algorithm(s[:i]), s[i+1:] - if !algorithm.Available() { - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - return ErrDigestUnsupported - } - return algorithm.Validate(encoded) -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. -func (d Digest) Algorithm() Algorithm { - return Algorithm(d[:d.sepIndex()]) -} - -// Verifier returns a writer object that can be used to verify a stream of -// content against the digest. If the digest is invalid, the method will panic. -func (d Digest) Verifier() Verifier { - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - } -} - -// Encoded returns the encoded portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Encoded() string { - return string(d[d.sepIndex()+1:]) -} - -// Hex is deprecated. Please use Digest.Encoded. -func (d Digest) Hex() string { - return d.Encoded() -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic(fmt.Sprintf("no ':' separator in digest %q", d)) - } - - return i -} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go deleted file mode 100644 index 36fa2728e..000000000 --- a/vendor/github.com/opencontainers/go-digest/digester.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import "hash" - -// Digester calculates the digest of written data. 
Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go deleted file mode 100644 index 491ea1ef1..000000000 --- a/vendor/github.com/opencontainers/go-digest/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. -// More importantly, it provides tools and wrappers to work with -// hash.Hash-based digests with little effort. -// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// : -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go deleted file mode 100644 index 32125e918..000000000 --- a/vendor/github.com/opencontainers/go-digest/verifiers.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
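The `Digester` contract shown above (writes go to the value returned by `Hash()`, while `Digest()` reports the current value) can be exercised roughly as follows; this is a sketch only, using the `Canonical` (sha256) algorithm:

```go
package main

import (
	_ "crypto/sha256" // sha256 must be registered before using the Canonical algorithm

	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	d := digest.Canonical.Digester()

	// Stream data into the underlying hash; Digest() may be read at any point.
	if _, err := io.Copy(d.Hash(), strings.NewReader("chunk one\n")); err != nil {
		panic(err)
	}
	if _, err := io.Copy(d.Hash(), strings.NewReader("chunk two\n")); err != nil {
		panic(err)
	}

	fmt.Println(d.Digest()) // digest of both chunks, e.g. sha256:<hex>
}
```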
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. - Verified() bool -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md index 330788a4e..5d93880b3 100644 --- a/vendor/github.com/prometheus/client_golang/CHANGELOG.md +++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md @@ -1,3 +1,67 @@ +## 0.9.1 / 2018-11-03 +* [FEATURE] Add `WriteToTextfile` function to facilitate the creation of + *.prom files for the textfile collector of the node exporter. #489 +* [ENHANCEMENT] More descriptive error messages for inconsistent label + cardinality. #487 +* [ENHANCEMENT] Exposition: Use a GZIP encoder pool to avoid allocations in + high-frequency scrape scenarios. #366 +* [ENHANCEMENT] Exposition: Streaming serving of metrics data while encoding. + #482 +* [ENHANCEMENT] API client: Add a way to return the body of a 5xx response. + #479 + +## 0.9.0 / 2018-10-15 +* [CHANGE] Go1.6 is no longer supported. +* [CHANGE] More refinements of the `Registry` consistency checks: Duplicated + labels are now detected, but inconsistent label dimensions are now allowed. + Collisions with the “magic” metric and label names in Summaries and + Histograms are detected now. #108 #417 #471 +* [CHANGE] Changed `ProcessCollector` constructor. #219 +* [CHANGE] Changed Go counter `go_memstats_heap_released_bytes_total` to gauge + `go_memstats_heap_released_bytes`. #229 +* [CHANGE] Unexported `LabelPairSorter`. #453 +* [CHANGE] Removed the `Untyped` metric from direct instrumentation. #340 +* [CHANGE] Unexported `MetricVec`. #319 +* [CHANGE] Removed deprecated `Set` method from `Counter` #247 +* [CHANGE] Removed deprecated `RegisterOrGet` and `MustRegisterOrGet`. #247 +* [CHANGE] API client: Introduced versioned packages. +* [FEATURE] A `Registerer` can be wrapped with prefixes and labels. #357 +* [FEATURE] “Describe by collect” helper function. #239 +* [FEATURE] Added package `testutil`. #58 +* [FEATURE] Timestamp can be explicitly set for const metrics. #187 +* [FEATURE] “Unchecked” collectors are possible now without cheating. #47 +* [FEATURE] Pushing to the Pushgateway reworked in package `push` to support + many new features. (The old functions are still usable but deprecated.) #372 + #341 +* [FEATURE] Configurable connection limit for scrapes. #179 +* [FEATURE] New HTTP middlewares to instrument `http.Handler` and + `http.RoundTripper`. 
The old middlewares and the pre-instrumented `/metrics` + handler are (strongly) deprecated. #316 #57 #101 #224 +* [FEATURE] “Currying” for metric vectors. #320 +* [FEATURE] A `Summary` can be created without quantiles. #118 +* [FEATURE] Added a `Timer` helper type. #231 +* [FEATURE] Added a Graphite bridge. #197 +* [FEATURE] Help strings are now optional. #460 +* [FEATURE] Added `process_virtual_memory_max_bytes` metric. #438 #440 +* [FEATURE] Added `go_gc_cpu_fraction` and `go_threads` metrics. #281 #277 +* [FEATURE] Added `promauto` package with auto-registering metrics. #385 #393 +* [FEATURE] Add `SetToCurrentTime` method to `Gauge`. #259 +* [FEATURE] API client: Add AlertManager, Status, and Target methods. #402 +* [FEATURE] API client: Add admin methods. #398 +* [FEATURE] API client: Support series API. #361 +* [FEATURE] API client: Support querying label values. +* [ENHANCEMENT] Smarter creation of goroutines during scraping. Solves memory + usage spikes in certain situations. #369 +* [ENHANCEMENT] Counters are now faster if dealing with integers only. #367 +* [ENHANCEMENT] Improved label validation. #274 #335 +* [BUGFIX] Creating a const metric with an invalid `Desc` returns an error. #460 +* [BUGFIX] Histogram observations don't race any longer with exposition. #275 +* [BUGFIX] Fixed goroutine leaks. #236 #472 +* [BUGFIX] Fixed an error message for exponential histogram buckets. #467 +* [BUGFIX] Fixed data race writing to the metric map. #401 +* [BUGFIX] API client: Decode JSON on a 4xx respons but do not on 204 + responses. #476 #414 + ## 0.8.0 / 2016-08-17 * [CHANGE] Registry is doing more consistency checks. This might break existing setups that used to export inconsistent metrics. diff --git a/vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md b/vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md deleted file mode 100644 index a456acf5b..000000000 --- a/vendor/github.com/prometheus/client_golang/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,8 +0,0 @@ - diff --git a/vendor/github.com/prometheus/client_golang/Makefile.common b/vendor/github.com/prometheus/client_golang/Makefile.common index c9d832373..741579e60 100644 --- a/vendor/github.com/prometheus/client_golang/Makefile.common +++ b/vendor/github.com/prometheus/client_golang/Makefile.common @@ -16,7 +16,7 @@ # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : -# Create the main Makefile in the root project directory. +# Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" @@ -28,18 +28,53 @@ unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +unexport GOVENDOR +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. 
+ GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif + + unexport GO111MODULE +endif PROMU := $(FIRST_GOPATH)/bin/promu STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -GOVENDOR := $(FIRST_GOPATH)/bin/govendor pkgs = ./... +GO_VERSION ?= $(shell $(GO) version) +GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) + +PROMU_VERSION ?= 0.2.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_REPO ?= prom .PHONY: all -all: style staticcheck unused build test +all: precheck style staticcheck unused build test # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this @@ -70,37 +105,54 @@ common-check_license: .PHONY: common-test-short common-test-short: @echo ">> running short tests" - $(GO) test -short $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: @echo ">> running all tests" - $(GO) test -race $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) .PHONY: common-format common-format: @echo ">> formatting code" - $(GO) fmt $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" - $(GO) vet $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-staticcheck common-staticcheck: $(STATICCHECK) @echo ">> running staticcheck" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) +else $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +endif .PHONY: common-unused common-unused: $(GOVENDOR) +ifdef GOVENDOR @echo ">> running check for unused packages" @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod +ifneq (,$(wildcard vendor)) + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif .PHONY: common-build common-build: promu @echo ">> building binaries" - $(PROMU) build --prefix $(PREFIX) + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) .PHONY: common-tarball common-tarball: promu @@ -120,13 +172,52 @@ common-docker-tag-latest: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" .PHONY: promu -promu: - GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu +promu: $(PROMU) + +$(PROMU): + curl -s -L $(PROMU_URL) | tar -xvz -C /tmp + mkdir -v -p $(FIRST_GOPATH)/bin + cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh .PHONY: $(STATICCHECK) $(STATICCHECK): - GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck +ifdef GO111MODULE +# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. +# See https://github.com/golang/go/issues/27643. +# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. + tmpModule=$$(mktemp -d 2>&1) && \ + mkdir -p $${tmpModule}/staticcheck && \ + cd "$${tmpModule}"/staticcheck && \ + GO111MODULE=on $(GO) mod init example.com/staticcheck && \ + GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ + rm -rf $${tmpModule}; +else + GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck +endif +ifdef GOVENDOR .PHONY: $(GOVENDOR) $(GOVENDOR): GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md index 17d46ecaf..894a6a3e2 100644 --- a/vendor/github.com/prometheus/client_golang/README.md +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -11,7 +11,7 @@ Prometheus HTTP API. __This library requires Go1.7 or later.__ -## Important note about releases, versioning, tagging, stability, and your favorite Go dependency management tool +## Important note about releases, versioning, tagging, and stability While our goal is to follow [Semantic Versioning](https://semver.org/), this repository is still pre-1.0.0. To quote the @@ -22,38 +22,42 @@ declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working towards a 1.0.0 release that actually deserves its major version number. Having said that, we aim for always keeping the tip of master in a workable -state and for only introducing ”mildly” breaking changes up to and including -[v0.9.0](https://github.com/prometheus/client_golang/milestone/1). 
After that, -a number of ”hard” breaking changes are planned, see the -[v0.10.0 milestone](https://github.com/prometheus/client_golang/milestone/2), -which should get the library much closer to 1.0.0 state. +state. We occasionally tag versions and track their changes in CHANGELOG.md, +but this happens mostly to keep dependency management tools happy and to give +people a handle they can talk about easily. In particular, all commits in the +master branch have passed the same testing and reviewing. There is no QA +process in place that would render tagged commits more stable or better tested +than others. -Dependency management in Go projects is still in flux, and there are many tools -floating around. While [dep](https://golang.github.io/dep/) might develop into -the de-facto standard tool, it is still officially experimental. The roadmap -for this library has been laid out with a lot of sometimes painful experience -in mind. We really cannot adjust it every other month to the needs of the -currently most popular or most promising Go dependency management tool. The -recommended course of action with dependency management tools is the following: +There is a plan behind the current (pre-1.0.0) versioning, though: -- Do not expect strict post-1.0.0 semver semantics prior to the 1.0.0 - release. If your dependency management tool expects strict post-1.0.0 semver - semantics, you have to wait. Sorry. -- If you want absolute certainty, please lock to a specific commit. You can - also lock to tags, but please don't ask for more tagging. This would suggest - some release or stability testing procedure that simply is not in place. As - said above, we are aiming for stability of the tip of master, but if we - tagged every single commit, locking to tags would be the same as locking to - commits. -- If you want to get the newer features and improvements and are willing to - take the minor risk of newly introduced bugs and “mild” breakage, just always - update to the tip of master (which is essentially the original idea of Go - dependency management). We recommend to not use features marked as - _deprecated_ in this case. -- Once [v0.9.0](https://github.com/prometheus/client_golang/milestone/1) is - out, you could lock to v0.9.x to get bugfixes (and perhaps minor new - features) while avoiding the “hard” breakage that will come with post-0.9 - features. +- v0.9 is the “production release”, currently tracked in the master + branch. “Patch” releases will usually be just bug fixes, indeed, but + important new features that do not require invasive code changes might also + be included in those. We do not plan any breaking changes from one v0.9.x + release to any later v0.9.y release, but nothing is guaranteed. Since the + master branch will eventually be switched over to track the upcoming v0.10 + (see below), we recommend to tell your dependency management tool of choice + to use the latest v0.9.x release, at least for your production software. In + that way, you should get bug fixes and non-invasive, low-risk new features + without the need to change anything on your part. +- v0.10 is a planned release that will have a _lot_ of breaking changes + (despite being only a “minor” release in the Semantic Versioning terminology, + but as said, pre-1.0.0 means nothing is guaranteed). Essentially, we have + been piling up feature requests that require breaking changes for a while, + and they are all collected in the + [v0.10 milestone](https://github.com/prometheus/client_golang/milestone/2). 
+ Since there will be so many breaking changes, the development for v0.10 is + currently not happening in the master branch, but in the + [dev-0.10 branch](https://github.com/prometheus/client_golang/tree/dev-0.10). + It will violently change for a while, and it will definitely be in a + non-working state now and then. It should only be used for sneak-peaks and + discussions of the new features and designs. +- Once v0.10 is ready for real-life use, it will be merged into the master + branch (which is the reason why you should lock your dependency management + tool to v0.9.x and only migrate to v0.10 when both you and v0.10 are ready + for it). In the ideal case, v0.10 will be the basis for the future v1.0 + release, but we cannot provide an ETA at this time. ## Instrumenting applications @@ -62,8 +66,8 @@ recommended course of action with dependency management tools is the following: The [`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) contains the instrumentation library. See the -[best practices section](http://prometheus.io/docs/practices/naming/) of the -Prometheus documentation to learn more about instrumenting applications. +[guide](https://prometheus.io/docs/guides/go-application/) on the Prometheus +website to learn more about instrumenting applications. The [`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION index a3df0a695..f374f6662 100644 --- a/vendor/github.com/prometheus/client_golang/VERSION +++ b/vendor/github.com/prometheus/client_golang/VERSION @@ -1 +1 @@ -0.8.0 +0.9.1 diff --git a/vendor/github.com/prometheus/client_golang/go.mod b/vendor/github.com/prometheus/client_golang/go.mod new file mode 100644 index 000000000..69f9607dd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/go.mod @@ -0,0 +1,12 @@ +module github.com/prometheus/client_golang + +require ( + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/golang/protobuf v1.2.0 + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce + github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d + golang.org/x/net v0.0.0-20181114220301-adae6a3d119a + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect +) diff --git a/vendor/github.com/prometheus/client_golang/go.sum b/vendor/github.com/prometheus/client_golang/go.sum new file mode 100644 index 000000000..52c8dd18c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/go.sum @@ -0,0 +1,16 @@ +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce h1:X0jFYGnHemYDIW6jlc+fSI8f9Cg+jqCnClYP2WgZT/A= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 08491bef0..c0d70b2fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -40,7 +40,8 @@ type Collector interface { // Collector may yield any Metric it sees fit in its Collect method. // // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. // // If a Collector encounters an error while executing this method, it // must send an invalid descriptor (created with NewInvalidDesc) to diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 765e4550c..d463e36d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -136,7 +136,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { return &CounterVec{ metricVec: newMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 7b8827ffb..1d034f871 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -93,7 +93,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // First add only the const label names and sort them... for labelName := range constLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, labelName) @@ -115,7 +115,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // dimension with a different mix between preset and variable labels. 
for _, labelName := range variableLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, "$"+labelName) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 17c72d7eb..71d406bd9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -147,7 +147,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { return &GaugeVec{ metricVec: newMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 4d7fa976e..f88da707b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -165,7 +165,7 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index 4b8e60273..9f0875bfc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -15,9 +15,7 @@ package prometheus import ( "bufio" - "bytes" "compress/gzip" - "fmt" "io" "net" "net/http" @@ -41,19 +39,10 @@ const ( acceptEncodingHeader = "Accept-Encoding" ) -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, } // Handler returns an HTTP handler for the DefaultGatherer. It is @@ -71,58 +60,40 @@ func Handler() http.Handler { // Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) // instead. See there for further documentation. 
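The label-cardinality changes above replace the bare `errInconsistentCardinality` panic with a message that names the metric and the mismatched labels. A rough illustration of what triggers it (the metric name and labels here are invented for the example):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Hypothetical metric: two variable labels are declared...
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Requests processed, by method and code.",
		},
		[]string{"method", "code"},
	)

	// ...so exactly two label values must be supplied.
	requests.WithLabelValues("GET", "200").Inc()

	// Supplying a single value panics; with this change the panic message
	// now includes the metric name plus the label names and values involved.
	// requests.WithLabelValues("GET").Inc()
}
```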
func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { mfs, err := DefaultGatherer.Gather() if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + for _, mf := range mfs { if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) }) } -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - var instLabels = []string{"method", "code"} type nower interface { @@ -503,3 +474,31 @@ func sanitizeCode(s int) string { return strconv.Itoa(s) } } + +// gzipAccepted returns whether the client will accept gzip-encoded content. +func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerErrer. Error contents is +// supposed to be uncompressed plain text. However, same as with a plain +// http.Error, any header settings will be void if the header has already been +// sent. The error message will still be written to the writer, but it will +// probably be of limited use. 
+func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index e68f132ec..2744443ac 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -37,9 +37,22 @@ const reservedLabelPrefix = "__" var errInconsistentCardinality = errors.New("inconsistent label cardinality") +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { - return errInconsistentCardinality + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) } for name, val := range labels { @@ -53,7 +66,11 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { - return errInconsistentCardinality + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) } for _, val := range vals { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 01357374f..668eb6b3c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -32,7 +32,6 @@ package promhttp import ( - "bytes" "compress/gzip" "fmt" "io" @@ -53,19 +52,10 @@ const ( acceptEncodingHeader = "Accept-Encoding" ) -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, } // Handler returns an http.Handler for the prometheus.DefaultGatherer, using @@ -100,19 +90,18 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } - h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. 
defer func() { <-inFlightSem }() default: - http.Error(w, fmt.Sprintf( + http.Error(rsp, fmt.Sprintf( "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, ), http.StatusServiceUnavailable) return } } - mfs, err := reg.Gather() if err != nil { if opts.ErrorLog != nil { @@ -123,26 +112,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { panic(err) case ContinueOnError: if len(mfs) == 0 { - http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + // Still report the error if no metrics have been gathered. + httpError(rsp, err) return } case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf, opts.DisableCompression) - enc := expfmt.NewEncoder(writer, contentType) + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + var lastErr error for _, mf := range mfs { if err := enc.Encode(mf); err != nil { lastErr = err if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding metric family:", err) + opts.ErrorLog.Println("error encoding and sending metric family:", err) } switch opts.ErrorHandling { case PanicOnError: @@ -150,28 +153,15 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { case ContinueOnError: // Handled later. case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } } - if closer, ok := writer.(io.Closer); ok { - closer.Close() + + if lastErr != nil { + httpError(rsp, lastErr) } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil { - opts.ErrorLog.Println("error while sending encoded metrics:", err) - } - // TODO(beorn7): Consider streaming serving of metrics. }) if opts.Timeout <= 0 { @@ -292,20 +282,30 @@ type HandlerOpts struct { Timeout time.Duration } -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { - if compressionDisabled { - return writer, "" - } - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") +// gzipAccepted returns whether the client will accept gzip-encoded content. 
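For context, a minimal usage sketch (assumed, not taken from this diff) of the rewritten promhttp handler, exercising the options it honours: gzip negotiation via Accept-Encoding, a cap on in-flight scrapes, and a timeout.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	handler := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
		DisableCompression:  false, // keep gzip for clients that accept it
		MaxRequestsInFlight: 5,     // additional concurrent scrapes get a 503
		Timeout:             10 * time.Second,
		ErrorHandling:       promhttp.ContinueOnError,
	})
	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}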
+func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") for _, part := range parts { part = strings.TrimSpace(part) if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" + return true } } - return writer, "" + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerErrer. Error contents is +// supposed to be uncompressed plain text. However, same as with a plain +// http.Error, any header settings will be void if the header has already been +// sent. The error message will still be written to the writer, but it will +// probably be of limited use. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 2c0b90888..f98c81a86 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -16,6 +16,9 @@ package prometheus import ( "bytes" "fmt" + "io/ioutil" + "os" + "path/filepath" "runtime" "sort" "strings" @@ -23,6 +26,7 @@ import ( "unicode/utf8" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" @@ -107,9 +111,6 @@ type Registerer interface { // Collector, and for providing a Collector that will not cause // inconsistent metrics on collection. (This would lead to scrape // errors.) - // - // It is in general not safe to register the same Collector multiple - // times concurrently. Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an @@ -273,7 +274,12 @@ func (r *Registry) Register(c Collector) error { close(descChan) }() r.mtx.Lock() - defer r.mtx.Unlock() + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() // Conduct various tests... for desc := range descChan { @@ -531,6 +537,38 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + // processMetric is an internal helper method only used by the Gather method. 
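A short, assumed usage sketch for the newly added WriteToTextfile helper (the output path below is hypothetical): it gathers from the given registry and atomically replaces the target file, which is the pattern the node exporter's textfile collector expects.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	jobsDone := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "batch_jobs_completed_total",
		Help: "Total number of completed batch jobs.",
	})
	reg.MustRegister(jobsDone)
	jobsDone.Add(42)

	// Hypothetical path; the textfile collector looks for files ending in ".prom".
	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/batch_jobs.prom", reg); err != nil {
		log.Fatal(err)
	}
}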
func processMetric( metric Metric, @@ -785,6 +823,8 @@ func checkMetricConsistency( dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, ) error { + name := metricFamily.GetName() + // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -793,33 +833,42 @@ func checkMetricConsistency( metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( "collected metric %q { %s} is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + name, dtoMetric, metricFamily.GetType(), ) } + previousLabelName := "" for _, labelPair := range dtoMetric.GetLabel() { - if !checkLabelName(labelPair.GetName()) { + labelName := labelPair.GetName() + if labelName == previousLabelName { return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - metricFamily.GetName(), dtoMetric, labelPair.GetName(), + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, ) } - if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel { + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { return fmt.Errorf( "collected metric %q { %s} must not have an explicit %q label", - metricFamily.GetName(), dtoMetric, quantileLabel, + name, dtoMetric, quantileLabel, ) } if !utf8.ValidString(labelPair.GetValue()) { return fmt.Errorf( "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue()) + name, dtoMetric, labelName, labelPair.GetValue()) } + previousLabelName = labelName } // Is the metric unique (i.e. no other metric with the same name and the same labels)? h := hashNew() - h = hashAdd(h, metricFamily.GetName()) + h = hashAdd(h, name) h = hashAddByte(h, separatorByte) // Make sure label pairs are sorted. We depend on it for the consistency // check. 
@@ -833,7 +882,7 @@ func checkMetricConsistency( if _, exists := metricHashes[h]; exists { return fmt.Errorf( "collected metric %q { %s} was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, + name, dtoMetric, ) } metricHashes[h] = struct{}{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index f7e92d829..2980614df 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -181,7 +181,7 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { diff --git a/vendor/github.com/prometheus/common/MAINTAINERS.md b/vendor/github.com/prometheus/common/MAINTAINERS.md index 1b3152161..d1cb20b89 100644 --- a/vendor/github.com/prometheus/common/MAINTAINERS.md +++ b/vendor/github.com/prometheus/common/MAINTAINERS.md @@ -1 +1 @@ -* Fabian Reinartz +* Brian Brazil diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f11321cd0..8e473d0fe 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,13 +14,45 @@ package expfmt import ( + "bytes" "fmt" "io" "math" + "strconv" "strings" + "sync" + + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" +) + +// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialBufSize = 512 + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } ) // MetricFamilyToText converts a MetricFamily proto message into text format and @@ -32,37 +64,92 @@ import ( // will result in invalid text format output. // // This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) } name := in.GetName() if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) + return 0, fmt.Errorf("MetricFamily has no name: %s", in) } + // Try the interface upgrade. If it doesn't work, we'll use a + // bytes.Buffer from the sync.Pool and write out its content to out in a + // single go in the end. 
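As an aside, a small assumed example of the exported entry point whose internals are rewritten here: MetricFamilyToText writing into a *bytes.Buffer, which already satisfies the new enhancedWriter interface and therefore skips the pooled intermediate buffer.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("example_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}

	// *bytes.Buffer implements WriteRune/WriteString/WriteByte, so the
	// encoder writes into it directly.
	var buf bytes.Buffer
	if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}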
+ w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bytes.Buffer) + b.Reset() + w = b + defer func() { + bWritten, bErr := out.Write(b.Bytes()) + written = bWritten + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + // Comments, first HELP, then TYPE. if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) + n, err = w.WriteString("# HELP ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return } } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) + n, err = w.WriteString("# TYPE ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return } // Finally the samples, one line for each. @@ -75,9 +162,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Counter.GetValue(), - out, ) case dto.MetricType_GAUGE: if metric.Gauge == nil { @@ -86,9 +172,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Gauge.GetValue(), - out, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { @@ -97,9 +182,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Untyped.GetValue(), - out, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { @@ -109,29 +193,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } for _, q := range metric.Summary.Quantile { n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), q.GetValue(), - out, ) written += n if err != nil { - return written, err + return } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), - out, ) case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { @@ -140,46 +221,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } infSeen := false - for _, q := range metric.Histogram.Bucket { + for _, b := range metric.Histogram.Bucket { n, err = writeSample( - name+"_bucket", 
metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), ) written += n if err != nil { - return written, err + return } - if math.IsInf(q.GetUpperBound(), +1) { + if math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), float64(metric.Histogram.GetSampleCount()), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Histogram.GetSampleCount()), - out, ) default: return written, fmt.Errorf( @@ -188,116 +265,204 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } written += n if err != nil { - return written, err + return } } - return written, nil + return } -// writeSample writes a single sample in text format to out, given the metric +// writeSample writes a single sample in text format to w, given the metric // name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. 
func writeSample( - name string, + w enhancedWriter, + name, suffix string, metric *dto.Metric, - additionalLabelName, additionalLabelValue string, + additionalLabelName string, additionalLabelValue float64, value float64, - out io.Writer, ) (int, error) { var written int - n, err := fmt.Fprint(out, name) + n, err := w.WriteString(name) written += n if err != nil { return written, err } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + if suffix != "" { + n, err = w.WriteString(suffix) written += n if err != nil { return written, err } } - n, err = out.Write([]byte{'\n'}) + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) written += n if err != nil { return written, err } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } return written, nil } -// labelPairsToText converts a slice of LabelPair proto messages plus the +// writeLabelPairs converts a slice of LabelPair proto messages plus the // explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. 
+func writeLabelPairs( + w enhancedWriter, in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, + additionalLabelName string, additionalLabelValue float64, ) (int, error) { if len(in) == 0 && additionalLabelName == "" { return 0, nil } - var written int - separator := '{' + var ( + written int + separator byte = '{' + ) for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) written += n if err != nil { return written, err } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } separator = ',' } if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) written += n if err != nil { return written, err } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } } - n, err := out.Write([]byte{'}'}) - written += n + err := w.WriteByte('}') + written++ if err != nil { return written, err } return written, nil } +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. var ( - escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) - escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) ) -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { if includeDoubleQuote { - return escapeWithDoubleQuote.Replace(v) + return quotedEscaper.WriteString(w, v) + } else { + return escaper.WriteString(w, v) } - - return escape.Replace(v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. 
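A standalone sketch (names invented, not from the diff) of the pooling pattern the writeFloat/writeInt helpers rely on: formatting numbers with strconv.Append* into a reusable byte slice taken from a sync.Pool instead of going through fmt.

package main

import (
	"fmt"
	"strconv"
	"sync"
)

// A pooled scratch buffer, sized similarly to the numBufPool above.
var numBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 24)
		return &b
	},
}

func formatFloat(f float64) string {
	bp := numBufPool.Get().(*[]byte)
	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
	s := string(*bp)
	numBufPool.Put(bp)
	return s
}

func main() {
	fmt.Println(formatFloat(0.1234)) // prints 0.1234
}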
+func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err } diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 74ed5a9f7..46259b1f1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -43,7 +43,7 @@ const ( // (1970-01-01 00:00 UTC) excluding leap seconds. type Time int64 -// Interval describes and interval between two timestamps. +// Interval describes an interval between two timestamps. type Interval struct { Start, End Time } diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 4d1098394..40540b02e 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -11,49 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) - -ifdef DEBUG - bindata_flags = -debug -endif - -STATICCHECK_IGNORE = - -all: format staticcheck build test - -style: - @echo ">> checking code style" - @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' - -check_license: - @echo ">> checking license header" - @./scripts/check_license.sh - -test: fixtures/.unpacked sysfs/fixtures/.unpacked - @echo ">> running all tests" - @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) - -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) - -staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +include Makefile.common %/.unpacked: %.ttar ./ttar -C $(dir $*) -x -f $*.ttar @@ -65,13 +23,5 @@ update_fixtures: fixtures.ttar sysfs/fixtures.ttar rm -v $(dir $*)fixtures/.unpacked ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ -$(FIRST_GOPATH)/bin/staticcheck: - @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck - -.PHONY: all style check_license format test vet staticcheck - -# Declaring the binaries at their default locations as PHONY targets is a hack -# to ensure the latest version is downloaded on every make execution. 
-# If this is not desired, copy/symlink these binaries to a different path and -# set the respective environment variables. -.PHONY: $(GOPATH)/bin/staticcheck +.PHONY: test +test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 000000000..741579e60 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,223 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +unexport GOVENDOR +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif + + unexport GO111MODULE +endif +PROMU := $(FIRST_GOPATH)/bin/promu +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +pkgs = ./... + +GO_VERSION ?= $(shell $(GO) version) +GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) + +PROMU_VERSION ?= 0.2.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKER_REPO ?= prom + +.PHONY: all +all: precheck style staticcheck unused build test + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . 
-path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-test-short +common-test-short: + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-staticcheck +common-staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) +else + $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +endif + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod +ifneq (,$(wildcard vendor)) + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker +common-docker: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . + +.PHONY: common-docker-publish +common-docker-publish: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + +.PHONY: common-docker-tag-latest +common-docker-tag-latest: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + curl -s -L $(PROMU_URL) | tar -xvz -C /tmp + mkdir -v -p $(FIRST_GOPATH)/bin + cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +.PHONY: $(STATICCHECK) +$(STATICCHECK): +ifdef GO111MODULE +# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. +# See https://github.com/golang/go/issues/27643. +# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. 
+ tmpModule=$$(mktemp -d 2>&1) && \ + mkdir -p $${tmpModule}/staticcheck && \ + cd "$${tmpModule}"/staticcheck && \ + GO111MODULE=on $(GO) mod init example.com/staticcheck && \ + GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ + rm -rf $${tmpModule}; +else + GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod new file mode 100644 index 000000000..e89ee6c90 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -0,0 +1 @@ +module github.com/prometheus/procfs diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 1ad21c91a..2ff228e9d 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -13,7 +13,11 @@ package util -import "strconv" +import ( + "io/ioutil" + "strconv" + "strings" +) // ParseUint32s parses a slice of strings into a slice of uint32s. func ParseUint32s(ss []string) ([]uint32, error) { @@ -44,3 +48,12 @@ func ParseUint64s(ss []string) ([]uint64, error) { return us, nil } + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go new file mode 100644 index 000000000..df0d567b7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. 
+ // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + b := make([]byte, 128) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 5cc710ccd..9beeda8ec 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -925,13 +925,16 @@ func stripUnknownFlagValue(args []string) []string { } first := args[0] - if first[0] == '-' { + if len(first) > 0 && first[0] == '-' { //--unknown --next-flag ... return args } //--unknown arg ... (args will be arg ...) - return args[1:] + if len(args) > 1 { + return args[1:] + } + return nil } func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go new file mode 100644 index 000000000..5ceda3965 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt Value +type stringToIntValue struct { + value *map[string]int + changed bool +} + +func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { + ssv := new(stringToIntValue) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToIntValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.Atoi(kv[1]) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToIntValue) Type() string { + return "stringToInt" +} + +func (s *stringToIntValue) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.Itoa(v)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToIntConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.Atoi(kv[1]) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt return the map[string]int value of a flag with the given name +func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { + val, err := f.getFlagType(name, "stringToInt", stringToIntConv) + if err != nil { + return map[string]int{}, err + } + return val.(map[string]int), nil +} + +// StringToIntVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]int variable in which to store the values of the multiple flags. 
+// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { + f.VarP(newStringToIntValue(value, p), name, "", usage) +} + +// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { + f.VarP(newStringToIntValue(value, p), name, shorthand, usage) +} + +// StringToIntVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]int variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { + CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) +} + +// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. +func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { + CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) +} + +// StringToInt defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { + p := map[string]int{} + f.StringToIntVarP(&p, name, "", value, usage) + return &p +} + +// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { + p := map[string]int{} + f.StringToIntVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt(name string, value map[string]int, usage string) *map[string]int { + return CommandLine.StringToIntP(name, "", value, usage) +} + +// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
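A brief assumed example of the new stringToInt flag type in use; the flag name here is made up. Values are given as comma-separated key=value pairs and collected into a map[string]int.

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// e.g. run with: --limits=cpu=2,mem=4
	limits := flag.StringToInt("limits", map[string]int{}, "resource limits as key=value pairs")
	flag.Parse()
	fmt.Println(*limits)
}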
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { + return CommandLine.StringToIntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go new file mode 100644 index 000000000..890a01afc --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -0,0 +1,160 @@ +package pflag + +import ( + "bytes" + "encoding/csv" + "fmt" + "strings" +) + +// -- stringToString Value +type stringToStringValue struct { + value *map[string]string + changed bool +} + +func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { + ssv := new(stringToStringValue) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToStringValue) Set(val string) error { + var ss []string + n := strings.Count(val, "=") + switch n { + case 0: + return fmt.Errorf("%s must be formatted as key=value", val) + case 1: + ss = append(ss, strings.Trim(val, `"`)) + default: + r := csv.NewReader(strings.NewReader(val)) + var err error + ss, err = r.Read() + if err != nil { + return err + } + } + + out := make(map[string]string, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + out[kv[0]] = kv[1] + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToStringValue) Type() string { + return "stringToString" +} + +func (s *stringToStringValue) String() string { + records := make([]string, 0, len(*s.value)>>1) + for k, v := range *s.value { + records = append(records, k+"="+v) + } + + var buf bytes.Buffer + w := csv.NewWriter(&buf) + if err := w.Write(records); err != nil { + panic(err) + } + w.Flush() + return "[" + strings.TrimSpace(buf.String()) + "]" +} + +func stringToStringConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]string{}, nil + } + r := csv.NewReader(strings.NewReader(val)) + ss, err := r.Read() + if err != nil { + return nil, err + } + out := make(map[string]string, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + out[kv[0]] = kv[1] + } + return out, nil +} + +// GetStringToString return the map[string]string value of a flag with the given name +func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { + val, err := f.getFlagType(name, "stringToString", stringToStringConv) + if err != nil { + return map[string]string{}, err + } + return val.(map[string]string), nil +} + +// StringToStringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { + f.VarP(newStringToStringValue(value, p), name, "", usage) +} + +// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { + f.VarP(newStringToStringValue(value, p), name, shorthand, usage) +} + +// StringToStringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { + CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) +} + +// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. +func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { + CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) +} + +// StringToString defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { + p := map[string]string{} + f.StringToStringVarP(&p, name, "", value, usage) + return &p +} + +// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { + p := map[string]string{} + f.StringToStringVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToString defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToString(name string, value map[string]string, usage string) *map[string]string { + return CommandLine.StringToStringP(name, "", value, usage) +} + +// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. +func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { + return CommandLine.StringToStringP(name, shorthand, value, usage) +} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore new file mode 100644 index 000000000..74a6db472 --- /dev/null +++ b/vendor/go.opencensus.io/.gitignore @@ -0,0 +1,9 @@ +/.idea/ + +# go.opencensus.io/exporter/aws +/exporter/aws/ + +# Exclude vendor, use dep ensure after checkout: +/vendor/github.com/ +/vendor/golang.org/ +/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml new file mode 100644 index 000000000..73c8571c3 --- /dev/null +++ b/vendor/go.opencensus.io/.travis.yml @@ -0,0 +1,27 @@ +language: go + +go: + # 1.8 is tested by AppVeyor + - 1.11.x + +go_import_path: go.opencensus.io + +# Don't email me the results of the test runs. +notifications: + email: false + +before_script: + - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any + - PKGS=$(go list ./... 
| grep -v /vendor/) # All the import paths, excluding vendor/ if any + - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release + - go get github.com/rakyll/embedmd + +script: + - embedmd -d README.md # Ensure embedded code is up-to-date + - go build ./... # Ensure dependency updates don't break build + - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi + - go vet ./... + - go test -v -race $PKGS # Run all the tests with the race detector enabled + - GOARCH=386 go test -v $PKGS # Run all tests against a 386 architecture + - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' + - go run internal/check/version.go diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md new file mode 100644 index 000000000..3f3aed396 --- /dev/null +++ b/vendor/go.opencensus.io/CONTRIBUTING.md @@ -0,0 +1,56 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. + +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ + +## Instructions + +Fork the repo, checkout the upstream repo to your GOPATH by: + +``` +$ go get -d go.opencensus.io +``` + +Add your fork as an origin: + +``` +cd $(go env GOPATH)/src/go.opencensus.io +git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git +``` + +Run tests: + +``` +$ go test ./... +``` + +Checkout a new branch, make modifications and push the branch to your fork: + +``` +$ git checkout -b feature +# edit files +$ git commit +$ git push fork feature +``` + +Open a pull request against the main opencensus-go repo. diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock new file mode 100644 index 000000000..3be12ac8f --- /dev/null +++ b/vendor/go.opencensus.io/Gopkg.lock @@ -0,0 +1,231 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + branch = "master" + digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" + name = "git.apache.org/thrift.git" + packages = ["lib/go/thrift"] + pruneopts = "UT" + revision = "6e67faa92827ece022380b211c2caaadd6145bf5" + source = "github.com/apache/thrift" + +[[projects]] + branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" + name = "github.com/openzipkin/zipkin-go" + packages = [ + ".", + "idgenerator", + "model", + "propagation", + "reporter", + "reporter/http", + ] + pruneopts = "UT" + revision = "d455a5674050831c1e187644faa4046d653433c2" + version = "v0.1.1" + +[[projects]] + digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp", + ] + pruneopts = "UT" + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + +[[projects]] + branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" + +[[projects]] + branch = "master" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "UT" + revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" + +[[projects]] + branch = "master" + digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "922f4815f713f213882e8ef45e0d315b164d705c" + +[[projects]] + branch = "master" + digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" + +[[projects]] + digest = 
"1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "e21acd801f91da814261b938941d193bb036441a" + +[[projects]] + branch = "master" + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" + +[[projects]] + digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" + version = "v1.14.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "git.apache.org/thrift.git/lib/go/thrift", + "github.com/golang/protobuf/proto", + "github.com/openzipkin/zipkin-go", + "github.com/openzipkin/zipkin-go/model", + "github.com/openzipkin/zipkin-go/reporter", + "github.com/openzipkin/zipkin-go/reporter/http", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "golang.org/x/net/context", + "golang.org/x/net/http2", + "google.golang.org/api/support/bundler", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/metadata", + "google.golang.org/grpc/stats", + "google.golang.org/grpc/status", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml new file mode 100644 index 000000000..a9f3cd68e --- /dev/null +++ b/vendor/go.opencensus.io/Gopkg.toml @@ -0,0 +1,36 @@ +# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" +# to avoid locking to a particular minor version which can cause dep to not be +# able to find a satisfying dependency graph. 
+ +[[constraint]] + branch = "master" + name = "git.apache.org/thrift.git" + source = "github.com/apache/thrift" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "1.0.0" + +[[constraint]] + name = "github.com/openzipkin/zipkin-go" + version = ">=0.1.0" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = ">=0.8.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + branch = "master" + name = "google.golang.org/api" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.11.3" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md new file mode 100644 index 000000000..97d66983d --- /dev/null +++ b/vendor/go.opencensus.io/README.md @@ -0,0 +1,263 @@ +# OpenCensus Libraries for Go + +[![Build Status][travis-image]][travis-url] +[![Windows Build Status][appveyor-image]][appveyor-url] +[![GoDoc][godoc-image]][godoc-url] +[![Gitter chat][gitter-image]][gitter-url] + +OpenCensus Go is a Go implementation of OpenCensus, a toolkit for +collecting application performance and behavior monitoring data. +Currently it consists of three major components: tags, stats and tracing. + +## Installation + +``` +$ go get -u go.opencensus.io +``` + +The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). +The use of vendoring or a dependency management tool is recommended. + +## Prerequisites + +OpenCensus Go libraries require Go 1.8 or later. + +## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) + +If you're using a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + +## Exporters + +OpenCensus can export instrumentation data to various backends. 
+OpenCensus has exporter implementations for the following, users +can implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): + +* [Prometheus][exporter-prom] for stats +* [OpenZipkin][exporter-zipkin] for traces +* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces +* [Jaeger][exporter-jaeger] for traces +* [AWS X-Ray][exporter-xray] for traces +* [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats +* [Honeycomb][exporter-honeycomb] for traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +## Tags + +Tags represent propagated key-value pairs. They are propagated using `context.Context` +in the same process or can be encoded to be transmitted on the wire. Usually, this will +be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` +for gRPC. + +Package `tag` allows adding or modifying tags in the current context. + +[embedmd]:# (internal/readme/tags.go new) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Upsert(userIDKey, "cde36753ed"), +) +if err != nil { + log.Fatal(err) +} +``` + +## Stats + +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. + +OpenCensus stats collection happens in two stages: + +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data + +### Recording + +Measurements are data points associated with a measure. +Recording implicitly tags the set of Measurements with the tags from the +provided context: + +[embedmd]:# (internal/readme/stats.go record) +```go +stats.Record(ctx, videoSize.M(102478)) +``` + +### Views + +Views are how Measures are aggregated. You can think of them as queries over the +set of recorded data points (measurements). + +Views have two parts: the tags to group by and the aggregation type used. + +Currently three types of aggregations are supported: +* CountAggregation is used to count the number of times a sample was recorded. +* DistributionAggregation is used to provide a histogram of the values of the samples. +* SumAggregation is used to sum up all sample values. + +[embedmd]:# (internal/readme/stats.go aggs) +```go +distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) +countAgg := view.Count() +sumAgg := view.Sum() +``` + +Here we create a view with the DistributionAggregation over our measure. + +[embedmd]:# (internal/readme/stats.go view) +```go +if err := view.Register(&view.View{ + Name: "example.com/video_size_distribution", + Description: "distribution of processed video size over time", + Measure: videoSize, + Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), +}); err != nil { + log.Fatalf("Failed to register view: %v", err) +} +``` + +Register begins collecting data for the view. Registered views' data will be +exported via the registered exporters. 
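Editor's note: to connect the stats snippets in the vendored README end-to-end, here is a minimal, self-contained sketch. It is not part of the vendored files; the measure, tag key, and view names are illustrative only. It defines a measure, registers a Distribution view over it, and records a tagged measurement, which then flows to whatever exporters are registered.

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

// Hypothetical measure and tag key, named after the README examples.
var (
	videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes)
	osKey, _  = tag.NewKey("example.com/keys/os") // error ignored for brevity in this sketch
)

func main() {
	// Aggregate recorded sizes into a histogram, grouped by the OS tag.
	v := &view.View{
		Name:        "example.com/views/video_size_distribution",
		Description: "distribution of processed video size over time",
		TagKeys:     []tag.Key{osKey},
		Measure:     videoSize,
		Aggregation: view.Distribution(0, 1<<16, 1<<32),
	}
	if err := view.Register(v); err != nil {
		log.Fatalf("failed to register view: %v", err)
	}

	// Tag the context, then record a data point; registered exporters
	// (Prometheus, Stackdriver, ...) receive the aggregated view data.
	ctx, err := tag.New(context.Background(), tag.Insert(osKey, "macOS-10.12.5"))
	if err != nil {
		log.Fatal(err)
	}
	stats.Record(ctx, videoSize.M(25648))
}
```

Measurements recorded before a view is registered are not aggregated, so registration typically happens at program start.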
+ +## Traces + +A distributed trace tracks the progression of a single user request as +it is handled by the services and processes that make up an application. +Each step is called a span in the trace. Spans include metadata about the step, +including especially the time spent in the step, called the span’s latency. + +Below you see a trace and several spans underneath it. + +![Traces and spans](https://i.imgur.com/7hZwRVj.png) + +### Spans + +Span is the unit step in a trace. Each span has a name, latency, status and +additional metadata. + +Below we are starting a span for a cache read and ending it +when we are done: + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +### Propagation + +Spans can have parents or can be root spans if they don't have any parents. +The current span is propagated in-process and across the network to allow associating +new child spans with the parent. + +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context +doesn't contain a span. Or, it creates a child of the span that is +already in current context. The returned context can be used to keep +propagating the newly created span in the current context. + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +Across the network, OpenCensus provides different propagation +methods for different protocols. + +* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) + by default but can be configured to use a custom propagation method by setting another + [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). + +## Execution Tracer + +With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. +See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) +for an example of their mutual use. + +## Profiles + +OpenCensus tags can be applied as profiler labels +for users who are on Go 1.9 and above. + +[embedmd]:# (internal/readme/tags.go profiler) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Insert(userIDKey, "fff0989878"), +) +if err != nil { + log.Fatal(err) +} +tag.Do(ctx, func(ctx context.Context) { + // Do work. + // When profiling is on, samples will be + // recorded with the key/values from the tag map. +}) +``` + +A screenshot of the CPU profile from the program above: + +![CPU profile](https://i.imgur.com/jBKjlkw.png) + +## Deprecation Policy + +Before version 1.0.0, the following deprecation policy will be observed: + +No backwards-incompatible changes will be made except for the removal of symbols that have +been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release +removing the *Deprecated* functionality will be made no sooner than 28 days after the first +release in which the functionality was marked *Deprecated*. 
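Editor's note: as a companion to the tracing and propagation discussion above, the sketch below (not from the vendored README; the span name and target URL are placeholders) wires the `ochttp.Transport` from this vendor drop into an `http.Client`, so each outgoing request gets a client span and B3 headers injected for downstream services.

```go
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
	"go.opencensus.io/trace"
)

func main() {
	// Sample every request so this example always produces spans.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	// Outgoing requests get a client span and B3 headers via the Transport.
	client := &http.Client{
		Transport: &ochttp.Transport{Propagation: &b3.HTTPFormat{}},
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Parent span; the Transport starts a child span for the round trip
	// and injects X-B3-TraceId / X-B3-SpanId / X-B3-Sampled headers.
	ctx, span := trace.StartSpan(req.Context(), "fetch.example")
	defer span.End()

	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```

Setting `Propagation` explicitly is deliberate here: as the vendored `ochttp` documentation notes, the zero value's default propagation format may change, so pinning B3 keeps the wire format stable.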
+ +[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master +[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go +[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true +[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master +[godoc-image]: https://godoc.org/go.opencensus.io?status.svg +[godoc-url]: https://godoc.org/go.opencensus.io +[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg +[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + + +[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap +[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace + +[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus +[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver +[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin +[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger +[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws +[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog +[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite +[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml new file mode 100644 index 000000000..98057888a --- /dev/null +++ b/vendor/go.opencensus.io/appveyor.yml @@ -0,0 +1,24 @@ +version: "{build}" + +platform: x64 + +clone_folder: c:\gopath\src\go.opencensus.io + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + GO111MODULE: 'on' + CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 + +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + +build: false +deploy: false + +test_script: + - cd %APPVEYOR_BUILD_FOLDER% + - go build -v .\... + - go test -v .\... # No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/exemplar/exemplar.go b/vendor/go.opencensus.io/exemplar/exemplar.go new file mode 100644 index 000000000..e676df837 --- /dev/null +++ b/vendor/go.opencensus.io/exemplar/exemplar.go @@ -0,0 +1,78 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exemplar implements support for exemplars. Exemplars are additional +// data associated with each measurement. +// +// Their purpose it to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +package exemplar + +import ( + "context" + "time" +) + +const ( + KeyTraceID = "trace_id" + KeySpanID = "span_id" + KeyPrefixTag = "tag:" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. 
+type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +// The map should only be mutated from AttachmentExtractor functions. +type Attachments map[string]string + +// AttachmentExtractor is a function capable of extracting exemplar attachments +// from the context used to record measurements. +// The map passed to the function should be mutated and returned. It will +// initially be nil: the first AttachmentExtractor that would like to add keys to the +// map is responsible for initializing it. +type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments + +var extractors []AttachmentExtractor + +// RegisterAttachmentExtractor registers the given extractor associated with the exemplar +// type name. +// +// Extractors will be used to attempt to extract exemplars from the context +// associated with each recorded measurement. +// +// Packages that support exemplars should register their extractor functions on +// initialization. +// +// RegisterAttachmentExtractor should not be called after any measurements have +// been recorded. +func RegisterAttachmentExtractor(e AttachmentExtractor) { + extractors = append(extractors, e) +} + +// NewFromContext extracts exemplars from the given context. +// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an +// unspecified order to add attachments to the exemplar. +func AttachmentsFromContext(ctx context.Context) Attachments { + var a Attachments + for _, extractor := range extractors { + a = extractor(ctx, a) + } + return a +} diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod new file mode 100644 index 000000000..1236f4c2f --- /dev/null +++ b/vendor/go.opencensus.io/go.mod @@ -0,0 +1,25 @@ +module go.opencensus.io + +require ( + git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/ghodss/yaml v1.0.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 + github.com/google/go-cmp v0.2.0 + github.com/grpc-ecosystem/grpc-gateway v1.5.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/openzipkin/zipkin-go v0.1.1 + github.com/prometheus/client_golang v0.8.0 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e + github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f + golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e + golang.org/x/text v0.3.0 + google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf + google.golang.org/genproto v0.0.0-20180831171423-11092d34479b + google.golang.org/grpc v1.14.0 + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum new file mode 100644 index 000000000..3e0bab884 --- /dev/null +++ b/vendor/go.opencensus.io/go.sum @@ -0,0 +1,48 @@ +git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw= +git.apache.org/thrift.git 
v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
+google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 000000000..e1d1238d0 --- /dev/null +++ b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 000000000..de8ccf236 --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. +func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 000000000..3b1af8b4b --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,72 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used interally by the stats collector. +package tagencoding // import "go.opencensus.io/internal/tagencoding" + +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 000000000..553ca68dc --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,52 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. +// TODO(#412): remove this +var Trace interface{} + +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. +type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 000000000..62f03486a --- /dev/null +++ b/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. +func Version() string { + return "0.18.0" +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 000000000..da815b2a7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + "net/http/httptrace" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. 
+// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // NameFromRequest holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } + // TODO: remove excessive nesting of http.RoundTrippers here. + format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: startOpts.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 000000000..066ebb87f --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,135 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.URL.Host), + tag.Upsert(Host, req.URL.Host), + tag.Upsert(KeyClientPath, req.URL.Path), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = track + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + m := []stats.Measurement{ + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(t.respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + switch err { + case nil: + t.respSize += int64(n) + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. 
+ t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 000000000..10e626b16 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. +package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 000000000..f777772ec --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,123 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. +package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" + +import ( + "encoding/hex" + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// B3 headers that OpenCensus understands. +const ( + TraceIDHeader = "X-B3-TraceId" + SpanIDHeader = "X-B3-SpanId" + SampledHeader = "X-B3-Sampled" +) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers in B3 propagation format. +// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers +// because there are additional fields not represented in the +// OpenCensus span context. Spans created from the incoming +// header will be the direct children of the client-side span. +// Similarly, reciever of the outgoing spans should use client-side +// span created by OpenCensus as the parent. +type HTTPFormat struct{} + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// SpanContextFromRequest extracts a B3 span context from incoming requests. 
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) + return trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: sampled, + }, true +} + +// ParseTraceID parses the value of the X-B3-TraceId header. +func ParseTraceID(tid string) (trace.TraceID, bool) { + if tid == "" { + return trace.TraceID{}, false + } + b, err := hex.DecodeString(tid) + if err != nil { + return trace.TraceID{}, false + } + var traceID trace.TraceID + if len(b) <= 8 { + // The lower 64-bits. + start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. +func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil { + return trace.SpanID{}, false + } + start := 8 - len(b) + copy(spanID[start:], b) + return spanID, true +} + +// ParseSampled parses the value of the X-B3-Sampled header. +func ParseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(SampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go new file mode 100644 index 000000000..65ab1e996 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go @@ -0,0 +1,187 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracecontext contains HTTP propagator for TraceContext standard. +// See https://github.com/w3c/distributed-tracing for more information. 
+package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + +import ( + "encoding/hex" + "fmt" + "net/http" + "net/textproto" + "regexp" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "go.opencensus.io/trace/tracestate" +) + +const ( + supportedVersion = 0 + maxVersion = 254 + maxTracestateLen = 512 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" + trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` +) + +var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements the TraceContext trace propagation format. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h, ok := getRequestHeader(req, traceparentHeader, false) + if !ok { + return trace.SpanContext{}, false + } + sections := strings.Split(h, "-") + if len(sections) < 4 { + return trace.SpanContext{}, false + } + + if len(sections[0]) != 2 { + return trace.SpanContext{}, false + } + ver, err := hex.DecodeString(sections[0]) + if err != nil { + return trace.SpanContext{}, false + } + version := int(ver[0]) + if version > maxVersion { + return trace.SpanContext{}, false + } + + if version == 0 && len(sections) != 4 { + return trace.SpanContext{}, false + } + + if len(sections[1]) != 32 { + return trace.SpanContext{}, false + } + tid, err := hex.DecodeString(sections[1]) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], tid) + + if len(sections[2]) != 16 { + return trace.SpanContext{}, false + } + sid, err := hex.DecodeString(sections[2]) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.SpanID[:], sid) + + opts, err := hex.DecodeString(sections[3]) + if err != nil || len(opts) < 1 { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(opts[0]) + + // Don't allow all zero trace or span ID. + if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { + return trace.SpanContext{}, false + } + + sc.Tracestate = tracestateFromRequest(req) + return sc, true +} + +// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. +// If commaSeparated is true, multiple header fields with the same field name will be +// combined using ",". +// If no header was found using the given name, "ok" would be false. +// If more than one header was found using the given name while commaSeparated is false, +// "ok" would be false. +func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { + v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] + switch len(v) { + case 0: + return "", false + case 1: + return v[0], true + default: + return strings.Join(v, ","), commaSeparated + } +} + +// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. +// Revisit to return additional boolean value to indicate parsing error when the following issues +// are resolved.
+// https://github.com/w3c/distributed-tracing/issues/172 +// https://github.com/w3c/distributed-tracing/issues/175 +func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { + h, _ := getRequestHeader(req, tracestateHeader, true) + if h == "" { + return nil + } + + var entries []tracestate.Entry + pairs := strings.Split(h, ",") + hdrLenWithoutOWS := len(pairs) - 1 // Number of commas + for _, pair := range pairs { + matches := trimOWSRegExp.FindStringSubmatch(pair) + if matches == nil { + return nil + } + pair = matches[1] + hdrLenWithoutOWS += len(pair) + if hdrLenWithoutOWS > maxTracestateLen { + return nil + } + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return nil + } + entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) + } + ts, err := tracestate.New(nil, entries...) + if err != nil { + return nil + } + + return ts +} + +func tracestateToRequest(sc trace.SpanContext, req *http.Request) { + var pairs = make([]string, 0, len(sc.Tracestate.Entries())) + if sc.Tracestate != nil { + for _, entry := range sc.Tracestate.Entries() { + pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) + } + h := strings.Join(pairs, ",") + + if h != "" && len(h) <= maxTracestateLen { + req.Header.Set(tracestateHeader, h) + } + } +} + +// SpanContextToRequest modifies the given request to include traceparent and tracestate headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + h := fmt.Sprintf("%x-%x-%x-%x", + []byte{supportedVersion}, + sc.TraceID[:], + sc.SpanID[:], + []byte{byte(sc.TraceOptions)}) + req.Header.Set(traceparentHeader, h) + tracestateToRequest(sc, req) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 000000000..dbe22d586 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,51 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + + "go.opencensus.io/tag" +) + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) 
+ } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 000000000..ff72de97a --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,440 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. +// +// Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. + Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions + + // GetStartOptions allows setting start options per request. If set, + // StartOptions is ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path.
+ FormatSpanName func(*http.Request) string +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var tags addedTags + r, traceEnd := h.startTrace(w, r) + defer traceEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeChild, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) + return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.URL.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track.wrappedResponseWriter(), track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertion for ResponseWriter interface +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +var logTagsErrorOnce sync.Once + +func (t *trackingResponseWriter) end(tags *addedTags) { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) 
+ }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. +func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + 
http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 000000000..05c6c56cc --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, 
"Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 000000000..46dcc8e57 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,265 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following client HTTP measures are supported for use in custom views. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +// Client measures supported for use in custom views. +var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. 
Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host, _ = tag.NewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode, _ = tag.NewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path, _ = tag.NewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method, _ = tag.NewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on a + // ServeMux (or similar string). + KeyServerRoute, _ = tag.NewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (e.g. GET, POST, PUT, DELETE, etc.). + KeyClientMethod, _ = tag.NewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath, _ = tag.NewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received. + KeyClientStatus, _ = tag.NewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost, _ = tag.NewKey("http_client_host") +) + +// Default distributions used by views in this package. +var ( + DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views. +// You still need to register these views for data to actually be collected.
+var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount. 
+ ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 000000000..819a2d5ff --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,228 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" + "net/http" + "net/http/httptrace" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. 
+ +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + ctx, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + resp.Body = &bodyTracker{rc: resp.Body, span: span} + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. +type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. 
+func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + return []trace.Attribute{ + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(MethodAttribute, r.Method), + trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + } +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. + if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go new file mode 100644 index 000000000..00d473ee0 --- /dev/null +++ b/vendor/go.opencensus.io/stats/doc.go @@ -0,0 +1,69 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package stats contains support for OpenCensus stats recording. + +OpenCensus allows users to create typed measures, record measurements, +aggregate the collected data, and export the aggregated data. + +Measures + +A measure represents a type of data point to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Measure constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. + +Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Measurements are created from measures with +the current context. Tags from the current context are recorded with the +measurements if there are any. + +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and applications can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + +*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 000000000..ed5455205 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/stats/internal/validation.go new file mode 100644 index 000000000..b946667f9 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/validation.go @@ -0,0 +1,28 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/stats/internal" + +const ( + MaxNameLength = 255 +) + +func IsPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 000000000..64d02b196 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,123 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. +// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. 
+ // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. +type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +// Name returns the name of the measure. +func (m *measureDescriptor) Name() string { + return m.name +} + +// Description returns the description of the measure. +func (m *measureDescriptor) Description() string { + return m.description +} + +// Unit returns the unit of the measure. +func (m *measureDescriptor) Unit() string { + return m.unit +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 000000000..acedb21c4 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,36 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + return Measurement{m: m.measureDescriptor, v: v} +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. 
+func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 000000000..c4243ba74 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,36 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{m: m.measureDescriptor, v: float64(v)} +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 000000000..0aced02c3 --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,69 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "context" + + "go.opencensus.io/exemplar" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +// Record records one or multiple measurements with the same context at once. +// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + recorder := internal.DefaultRecorder + if recorder == nil { + return + } + if len(ms) == 0 { + return + } + record := false + for _, m := range ms { + if m.m.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. 
+func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + ctx, err := tag.New(ctx, mutators...) + if err != nil { + return err + } + Record(ctx, ms...) + return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 000000000..6931a5f29 --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 000000000..b7f169b4a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. 
+ + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// An distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there is no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +func Distribution(bounds ...float64) *Aggregation { + return &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + newData: func() AggregationData { + return newDistributionData(bounds) + }, + } +} + +// LastValue only reports the last value recorded using this +// aggregation. All other measurements will be dropped. +func LastValue() *Aggregation { + return &Aggregation{ + Type: AggTypeLastValue, + newData: func() AggregationData { + return &LastValueData{} + }, + } +} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go new file mode 100644 index 000000000..960b94601 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -0,0 +1,235 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "math" + + "go.opencensus.io/exemplar" +) + +// AggregationData represents an aggregated value from a collection. +// They are reported on the view data during exporting. +// Mosts users won't directly access aggregration data. +type AggregationData interface { + isAggregationData() bool + addSample(e *exemplar.Exemplar) + clone() AggregationData + equal(other AggregationData) bool +} + +const epsilon = 1e-9 + +// CountData is the aggregated data for the Count aggregation. +// A count aggregation processes data and counts the recordings. 
+// +// Most users won't directly access count data. +type CountData struct { + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(_ *exemplar.Exemplar) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Value == a2.Value +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(e *exemplar.Exemplar) { + a.Value += e.Value +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return math.Pow(a.Value-a2.Value, 2) < epsilon +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. +type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*exemplar.Exemplar + bounds []float64 // histogram distribution of the values +} + +func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 + return &DistributionData{ + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + } +} + +// Sum returns the sum of all samples collected. 
+func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +func (a *DistributionData) addSample(e *exemplar.Exemplar) { + f := e.Value + if f < a.Min { + a.Min = f + } + if f > a.Max { + a.Max = f + } + a.Count++ + a.addToBucket(e) + + if a.Count == 1 { + a.Mean = f + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) +} + +func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { + var count *int64 + var ex **exemplar.Exemplar + for i, b := range a.bounds { + if e.Value < b { + count = &a.CountPerBucket[i] + ex = &a.ExemplarsPerBucket[i] + break + } + } + if count == nil { + count = &a.CountPerBucket[len(a.bounds)] + ex = &a.ExemplarsPerBucket[len(a.bounds)] + } + *count++ + *ex = maybeRetainExemplar(*ex, e) +} + +func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { + if old == nil { + return cur + } + + // Heuristic to pick the "better" exemplar: first keep the one with a + // sampled trace attachment, if neither have a trace attachment, pick the + // one with more attachments. + _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] + if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { + return cur + } + return old +} + +func (a *DistributionData) clone() AggregationData { + c := *a + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) + return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +// LastValueData returns the last value recorded for LastValue aggregation. +type LastValueData struct { + Value float64 +} + +func (l *LastValueData) isAggregationData() bool { + return true +} + +func (l *LastValueData) addSample(e *exemplar.Exemplar) { + l.Value = e.Value +} + +func (l *LastValueData) clone() AggregationData { + return &LastValueData{l.Value} +} + +func (l *LastValueData) equal(other AggregationData) bool { + a2, ok := other.(*LastValueData) + if !ok { + return false + } + return l.Value == a2.Value +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go new file mode 100644 index 000000000..32415d485 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -0,0 +1,87 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "sort" + + "go.opencensus.io/exemplar" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/tag" +) + +type collector struct { + // signatures holds the aggregations values for each unique tag signature + // (values for all keys) to its aggregator. + signatures map[string]AggregationData + // Aggregation is the description of the aggregation to perform for this + // view. + a *Aggregation +} + +func (c *collector) addSample(s string, e *exemplar.Exemplar) { + aggregator, ok := c.signatures[s] + if !ok { + aggregator = c.a.newData() + c.signatures[s] = aggregator + } + aggregator.addSample(e) +} + +// collectRows returns a snapshot of the collected Row values. +func (c *collector) collectedRows(keys []tag.Key) []*Row { + rows := make([]*Row, 0, len(c.signatures)) + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{Tags: tags, Data: aggregator.clone()} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + vb := &tagencoding.Values{ + Buffer: make([]byte, len(keys)), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. +func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 000000000..dced225c3 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. 
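For example (a sketch only; the measure, view name, bucket bounds, and tag key below are made up for illustration), a view over a hypothetical latency measure could be defined and registered like this:

    package example

    import (
        "log"

        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
        "go.opencensus.io/tag"
    )

    var (
        // Hypothetical measure and tag key, for illustration only.
        latencyMS    = stats.Float64("example.com/latency", "request latency", stats.UnitMilliseconds)
        methodKey, _ = tag.NewKey("method")

        // Aggregate latency into a distribution, producing one Row per method value.
        latencyView = &view.View{
            Name:        "example.com/views/latency",
            Description: "latency distribution by method",
            Measure:     latencyMS,
            TagKeys:     []tag.Key{methodKey},
            Aggregation: view.Distribution(0, 25, 50, 100, 250, 500, 1000),
        }
    )

    func setupViews() {
        // Register starts collection; data is then delivered to registered exporters.
        if err := view.Register(latencyView); err != nil {
            log.Fatalf("cannot register view: %v", err)
        }
    }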
+// All aggregations are cumulative. +// +// Views can be registerd and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. +package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 000000000..7cb59718f --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,58 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package view + +import "sync" + +var ( + exportersMu sync.RWMutex // guards exporters + exporters = make(map[Exporter]struct{}) +) + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// +// The Data should not be modified. +type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + exporters[e] = struct{}{} +} + +// UnregisterExporter unregisters an exporter. +func UnregisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + delete(exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 000000000..c2a08af67 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,185 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/exemplar" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Register function to be before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. + Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. + // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function tp apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. +func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// canonicalize canonicalizes v by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot register view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + return nil +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. +func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, e) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. 
Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. +type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +func checkViewName(name string) error { + if len(name) > internal.MaxNameLength { + return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) + } + if !internal.IsPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 000000000..63b0ee3cc --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,229 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "fmt" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + defaultWorker = newWorker() + go defaultWorker.start() + internal.DefaultRecorder = record +} + +type measureRef struct { + measure string + views map[*viewInternal]struct{} +} + +type worker struct { + measures map[string]*measureRef + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time + + timer *time.Ticker + c chan command + quit, done chan bool +} + +var defaultWorker *worker + +var defaultReportingDuration = 10 * time.Second + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func Find(name string) (v *View) { + req := &getViewByNameReq{ + name: name, + c: make(chan *getViewByNameResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.v +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func Register(views ...*View) error { + for _, v := range views { + if err := v.canonicalize(); err != nil { + return err + } + } + req := ®isterViewReq{ + views: views, + err: make(chan error), + } + defaultWorker.c <- req + return <-req.err +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. 
+// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func Unregister(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } + req := &unregisterFromViewReq{ + views: names, + done: make(chan struct{}), + } + defaultWorker.c <- req + <-req.done +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func RetrieveData(viewName string) ([]*Row, error) { + req := &retrieveDataReq{ + now: time.Now(), + v: viewName, + c: make(chan *retrieveDataResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.rows, resp.err +} + +func record(tags *tag.Map, ms interface{}, attachments map[string]string) { + req := &recordReq{ + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), + } + defaultWorker.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + defaultWorker.c <- req + <-req.c // don't return until the timer is set to the new duration. +} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. 
+ return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + +func (w *worker) reportUsage(now time.Time) { + for _, v := range w.views { + w.reportView(v, now) + } +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 000000000..b38f26f42 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,183 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opencensus.io/exemplar" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +type command interface { + handleCommand(w *worker) +} + +// getViewByNameReq is the command to get a view given its name. +type getViewByNameReq struct { + name string + c chan *getViewByNameResp +} + +type getViewByNameResp struct { + v *View +} + +func (cmd *getViewByNameReq) handleCommand(w *worker) { + v := w.views[cmd.name] + if v == nil { + cmd.c <- &getViewByNameResp{nil} + return + } + cmd.c <- &getViewByNameResp{v.view} +} + +// registerViewReq is the command to register a view. +type registerViewReq struct { + views []*View + err chan error +} + +func (cmd *registerViewReq) handleCommand(w *worker) { + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + internal.SubscriptionReporter(view.Measure.Name()) + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { + cmd.err <- nil + } +} + +// unregisterFromViewReq is the command to unregister to a view. Has no +// impact on the data collection for client that are pulling data from the +// library. +type unregisterFromViewReq struct { + views []string + done chan struct{} +} + +func (cmd *unregisterFromViewReq) handleCommand(w *worker) { + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } + delete(w.views, name) + } + cmd.done <- struct{}{} +} + +// retrieveDataReq is the command to retrieve data for a view. 
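A sketch of wiring up the Exporter and reporting-period APIs defined earlier in this package; the logging exporter here is hypothetical, not something the package provides:

    package example

    import (
        "log"
        "time"

        "go.opencensus.io/stats/view"
    )

    // logExporter is a hypothetical exporter that just logs each exported batch.
    type logExporter struct{}

    func (logExporter) ExportView(d *view.Data) {
        // d.Rows holds one Row per unique combination of tag values for d.View.
        log.Printf("view %q: %d rows (window %v to %v)", d.View.Name, len(d.Rows), d.Start, d.End)
    }

    func setupExport() {
        view.RegisterExporter(logExporter{})
        // Report aggregated data every 10 seconds instead of the default period.
        view.SetReportingPeriod(10 * time.Second)
    }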
+type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]string + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + e := &exemplar.Exemplar{ + Value: m.Value(), + Timestamp: cmd.t, + Attachments: cmd.attachments, + } + v.addSample(cmd.tm, e) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. +type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 000000000..dcc13f498 --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,67 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" + + "go.opencensus.io/exemplar" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. 
+func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} + +func init() { + exemplar.RegisterAttachmentExtractor(extractTagsAttachments) +} + +func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + m := FromContext(ctx) + if m == nil { + return a + } + if len(m.m) == 0 { + return a + } + if a == nil { + a = make(map[string]string) + } + + for k, v := range m.m { + a[exemplar.KeyPrefixTag+k.Name()] = v + } + return a +} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 000000000..da16b74e4 --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. + +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 000000000..ebbed9500 --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,35 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey consequently with the same name returns the same key. +func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 000000000..5b72ba6ad --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,197 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +// Map is a map of tags. Use New to create a context containing +// a new Map. +type Map struct { + m map[Key]string +} + +// Value returns the value for the key if a value for the key exists. +func (m *Map) Value(k Key) (string, bool) { + if m == nil { + return "", false + } + v, ok := m.m[k] + return v, ok +} + +func (m *Map) String() string { + if m == nil { + return "nil" + } + keys := make([]Key, 0, len(m.m)) + for k := range m.m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) + + var buffer bytes.Buffer + buffer.WriteString("{ ") + for _, k := range keys { + buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) + } + buffer.WriteString(" }") + return buffer.String() +} + +func (m *Map) insert(k Key, v string) { + if _, ok := m.m[k]; ok { + return + } + m.m[k] = v +} + +func (m *Map) update(k Key, v string) { + if _, ok := m.m[k]; ok { + m.m[k] = v + } +} + +func (m *Map) upsert(k Key, v string) { + m.m[k] = v +} + +func (m *Map) delete(k Key) { + delete(m.m, k) +} + +func newMap() *Map { + return &Map{m: make(map[Key]string)} +} + +// Mutator modifies a tag map. +type Mutator interface { + Mutate(t *Map) (*Map, error) +} + +// Insert returns a mutator that inserts a +// value associated with k. If k already exists in the tag map, +// mutator doesn't update the value. +func Insert(k Key, v string) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.insert(k, v) + return m, nil + }, + } +} + +// Update returns a mutator that updates the +// value of the tag associated with k with v. If k doesn't +// exists in the tag map, the mutator doesn't insert the value. +func Update(k Key, v string) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.update(k, v) + return m, nil + }, + } +} + +// Upsert returns a mutator that upserts the +// value of the tag associated with k with v. It inserts the +// value if k doesn't exist already. It mutates the value +// if k already exists. +func Upsert(k Key, v string) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.upsert(k, v) + return m, nil + }, + } +} + +// Delete returns a mutator that deletes +// the value associated with k. +func Delete(k Key) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + m.delete(k) + return m, nil + }, + } +} + +// New returns a new context that contains a tag map +// originated from the incoming context and modified +// with the provided mutators. 
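To illustrate the mutators above, a sketch of typical use of New; the "user" key and values are invented for the example:

    package example

    import (
        "context"

        "go.opencensus.io/tag"
    )

    var userKey, _ = tag.NewKey("user")

    func withUser(ctx context.Context) (context.Context, error) {
        // Insert only sets the tag if it is absent; Upsert always overwrites it.
        ctx, err := tag.New(ctx,
            tag.Insert(userKey, "anonymous"),
            tag.Upsert(userKey, "alice"),
        )
        if err != nil {
            return ctx, err
        }
        if v, ok := tag.FromContext(ctx).Value(userKey); ok {
            _ = v // "alice"
        }
        return ctx, nil
    }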
+func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { + m := newMap() + orig := FromContext(ctx) + if orig != nil { + for k, v := range orig.m { + if !checkKeyName(k.Name()) { + return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) + } + if !checkValue(v) { + return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) + } + m.insert(k, v) + } + } + var err error + for _, mod := range mutator { + m, err = mod.Mutate(m) + if err != nil { + return ctx, err + } + } + return NewContext(ctx, m), nil +} + +// Do is similar to pprof.Do: a convenience for installing the tags +// from the context as Go profiler labels. This allows you to +// correlated runtime profiling with stats. +// +// It converts the key/values from the given map to Go profiler labels +// and calls pprof.Do. +// +// Do is going to do nothing if your Go version is below 1.9. +func Do(ctx context.Context, f func(ctx context.Context)) { + do(ctx, f) +} + +type mutator struct { + fn func(t *Map) (*Map, error) +} + +func (m *mutator) Mutate(t *Map) (*Map, error) { + return m.fn(t) +} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go new file mode 100644 index 000000000..3e998950c --- /dev/null +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -0,0 +1,234 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "encoding/binary" + "fmt" +) + +// KeyType defines the types of keys allowed. Currently only keyTypeString is +// supported. +type keyType byte + +const ( + keyTypeString keyType = iota + keyTypeInt64 + keyTypeTrue + keyTypeFalse + + tagsVersionID = byte(0) +) + +type encoderGRPC struct { + buf []byte + writeIdx, readIdx int +} + +// writeKeyString writes the fieldID '0' followed by the key string and value +// string. 
+func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. 
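A sketch of the Encode/Decode round trip described above, for example when carrying tags across a process boundary; the function name is illustrative:

    package example

    import (
        "context"

        "go.opencensus.io/tag"
    )

    func roundTrip(ctx context.Context) (*tag.Map, error) {
        m := tag.FromContext(ctx)
        if m == nil {
            return nil, nil // nothing to propagate
        }
        // Serialize the tag map into its binary propagation form...
        wire := tag.Encode(m)
        // ...and reconstruct an equivalent map on the receiving side.
        return tag.Decode(wire)
    }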
+func Encode(m *Map) []byte { + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(byte(tagsVersionID)) + for k, v := range m.m { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v)) + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 000000000..f81cd0b4a --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 000000000..83adbce56 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,23 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 000000000..0939fc674 --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). + validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 000000000..01f0f9083 --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,114 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. + SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// BoolAttribute returns a bool-valued attribute. 
+func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The current span is a child of the linked span. + LinkTypeParent // The current span is the parent of the linked span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. + Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. +const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 000000000..0816892ea --- /dev/null +++ b/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,48 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + + "go.opencensus.io/trace/internal" +) + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. + IDGenerator internal.IDGenerator +} + +var configWriteMu sync.Mutex + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. 
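ApplyConfig below is normally called once at startup; a minimal sketch, assuming the package's sampling API (ProbabilitySampler is defined in a file not shown in this excerpt):

    package example

    import "go.opencensus.io/trace"

    func initTracing() {
        // Sample roughly 1% of traces; fields left unset in Config keep their
        // current values (for example the ID generator).
        trace.ApplyConfig(trace.Config{
            DefaultSampler: trace.ProbabilitySampler(0.01),
        })
    }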
+func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 000000000..04b1ee4f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/exemplar.go b/vendor/go.opencensus.io/trace/exemplar.go new file mode 100644 index 000000000..416d80590 --- /dev/null +++ b/vendor/go.opencensus.io/trace/exemplar.go @@ -0,0 +1,43 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
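A minimal, self-contained sketch of the usage pattern described in doc.go above: register an exporter, raise the sampling rate, and wrap work in a span. The printExporter type and the "example.com/Run" span name are illustrative only, not part of the vendored package; the Exporter interface and SpanData fields are the ones added by this change in export.go.

    package main

    import (
        "context"
        "fmt"
        "time"

        "go.opencensus.io/trace"
    )

    // printExporter satisfies trace.Exporter; ExportSpan is its only method.
    type printExporter struct{}

    func (printExporter) ExportSpan(sd *trace.SpanData) {
        fmt.Printf("span %q took %v\n", sd.Name, sd.EndTime.Sub(sd.StartTime))
    }

    func main() {
        trace.RegisterExporter(printExporter{})
        // Sample everything so the demo span is exported; avoid in production.
        trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

        _, span := trace.StartSpan(context.Background(), "example.com/Run")
        time.Sleep(10 * time.Millisecond) // simulated work
        span.End()
    }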
+ +package trace + +import ( + "context" + "encoding/hex" + + "go.opencensus.io/exemplar" +) + +func init() { + exemplar.RegisterAttachmentExtractor(attachSpanContext) +} + +func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + span := FromContext(ctx) + if span == nil { + return a + } + sc := span.SpanContext() + if !sc.IsSampled() { + return a + } + if a == nil { + a = make(exemplar.Attachments) + } + a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) + a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) + return a +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 000000000..77a8c7357 --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,90 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. 
+ Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 000000000..1c8b9b34b --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,21 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 000000000..1eb190a96 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. 
+func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 000000000..71c10f9e3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. 
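A small round-trip sketch for the binary propagation format above, reusing the byte values from the "valid value example" in the format comment; it assumes only the Binary and FromBinary functions shown in this file.

    package main

    import (
        "fmt"

        "go.opencensus.io/trace"
        "go.opencensus.io/trace/propagation"
    )

    func main() {
        sc := trace.SpanContext{
            TraceID:      trace.TraceID{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
            SpanID:       trace.SpanID{97, 98, 99, 100, 101, 102, 103, 104},
            TraceOptions: 1, // sampled
        }
        wire := propagation.Binary(sc) // 29 bytes: version byte plus the three fields
        got, ok := propagation.FromBinary(wire)
        fmt.Println(ok, got == sc) // true true
    }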
+// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 000000000..fbabad34c --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. +func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. 
+func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 000000000..c442d9902 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. +// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. 
+func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. +func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. 
+func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 000000000..ec60effd1 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// Status codes for use with Span.SetStatus. 
These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 000000000..9e5e5f033 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,516 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. +func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. +func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. 
+// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions + Tracestate *tracestate.Tracestate +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return context.WithValue(parent, contextKey{}, s) +} + +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + +// StartOptions contains options concerning how a span is started. +type StartOptions struct { + // Sampler to consult for this Span. If provided, it is always consulted. + // + // If not provided, then the behavior differs based on whether + // the parent of this Span is remote, local, or there is no parent. + // In the case of a remote parent or no parent, the + // default sampler (see Config) will be consulted. Otherwise, + // when there is a non-remote parent, no new sampling decision will be made: + // we will preserve the sampling of the parent. + Sampler Sampler + + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. + SpanKind int +} + +// StartOption apply changes to StartOptions. +type StartOption func(*StartOptions) + +// WithSpanKind makes new spans to be created with the given kind. +func WithSpanKind(spanKind int) StartOption { + return func(o *StartOptions) { + o.SpanKind = spanKind + } +} + +// WithSampler makes new spans to be be created with a custom sampler. +// Otherwise, the global sampler is used. +func WithSampler(sampler Sampler) StartOption { + return func(o *StartOptions) { + o.Sampler = sampler + } +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + var parent SpanContext + if p := FromContext(ctx); p != nil { + parent = p.spanContext + } + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. 
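A hedged sketch of the per-span start options added above: WithSpanKind tags the span kind and WithSampler overrides the global DefaultSampler for a single span. The handleRequest function and the span names are invented for illustration.

    package example

    import (
        "context"

        "go.opencensus.io/trace"
    )

    func handleRequest(ctx context.Context) {
        // Force sampling for this server span regardless of the global sampler.
        ctx, span := trace.StartSpan(ctx, "example.com/api.Handle",
            trace.WithSpanKind(trace.SpanKindServer),
            trace.WithSampler(trace.AlwaysSample()),
        )
        defer span.End()

        // A chatty sub-operation can opt out without touching the global config.
        _, poll := trace.StartSpan(ctx, "example.com/api.Handle/poll",
            trace.WithSampler(trace.NeverSample()),
        )
        defer poll.End()
    }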
+func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. +func (s *Span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.data.Attributes != nil { + sd.Attributes = make(map[string]interface{}) + for k, v := range s.data.Attributes { + sd.Attributes[k] = v + } + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. 
+func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + if s.data.Attributes == nil { + s.data.Attributes = make(map[string]interface{}) + } + copyAttributes(s.data.Attributes, attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. +func copyAttributes(m map[string]interface{}, attributes []Attribute) { + for _, a := range attributes { + m[a.key] = a.value + } +} + +func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { + now := time.Now() + msg := fmt.Sprintf(format, a...) + var m map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + m = make(map[string]interface{}) + copyAttributes(m, attributes) + } + s.data.Annotations = append(s.data.Annotations, Annotation{ + Time: now, + Message: msg, + Attributes: m, + }) + s.mu.Unlock() +} + +func (s *Span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var a map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + a = make(map[string]interface{}) + copyAttributes(a, attributes) + } + s.data.Annotations = append(s.data.Annotations, Annotation{ + Time: now, + Message: str, + Attributes: a, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.lazyPrintfInternal(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. 
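An illustrative sketch combining the instrumentation methods added above: AddAttributes, Annotatef, and SetStatus with one of the gRPC-style codes from status_codes.go. The lookup function, attribute keys, and cache scenario are assumptions for the example, not code from this change.

    package example

    import (
        "context"

        "go.opencensus.io/trace"
    )

    func lookup(ctx context.Context, key string) {
        _, span := trace.StartSpan(ctx, "example.com/cache.Lookup")
        defer span.End()

        span.AddAttributes(
            trace.StringAttribute("cache.key", key),
            trace.BoolAttribute("cache.warm", true),
            trace.Int64Attribute("cache.shard", 3),
        )
        span.Annotatef(nil, "looking up %q", key)

        if key == "" {
            // Zero means success; non-zero codes mark the span as failed.
            span.SetStatus(trace.Status{
                Code:    trace.StatusCodeInvalidArgument,
                Message: "empty key",
            })
        }
    }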
+func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Links = append(s.data.Links, l) + s.mu.Unlock() +} + +func (s *Span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + gen := &defaultIDGenerator{} + // initialize traceID and spanID generators. + var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + + traceIDAdd [2]uint64 + traceIDRand *rand.Rand +} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *defaultIDGenerator) NewSpanID() [8]byte { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) + } + var sid [8]byte + binary.LittleEndian.PutUint64(sid[:], id) + return sid +} + +// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewTraceID() [16]byte { + var tid [16]byte + // Construct the trace ID from two outputs of traceIDRand, with a constant + // added to each half for additional entropy. + gen.Lock() + binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) + binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) + gen.Unlock() + return tid +} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 000000000..b7d8aaf28 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,32 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. 
+ return ctx, func() {} + } + nctx, task := t.NewTask(ctx, name) + return nctx, task.End +} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 000000000..e25419859 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 000000000..2d6c713eb --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. 
+func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). +func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index d6f683ba3..000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. 
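A short sketch of building a Tracestate with the New constructor added above; keys and values must satisfy the validation regexps, and at most 32 entries are kept. The congo and rojo entries are arbitrary example values, not anything this change depends on.

    package example

    import (
        "fmt"

        "go.opencensus.io/trace/tracestate"
    )

    func buildTracestate() {
        ts, err := tracestate.New(nil,
            tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"},
            tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"},
        )
        if err != nil {
            fmt.Println("invalid tracestate:", err)
            return
        }
        for _, e := range ts.Entries() {
            fmt.Printf("%s=%s\n", e.Key, e.Value)
        }
    }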
-package ed25519 - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -import ( - "bytes" - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "errors" - "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -type PublicKey []byte - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:32]) - return seed -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. 
-func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var s [32]byte - copy(s[:], sig[32:]) - - // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in - // the range [0, order) in order to prevent signature malleability. - if !edwards25519.ScMinimal(&s) { - return false - } - - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) - - var checkR [32]byte - R.ToBytes(&checkR) - return bytes.Equal(sig[:32], checkR[:]) -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086c1..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. 
-var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, 
- { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index fd03c252a..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package edwards25519 - -import "encoding/binary" - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise.
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
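
Editor's note: the Input/Output comment above defines ScMulAdd's contract — each 32-byte argument is a little-endian 256-bit integer and the result is (a*b + c) mod l. The sketch below only illustrates that contract with math/big; it does not call the removed function (the edwards25519 package is internal to x/crypto), and the tiny scalar values are purely illustrative.

package main

import (
	"fmt"
	"math/big"
)

// leBytesToInt interprets a 32-byte little-endian scalar as a big integer.
func leBytesToInt(b *[32]byte) *big.Int {
	be := make([]byte, 32)
	for i := range b {
		be[31-i] = b[i] // big.Int.SetBytes expects big-endian
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493, the group order
	// quoted in the comment above, written out in decimal.
	l, _ := new(big.Int).SetString(
		"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

	var a, b, c [32]byte
	a[0], b[0], c[0] = 3, 5, 7 // illustrative scalars: a=3, b=5, c=7

	// Expected result of ScMulAdd(&s, &a, &b, &c): (a*b + c) mod l.
	s := new(big.Int).Mul(leBytesToInt(&a), leBytesToInt(&b))
	s.Add(s, leBytesToInt(&c))
	s.Mod(s, l)
	fmt.Println(s) // prints 22
}
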
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 593f65300..000000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 42106f3f2..00ed9923e 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -222,6 +222,11 @@ type openSSHCertSigner struct { signer Signer } +type algorithmOpenSSHCertSigner struct { + *openSSHCertSigner + algorithmSigner AlgorithmSigner +} + // NewCertSigner returns a Signer that signs with the given Certificate, whose // private key is held by signer. It returns an error if the public key in cert // doesn't match the key used by signer. @@ -230,7 +235,12 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { return nil, errors.New("ssh: signer and cert have different public key") } - return &openSSHCertSigner{cert, signer}, nil + if algorithmSigner, ok := signer.(AlgorithmSigner); ok { + return &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, algorithmSigner}, nil + } else { + return &openSSHCertSigner{cert, signer}, nil + } } func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { @@ -241,6 +251,10 @@ func (s *openSSHCertSigner) PublicKey() PublicKey { return s.pub } +func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + const sourceAddressCriticalOption = "source-address" // CertChecker does the work of verifying a certificate. Its methods diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index ae6ca775e..7b00bff1c 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -185,7 +185,7 @@ func Dial(network, addr string, config *ClientConfig) (*Client, error) { // keys. A HostKeyCallback must return nil if the host key is OK, or // an error to reject it. It receives the hostname as passed to Dial // or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the the SSH connection. +// net.Conn underlying the SSH connection. type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error // BannerCallback is the function type used for treat the banner sent by diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 2261dc386..969804794 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -38,6 +38,16 @@ const ( KeyAlgoED25519 = "ssh-ed25519" ) +// These constants represent non-default signature algorithms that are supported +// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See +// [PROTOCOL.agent] section 4.5.1 and +// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 +const ( + SigAlgoRSA = "ssh-rsa" + SigAlgoRSASHA2256 = "rsa-sha2-256" + SigAlgoRSASHA2512 = "rsa-sha2-512" +) + // parsePubKey parses a public key of the given algorithm. // Use ParsePublicKey for keys with prepended algorithm. 
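
Editor's note: the SigAlgo* constants above (together with the AlgorithmSigner interface added later in this keys.go hunk) let callers request rsa-sha2-256 or rsa-sha2-512 signatures instead of the ssh-rsa (SHA-1) default. A minimal sketch of a caller, assuming this updated vendored version; the generated key and the signed message are illustrative only.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Illustrative key; real code would load an existing client or host key.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromKey(rsaKey)
	if err != nil {
		log.Fatal(err)
	}

	// With this change, RSA signers returned by the package also implement
	// AlgorithmSigner, so a non-default algorithm can be requested explicitly.
	algSigner, ok := signer.(ssh.AlgorithmSigner)
	if !ok {
		log.Fatal("signer does not support algorithm selection")
	}
	sig, err := algSigner.SignWithAlgorithm(rand.Reader, []byte("example data"), ssh.SigAlgoRSASHA2256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sig.Format) // expected to report "rsa-sha2-256"
}
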
func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { @@ -301,6 +311,19 @@ type Signer interface { Sign(rand io.Reader, data []byte) (*Signature, error) } +// A AlgorithmSigner is a Signer that also supports specifying a specific +// algorithm to use for signing. +type AlgorithmSigner interface { + Signer + + // SignWithAlgorithm is like Signer.Sign, but allows specification of a + // non-default signing algorithm. See the SigAlgo* constants in this + // package for signature algorithms supported by this package. Callers may + // pass an empty string for the algorithm in which case the AlgorithmSigner + // will use its default algorithm. + SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) +} + type rsaPublicKey rsa.PublicKey func (r *rsaPublicKey) Type() string { @@ -349,13 +372,21 @@ func (r *rsaPublicKey) Marshal() []byte { } func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != r.Type() { + var hash crypto.Hash + switch sig.Format { + case SigAlgoRSA: + hash = crypto.SHA1 + case SigAlgoRSASHA2256: + hash = crypto.SHA256 + case SigAlgoRSASHA2512: + hash = crypto.SHA512 + default: return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } - h := crypto.SHA1.New() + h := hash.New() h.Write(data) digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) } func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { @@ -459,6 +490,14 @@ func (k *dsaPrivateKey) PublicKey() PublicKey { } func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + return k.SignWithAlgorithm(rand, data, "") +} + +func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != "" && algorithm != k.PublicKey().Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + h := crypto.SHA1.New() h.Write(data) digest := h.Sum(nil) @@ -691,16 +730,42 @@ func (s *wrappedSigner) PublicKey() PublicKey { } func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.SignWithAlgorithm(rand, data, "") +} + +func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { var hashFunc crypto.Hash - switch key := s.pubKey.(type) { - case *rsaPublicKey, *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) + if _, ok := s.pubKey.(*rsaPublicKey); ok { + // RSA keys support a few hash functions determined by the requested signature algorithm + switch algorithm { + case "", SigAlgoRSA: + algorithm = SigAlgoRSA + hashFunc = crypto.SHA1 + case SigAlgoRSASHA2256: + hashFunc = crypto.SHA256 + case SigAlgoRSASHA2512: + hashFunc = crypto.SHA512 + default: + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + } else { + // The only supported algorithm for all other key types is the same as the type of the key + if algorithm == "" { + algorithm = s.pubKey.Type() + } else if algorithm != s.pubKey.Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + switch key := s.pubKey.(type) { + case *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: 
+ default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } } var digest []byte @@ -745,7 +810,7 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { } return &Signature{ - Format: s.pubKey.Type(), + Format: algorithm, Blob: signature, }, nil } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index d0f482531..e86e89661 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -404,7 +404,7 @@ userAuthLoop: perms, authErr = config.PasswordCallback(s, password) case "keyboard-interactive": if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configubred") + authErr = errors.New("ssh: keyboard-interactive auth not configured") break } @@ -484,6 +484,7 @@ userAuthLoop: // sig.Format. This is usually the same, but // for certs, the names differ. if !isAcceptableAlgo(sig.Format) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go index 731c89a28..391104084 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd +// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. @@ -25,7 +25,7 @@ type State struct { termios unix.Termios } -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go similarity index 54% rename from vendor/github.com/Sirupsen/logrus/terminal_appengine.go rename to vendor/golang.org/x/crypto/ssh/terminal/util_aix.go index 72f679cdb..dfcd62785 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go @@ -1,13 +1,12 @@ -// Based on ssh/terminal: // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine +// +build aix -package logrus +package terminal -import "io" +import "golang.org/x/sys/unix" -func initTerminal(w io.Writer) { -} +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go index 799f049f0..9317ac7ed 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -21,7 +21,7 @@ import ( type State struct{} -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. 
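
Editor's note: the terminal changes above add an AIX build variant and reword the IsTerminal docs; the calling convention is unchanged. A typical caller guards interactive prompts on it, roughly as below (ReadPassword is an existing helper in the same package; the prompt text is illustrative).

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !terminal.IsTerminal(fd) {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal; refusing to prompt")
		os.Exit(1)
	}
	fmt.Print("Password: ")
	pw, err := terminal.ReadPassword(fd) // reads a line without echoing it
	fmt.Println()
	if err != nil {
		fmt.Fprintln(os.Stderr, "read failed:", err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(pw))
}
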
func IsTerminal(fd int) bool { return false } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go index 9e41b9f43..3d5f06a9f 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -17,7 +17,7 @@ type State struct { termios unix.Termios } -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { _, err := unix.IoctlGetTermio(fd, unix.TCGETA) return err == nil diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 8618955df..6cb8a9503 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -26,7 +26,7 @@ type State struct { mode uint32 } -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { var st uint32 err := windows.GetConsoleMode(windows.Handle(fd), &st) diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go index 606cf1f97..37dc0cfdb 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -2,18 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.7 - // Package ctxhttp provides helper functions for performing context-aware HTTP requests. package ctxhttp // import "golang.org/x/net/context/ctxhttp" import ( + "context" "io" "net/http" "net/url" "strings" - - "golang.org/x/net/context" ) // Do sends an HTTP request with the provided http.Client and returns diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go deleted file mode 100644 index 926870cc2..000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // TODO(djd): Respect any existing value of req.Cancel. - cancel := make(chan struct{}) - req.Cancel = cancel - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. 
- testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - close(cancel) - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - close(cancel) - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 9889c09be..64a579372 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -1264,12 +1264,6 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { switch commonAncestor.DataAtom { case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: p.fosterParent(lastNode) - case a.Template: - // TODO: remove namespace checking - if commonAncestor.Namespace == "html" { - commonAncestor = commonAncestor.LastChild - } - fallthrough default: commonAncestor.AppendChild(lastNode) } diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index 6356b3287..000000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. (since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. -// (The field is exported so it can be accessed via reflect from net/http; tested -// by TestNoDialH2RoundTripperType) -type noDialH2RoundTripper struct{ *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.Transport.RoundTrip(req) - if isNoCachedConnError(err) { - return nil, http.ErrSkipAltProtocol - } - return res, err -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c85e31f26..b46791d1d 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1477,7 +1477,7 @@ func (fr *Framer) maxHeaderStringLen() int { } // readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame +// merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. 
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if fr.AllowIllegalReads { diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go index 9749dc0be..3a131016b 100644 --- a/vendor/golang.org/x/net/http2/go111.go +++ b/vendor/golang.org/x/net/http2/go111.go @@ -6,19 +6,22 @@ package http2 -import "net/textproto" +import ( + "net/http/httptrace" + "net/textproto" +) -func traceHasWroteHeaderField(trace *clientTrace) bool { +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return trace != nil && trace.WroteHeaderField != nil } -func traceWroteHeaderField(trace *clientTrace, k, v string) { +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { if trace != nil && trace.WroteHeaderField != nil { trace.WroteHeaderField(k, []string{v}) } } -func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse } diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go deleted file mode 100644 index 00b2e9e3c..000000000 --- a/vendor/golang.org/x/net/http2/go16.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return t1.ExpectContinueTimeout -} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go deleted file mode 100644 index d957b7bc5..000000000 --- a/vendor/golang.org/x/net/http2/go17.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
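
Editor's note: the go111.go change above switches the HTTP/2 trace helpers to the standard *httptrace.ClientTrace instead of the package-private clientTrace wrapper. Callers attach such a trace through the request context in the usual way; a minimal sketch, with a placeholder URL.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	trace := &httptrace.ClientTrace{
		// These two hooks are the ones the updated go111.go forwards to.
		WroteHeaderField: func(key string, values []string) {
			fmt.Printf("wrote header %s: %v\n", key, values)
		},
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			fmt.Printf("got 1xx response: %d\n", code)
			return nil
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
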
- -// +build go1.7 - -package http2 - -import ( - "context" - "net" - "net/http" - "net/http/httptrace" - "time" -) - -type contextContext interface { - context.Context -} - -var errCanceled = context.Canceled - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req.WithContext(ctx) -} - -type clientTrace httptrace.ClientTrace - -func reqContext(r *http.Request) context.Context { return r.Context() } - -func (t *Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } - -func traceGetConn(req *http.Request, hostPort string) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GetConn == nil { - return - } - trace.GetConn(hostPort) -} - -func traceGotConn(req *http.Request, cc *ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func requestTrace(req *http.Request) *clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*clientTrace)(trace) -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *ClientConn) Ping(ctx context.Context) error { - return cc.ping(ctx) -} - -// Shutdown gracefully closes the client connection, waiting for running streams to complete. -func (cc *ClientConn) Shutdown(ctx context.Context) error { - return cc.shutdown(ctx) -} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go deleted file mode 100644 index b4c52ecec..000000000 --- a/vendor/golang.org/x/net/http2/go17_not18.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.7,!go1.8 - -package http2 - -import "crypto/tls" - -// temporary copy of Go 1.7's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go deleted file mode 100644 index 4f30d228a..000000000 --- a/vendor/golang.org/x/net/http2/go18.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package http2 - -import ( - "crypto/tls" - "io" - "net/http" -) - -func cloneTLSConfig(c *tls.Config) *tls.Config { - c2 := c.Clone() - c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 - return c2 -} - -var _ http.Pusher = (*responseWriter)(nil) - -// Push implements http.Pusher. -func (w *responseWriter) Push(target string, opts *http.PushOptions) error { - internalOpts := pushOptions{} - if opts != nil { - internalOpts.Method = opts.Method - internalOpts.Header = opts.Header - } - return w.push(target, internalOpts) -} - -func configureServer18(h1 *http.Server, h2 *Server) error { - if h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != http.ErrAbortHandler -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return req.GetBody -} - -func reqBodyIsNoBody(body io.ReadCloser) bool { - return body == http.NoBody -} - -func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go deleted file mode 100644 index 38124ba56..000000000 --- a/vendor/golang.org/x/net/http2/go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 - -package http2 - -import ( - "net/http" -) - -func configureServer19(s *http.Server, conf *Server) error { - s.RegisterOnShutdown(conf.state.startGracefulShutdown) - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go index 0df34e6d9..161bca7ce 100644 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -6,12 +6,15 @@ package http2 -import "net/textproto" +import ( + "net/http/httptrace" + "net/textproto" +) -func traceHasWroteHeaderField(trace *clientTrace) bool { return false } +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } -func traceWroteHeaderField(trace *clientTrace, k, v string) {} +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} -func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { return nil } diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index 508cebcc4..000000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 7ffb25046..000000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "errors" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -var errCanceled = errors.New("canceled") - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. 
-} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGetConn(*http.Request, string) {} -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (cc *ClientConn) Shutdown(ctx contextContext) error { - return cc.shutdown(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index 6f8d3f86f..000000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } - -func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go deleted file mode 100644 index 5ae07726b..000000000 --- a/vendor/golang.org/x/net/http2/not_go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 - -package http2 - -import ( - "net/http" -) - -func configureServer19(s *http.Server, conf *Server) error { - // not supported prior to go1.9 - return nil -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 56859d1f8..b57b6e2d0 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -28,6 +28,7 @@ package http2 import ( "bufio" "bytes" + "context" "crypto/tls" "errors" "fmt" @@ -209,12 +210,14 @@ func ConfigureServer(s *http.Server, conf *Server) error { conf = new(Server) } conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} - if err := configureServer18(s, conf); err != nil { - return err - } - if err := configureServer19(s, conf); err != nil { - return err + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) @@ -435,6 +438,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.serve() } +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + func (sc *serverConn) rejectConn(err ErrCode, debug string) { sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. @@ -450,7 +462,7 @@ type serverConn struct { conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler - baseCtx contextContext + baseCtx context.Context framer *Framer doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames @@ -530,7 +542,7 @@ type stream struct { id uint32 body *pipe // non-nil if expecting DATA frames cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext + ctx context.Context cancelCtx func() // owned by serverConn's serve loop: @@ -1110,7 +1122,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { // errHandlerPanicked is the error given to any callers blocked in a read from // Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. +// main ServeHTTP goroutine, this will show up rarely. 
var errHandlerPanicked = errors.New("http2: handler panicked") // wroteFrame is called on the serve goroutine with the result of @@ -1882,7 +1894,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream panic("internal error: cannot create stream with id 0") } - ctx, cancelCtx := contextWithCancel(sc.baseCtx) + ctx, cancelCtx := context.WithCancel(sc.baseCtx) st := &stream{ sc: sc, id: id, @@ -2048,7 +2060,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r Body: body, Trailer: trailer, } - req = requestWithContext(req, st.ctx) + req = req.WithContext(st.ctx) rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw @@ -2076,7 +2088,7 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler stream: rw.rws.stream, }) // Same as net/http: - if shouldLogPanic(e) { + if e != nil && e != http.ErrAbortHandler { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] @@ -2638,14 +2650,9 @@ var ( ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") ) -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. -type pushOptions struct { - Method string - Header http.Header -} +var _ http.Pusher = (*responseWriter)(nil) -func (w *responseWriter) push(target string, opts pushOptions) error { +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { st := w.rws.stream sc := st.sc sc.serveG.checkNotOn() @@ -2656,6 +2663,10 @@ func (w *responseWriter) push(target string, opts pushOptions) error { return ErrRecursivePush } + if opts == nil { + opts = new(http.PushOptions) + } + // Default options. if opts.Method == "" { opts.Method = "GET" diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 9d1f2fadd..f272e8f9f 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "compress/gzip" + "context" "crypto/rand" "crypto/tls" "errors" @@ -21,6 +22,7 @@ import ( mathrand "math/rand" "net" "net/http" + "net/http/httptrace" "net/textproto" "sort" "strconv" @@ -95,6 +97,16 @@ type Transport struct { // to mean no limit. MaxHeaderListSize uint32 + // StrictMaxConcurrentStreams controls whether the server's + // SETTINGS_MAX_CONCURRENT_STREAMS should be respected + // globally. If false, new TCP connections are created to the + // server as needed to keep each under the per-connection + // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the + // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as + // a global limit and callers of RoundTrip block when needed, + // waiting for their turn. + StrictMaxConcurrentStreams bool + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -118,16 +130,56 @@ func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. +// It returns an error if t1 has already been HTTP/2-enabled. 
func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + _, err := configureTransport(t1) return err } +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -192,7 +244,7 @@ type ClientConn struct { type clientStream struct { cc *ClientConn req *http.Request - trace *clientTrace // or nil + trace *httptrace.ClientTrace // or nil ID uint32 resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload @@ -226,7 +278,7 @@ type clientStream struct { // channel to be signaled. A non-nil error is returned only if the request was // canceled. func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { - ctx := reqContext(req) + ctx := req.Context() if req.Cancel == nil && ctx.Done() == nil { return nil } @@ -401,8 +453,8 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res select { case <-time.After(time.Second * time.Duration(backoff)): continue - case <-reqContext(req).Done(): - return nil, reqContext(req).Err() + case <-req.Context().Done(): + return nil, req.Context().Err() } } } @@ -439,16 +491,15 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt } // If the Body is nil (or http.NoBody), it's safe to reuse // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { + if req.Body == nil || req.Body == http.NoBody { return req, nil } // If the request body can be reset back to its original // state via the optional req.GetBody, do that. - getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody != nil { + if req.GetBody != nil { // TODO: consider a req.Body.Close here? or audit that all caller paths do? 
- body, err := getBody() + body, err := req.GetBody() if err != nil { return nil, err } @@ -494,7 +545,7 @@ func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, er func (t *Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) + *cfg = *t.TLSClientConfig.Clone() } if !strSliceContains(cfg.NextProtos, NextProtoTLS) { cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) @@ -545,7 +596,7 @@ func (t *Transport) expectContinueTimeout() time.Duration { if t.t1 == nil { return 0 } - return transportExpectContinueTimeout(t.t1) + return t.t1.ExpectContinueTimeout } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { @@ -670,8 +721,19 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { if cc.singleUse && cc.nextStreamID > 1 { return } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && - int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 + var maxConcurrentOkay bool + if cc.t.StrictMaxConcurrentStreams { + // We'll tell the caller we can take a new request to + // prevent the caller from dialing a new TCP + // connection, but then we'll block later before + // writing it. + maxConcurrentOkay = true + } else { + maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) + } + + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest return } @@ -711,8 +773,7 @@ func (cc *ClientConn) closeIfIdle() { var shutdownEnterWaitStateHook = func() {} // Shutdown gracefully close the client connection, waiting for running streams to complete. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) shutdown(ctx contextContext) error { +func (cc *ClientConn) Shutdown(ctx context.Context) error { if err := cc.sendGoAway(); err != nil { return err } @@ -882,7 +943,7 @@ func checkConnHeaders(req *http.Request) error { // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. 
func actualContentLength(req *http.Request) int64 { - if req.Body == nil || reqBodyIsNoBody(req.Body) { + if req.Body == nil || req.Body == http.NoBody { return 0 } if req.ContentLength != 0 { @@ -952,7 +1013,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf cs := cc.newStream() cs.req = req - cs.trace = requestTrace(req) + cs.trace = httptrace.ContextClientTrace(req.Context()) cs.requestedGzip = requestedGzip bodyWriter := cc.t.getBodyWriterState(cs, body) cs.on100 = bodyWriter.on100 @@ -990,7 +1051,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf readLoopResCh := cs.resc bodyWritten := false - ctx := reqContext(req) + ctx := req.Context() handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { res := re.res @@ -1060,6 +1121,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf default: } if err != nil { + cc.forgetStreamID(cs.ID) return nil, cs.getStartedWrite(), err } bodyWritten = true @@ -1181,6 +1243,7 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( sawEOF = true err = nil } else if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) return err } @@ -1416,7 +1479,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail return nil, errRequestHeaderListSize } - trace := requestTrace(req) + trace := httptrace.ContextClientTrace(req.Context()) traceHeaders := traceHasWroteHeaderField(trace) // Header list size is ok. Write the headers. @@ -1839,7 +1902,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Header.Del("Content-Length") res.ContentLength = -1 res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) + res.Uncompressed = true } return res, nil } @@ -2216,8 +2279,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { } // Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { +func (cc *ClientConn) Ping(ctx context.Context) error { c := make(chan struct{}) // Generate a random payload var p [8]byte @@ -2451,3 +2513,91 @@ func (s bodyWriterState) scheduleBodyWrite() { func isConnectionCloseRequest(req *http.Request) bool { return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") } + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// converting panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. 
+// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.Transport.RoundTrip(req) + if isNoCachedConnError(err) { + return nil, http.ErrSkipAltProtocol + } + return res, err +} + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *httptrace.ClientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index fae5c8adc..3849bc263 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -329,7 +329,7 @@ func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { } // encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. +// is encoded only if k is in keys. func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { if keys == nil { sorter := sorterPool.Get().(*sorter) diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go index 1bdecb669..418f21677 100644 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision 545c3f0754686c54b449a63dc00f5110a28bd94e (2018-07-25T21:31:09Z)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision 6f2b9e75eaf65bb75da83677655a59110088ebc5 (2018-10-03T13:34:55Z)" const ( nodesBitsChildren = 10 @@ -23,476 +23,476 @@ const ( ) // numTLD is the number of top level domains. -const numTLD = 1551 +const numTLD = 1546 // Text is the combined text of all labels. 
-const text = "9guacuiababia-goracleaningroks-theatreebinagisobetsumidatlantica" + - "sertairanzanquannefrankfurtatarantoyakokonoebinordre-landd-dnsho" + +const text = "9guacuiababia-goracleaningroks-theatreebinagisoccertificationatu" + + "rhistorisches3-ap-south-16-bambleclerc66biomutashinaiiyamanouchi" + + "kuhokuryugasakitcheninomiyakonojorpelandiyukindigenaklodzkochiku" + + "shinonsenergyukuhashimoichinosekigaharabirdartcenterprisesakimob" + + "etsuitainairforceoppdalimitednpalmspringsakerbirkenesoddtangenov" + + "araholtalenirasakindustriabirthplacebitballooningjovikarelianceb" + + "jarkoyurihonjournalisteinkjerusalembroideryusuharabjerkreimbarcl" + + "aycards3-eu-west-3utilitiesquare7bjugnieznord-aurdalpha-myqnapcl" + + "oud66blackfridayusuisserveircateringebuilderschmidtre-gauldalimo" + + "liserniablancomedicaltanissettaipeiheijindustriesteamfamberkeley" + + "uu2-localhostrowwlkpmgladefensells-for-less3-website-us-east-1bl" + + "oombergbauernuorochesterbloxcms3-website-us-west-1bluedancebmoat" + + "tachments3-website-us-west-2bms5yuzawabmweddinglassassinationalh" + + "eritagebnpparibaselburgleezebnrwedeploybomloabathsbcatholicaxias" + + "colipicenodumetlifeinsurancebondrangedalindaskvollindesnesakyota" + + "nabellunombresciabonnishiazainfinitintuitjomemorialinkyard-cloud" + + "eitybookingliwiceboomladbrokesalangenishigoboschaefflerdalvdalas" + + "kanittedallasallebesbyglandroverhalla-speziabostikariyaltakasago" + + "tpantheonsitebostonakijinsekikogentinglobalashovhachinohedmarkar" + + "lsoybotanicalgardenishiharabotanicgardenishiizunazukinuyamashina" + + "tsukigatakarazukameokameyamatotakadabotanybouncemerckmsdnipropet" + + "rovskjervoyagebounty-fullensakerrypropertiesalondonetskarmoybout" + + "iquebechattanooganordkappanamatsuzakinvestmentsaltdalivornobozen" + + "-suedtirolkuszczytnord-frontierbplacedekagaminord-odalwaysdataba" + + "seballangenoamishirasatochigiessensiositelemarkarpaczeladzlglobo" + + "avistaprintelligencebrandywinevalleybrasiliabrindisibenikebristo" + + "loseyouripirangap-northeast-3britishcolumbialowiezachpomorskieni" + + "shikatakatsukinzais-a-candidatebroadcastlefrakkestadray-dnstrace" + + "broadwaybroke-itjxfinitybrokerbronnoysundrayddnsfreebox-osascoli" + + "-picenordlandraydnsupdaterbrothermesaverdealstahaugesunderseapor" + + "tsinfolldalomzaporizhzheguris-a-catererbrowsersafetymarketsaludr" + + "ivefsnillfjordrobaknoluoktagajobojis-a-celticsfanishikatsuragit-" + + "repostre-totendofinternet-dnsalvadordalibabalsan-suedtirollagden" + + "esnaaseralingenkainanaejrietisalatinabenonicheltenham-radio-open" + + "airbusantiquest-a-la-maisondre-landroidrudunsalzburglogowegrowei" + + "bolognagatorockartuzybrumunddalondrinamsskoganeis-a-chefarmstead" + + "upontariodejaneirodoybrunelasticbeanstalkaruizawabrusselsamegawa" + + "bruxellesamnangerbryanskleppgafanpachigasakievennodesaarlandurba" + + "namexnetlifyis-a-conservativegarsheis-a-cpadualstackarumaifarsun" + + "durhamburgloppenzaolbia-tempio-olbiatempioolbialystokkembuchikum" + + "agayagawakkanaibetsubamericanfamilydscloudapplinzis-a-cubicle-sl" + + "avellinotairestaurantkmaxxjavald-aostaplesampagespeedmobilizerob" + + "rynewjerseybuskerudinewportlligatksatxn--0trq7p7nnishikawazukami" + + "tsuebuzentsujiiebuzzpanasonichernigovernmentmparaglidinglugmbhar" + + "tiffanybweirbzhitomirumalatvuopmicrolightingminakamichiharacolog" + + "nextdirectozsdeloittenrightathomeftparsannancolonialwilliamsburg" + + "rongacoloradoplateaudiocolumbusheycommunitysnesannohelplfinancia" + + 
"luccarbonia-iglesias-carboniaiglesiascarboniacomobaracomparemark" + + "erryhotelsanokashiwaracompute-1computerhistoryofscience-fictionc" + + "omsecuritytacticsantabarbaracondoshichinohealth-carereformitakeh" + + "araconferenceconstructionconsuladoharuovatrani-andria-barletta-t" + + "rani-andriaconsultanthropologyconsultingrossetouchihayaakasakawa" + + "haracontactraniandriabarlettatraniandriacontagematsubaracontempo" + + "raryarteducationalchikugojomedio-campidano-mediocampidanomedioco" + + "ntractorskenconventureshinodearthdfcbankashiwazakiyosemitecookin" + + "gchannelsdvrdnsdojoetsuwanouchikujogaszkolahppiacenzagancoolucer" + + "necooperativano-frankivskoleikangercopenhagencyclopedichirurgien" + + "s-dentistes-en-francecorsicahcesuolocus-2corvettemp-dnsantacruzs" + + "antafedjejuifminamidaitomandalukowfashioncosenzakopanexus-3cosid" + + "nsfor-better-thanawatchesantamariakecostumedizinhistorischesanto" + + "andreamhostersanukis-a-doctoraycouchpotatofriesaobernardownloady" + + "ndns-remotewdyndns-serverdaluroycouncilutskasukabedzin-the-banda" + + "ioiraseeklogesurancechirealmpmncouponsaogoncartoonartdecologiaco" + + "ursesaotomeloyalistjordalshalsencq-acranbrookuwanalyticsapporocr" + + "editcardyndns-webhopencraftranoycreditunioncremonashgabadaddjagu" + + "arqhachiojiyahoooshikamaishimodatecrewhalingroundhandlingroznycr" + + "icketrzyncrimeast-kazakhstanangercrotonecrownipartis-a-financial" + + "advisor-aurdaluxembourgrpartsardegnaroycrsvpartycruisesardiniacr" + + "yptonomichigangwoncuisinellair-traffic-controlleyculturalcentern" + + "opilawawhoswhokksundyndns-wikiracuneocupcakecuritibaghdadyndns-w" + + "orkisboringruecxn--12c1fe0bradescorporationcyberlevagangaviikano" + + "njis-a-geekasumigaurawa-mazowszextraspace-to-rentalstomakomaibar" + + "acymrussiacyonabaruminamiechizencyoutheworkpccwiiheyakageferrari" + + "ssagamiharaferreroticapebretonamicrosoftbankasuyamelbournefetsun" + + "dynnsarluxuryfguitarsaskatchewanfhvalerfidonnakanojohanamakinoha" + + "rafieldynservebbsarpsborguidefinimakanegasakinkobayashikaoirmina" + + "mifuranofigueresinstagingujoinvillevangerfilateliafilegearfilmin" + + "amiizukamishihoronobeauxartsandcraftsassaris-a-greenfinalfinance" + + "fineartsaudafinlandynuconnectransportefinnoyfirebaseappasadenara" + + "shinofirenzefirestonefirmdaleirvikaszubyfishingolffansauheradynv" + + "6fitjarfitnessettlementravelchannelfjalerflesbergulenflickragero" + + "tikakamigaharaflightsavannahgaflirflogintogurafloraflorenceflori" + + "davvenjargaulardalfloripaderbornfloristanohatajirittohmalvikatow" + + "iceflorogersaves-the-whalessandria-trani-barletta-andriatranibar" + + "lettaandriaflowersavonarusawafltravelersinsuranceflynnhosting-cl" + + "usterflynnhubargainstitutelevisionayorovigovtatsunobninskaragand" + + "authordalandemoneyokotempresashibetsukuibmdeportevadsobetsulikes" + + "-piedmonticellodingenavuotnaples3-eu-central-1fndynvpnplus-4for-" + + "ourfor-someeresistancefor-theaterforexrothachirogatakamatsukawaf" + + "orgotdnsaxoforsaleitungsenforsandasuololfortalfortmissoulancashi" + + "reggio-calabriafortworthadanorthwesternmutualforumzwildlifedorai" + + "nfracloudcontrolappassagensbschokokekschokoladenfosnescholarship" + + "schoolfotarivnefoxfordeatnurembergunmaoris-a-gurulvikatsushikabe" + + "eldengeluidyroyfozorafredrikstadtvschulefreeddnsgeekgalaxyfreede" + + "sktoperauniteroizumizakirovogradoyfreemasonryfreesitexashorokana" + + "iefreetlschwarzgwangjuniperfreiburguovdageaidnunzenfreightrdfres" + + "eniuscountryestateofdelawarezzoologyfribourgushikamifuranorth-ka" + + 
"zakhstanfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-ven" + + "ezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriul" + + "ive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliaf" + + "riulivgiuliafrlfroganschweizfrognfrolandfrom-akrehamnfrom-alfrom" + + "-arfrom-azfrom-capetownnews-stagingwiddlewismillerfrom-codynalia" + + "sdaburfrom-ctrentin-sued-tirolfrom-dchiryukyuragifuchungbukharau" + + "malopolskanlandyndns-at-workinggrouparliamentoyosatoyonakagyokut" + + "oyokawafrom-debianfrom-flandersciencecentersciencehistoryfrom-ga" + + "usdalfrom-hichisochildrensgardenfrom-iafrom-idfrom-ilfrom-incheo" + + "nfrom-kscientistockholmestrandfrom-kyowariasahikawafrom-lancaste" + + "rfrom-mangonohejis-a-hard-workerfrom-mdfrom-meethnologyfrom-mifu" + + "nefrom-mnfrom-modalenfrom-mscjohnsonfrom-mtnfrom-nchitachinakaga" + + "wassamukawataricohdatsunanjoburgmodellingmxn--11b4c3dyndns-blogd" + + "nsamsclubindalorenskogrimstadyndns-freeboxosloftranakanotoddenis" + + "hinomiyashironofrom-ndfrom-nefrom-nh-serveblogsitextileksvikatsu" + + "yamarumorimachidafrom-njaworznotogawafrom-nminamimakis-a-hunterf" + + "rom-nv-infoodnetworkshoppingxn--12co0c3b4evalleaostaticscotlandf" + + "rom-nyfrom-ohkurafrom-oketohnoshooguyfrom-orfrom-padovaksdalfrom" + + "-pratohobby-sitefrom-ris-a-knightpointtokamachintaifun-dnsaliasi" + + "afrom-schoenbrunnfrom-sdfrom-tnfrom-txn--1ck2e1barreauctionflfan" + + "fshostrowiecasertairanzanquannefrankfurtattooceanographics3-fips" + + "-us-gov-west-1from-utazuerichardlikescandynamic-dnscrapper-sitef" + + "rom-val-daostavalleyfrom-vtrentin-suedtirolfrom-wafrom-wielunner" + + "from-wvalled-aostatoilfrom-wyfrosinonefrostalowa-wolawafroyahiko" + + "beardubaiduckdnscrappingfstavernfujiiderafujikawaguchikonefujimi" + + "nokamoenairportland-4-salernoboribetsuckscrysechitosetogitsuldal" + + "otenkawafujinomiyadavvesiidattowebcampinashikiminohosteroyrvikin" + + "gfujiokayamangyshlakasamatsudontexistmein-iservebeerfujisatoshon" + + "airtelefonicable-modemocraciafujisawafujishiroishidakabiratoride" + + "dyn-ip24fujitsurugashimaniwakuratefujixeroxn--1ctwolominamatakko" + + "kaminoyamaxunusualpersonfujiyoshidazaifudaigokaseljordfukayabeat" + + "serveminecraftrentino-a-adigefukuchiyamadafukudominichocolatemas" + + "ekasaokaminokawanishiaizubangefukuis-a-landscaperfukumitsubishig" + + "akiryuohtawaramotoineppuboliviajessheimperiafukuokazakisarazurec" + + "ontainerdpolicefukuroishikarikaturindalfukusakishiwadafukuyamaga" + + "takaharuslivinghistoryfunabashiriuchinadafunagatakahashimamakiso" + + "fukushimannore-og-uvdalfunahashikamiamakusatsumasendaisennangoog" + + "lecodespotaruis-a-lawyerfundaciofuoiskujukuriyamansionservemp3fu" + + "osskoczowilliamhillfurnitureggio-emilia-romagnakatombetsumitakag" + + "iizefurubirafurudonostiaafurukawairtrafficplexus-1fusodegaurafus" + + "saikisosakitagawafutabayamaguchinomigawafutboldlygoingnowhere-fo" + + "r-morenakatsugawafuttsurugiminamiminowafuturecmservep2passenger-" + + "associationfuturehostingfuturemailingfvgfylkesbiblackbaudcdn77-s" + + "ecurecifedexhibitionfyresdalhangoutsystemscloudfrontdoorhannanmo" + + "kuizumodenakayamarburghannosegawahanyuzenhapmirhareidsbergenhars" + + "tadharvestcelebrationhasamarcheapigeelvinckautokeinow-dnservesar" + + "casmatartanddesignhasaminami-alpssells-itrentino-aadigehashbangh" + + "asudahasura-appaviancarrierhasvikazohatogayaitakamoriokalmykiaha" + + "toyamazakitakamiizumisanofidelityhatsukaichikaiseis-a-linux-user" + + "anishiaritabashijonawatehattfjelldalhayashimamotobungotakadaplie" + + 
"rnewmexicoalhazuminobusellsyourhomegoodservicesevastopolehbodoes" + + "-itvedestrandhelsinkitakatakanabeautysfjordhembygdsforbundhemnes" + + "evenassisicilyhemsedalhepforgeherokussldheroyhgtvalledaostavange" + + "rhigashiagatsumagoianiahigashichichibunkyonanaoshimageandsoundan" + + "dvisionhigashihiroshimanehigashiizumozakitakyushuaiahigashikagaw" + + "ahigashikagurasoedahigashikawakitaaikitamihamadahigashikurumegur" + + "omskoghigashimatsushimaritimodernhigashimatsuyamakitaakitadaitoi" + + "gawahigashimurayamamotorcyclesewinbarrel-of-knowledgeologyokozem" + + "rhigashinarusembokukitamotosumy-gatewayhigashinehigashiomihachim" + + "anaustdalhigashiosakasayamanakakogawahigashishirakawamatakanezaw" + + "ahigashisumiyoshikawaminamiaikitanakagusukumoduminamiogunicomcas" + + "tresindevicesharis-a-llamarriottrentino-alto-adigehigashitsunosh" + + "iroomurahigashiurausukitashiobarahigashiyamatokoriyamanashiftedi" + + "tchyouripfizerhigashiyodogawahigashiyoshinogaris-a-musicianhirai" + + "zumisatokaizukaluganskypehirakatashinagawahiranais-a-nascarfanhi" + + "rarahiratsukagawahirayaizuwakamatsubushikusakadogawahistorichous" + + "esharpgfoggiahitachiomiyagildeskaliszhitachiotagopocznorfolkebib" + + "lelhitraeumtgeradellogliastradinghjartdalhjelmelandholeckobierzy" + + "ceholidayhomeipharmacienshawaiijimarnardalhomelinkitoolsztynsett" + + "lershellaspeziahomelinuxn--1lqs03nhomeofficehomesecuritymacapare" + + "cidahomesecuritypchofunatoriginsurecreationishinoomotegohomesens" + + "eminehomeunixn--1lqs71dhondahoneywellbeingzonehongotembaixadahon" + + "jyoitakaokamakurazakitaurayasudahornindalhorseoullensvanguardhor" + + "teneis-a-nurservegame-serverhospitalhoteleshimojis-a-painteracti" + + "vegaskimitsubatamibudejjuedischesapeakebayernrtrentino-altoadige" + + "hotmailhoyangerhoylandetroitskazunowruzhgorodeohumanitieshimokaw" + + "ahurdalhurumajis-a-patsfanhyllestadhyogoris-a-personaltrainerhyu" + + "gawarahyundaiwafunejfkharkovaojlljmphilatelyjnjcphiladelphiaarea" + + "dmyblogspotrentino-sued-tiroljoyentrentinoa-adigejoyokaichibalat" + + "inogiftshimotsumajpmorganjpnchoseiroumuenchenishinoshimatsushige" + + "jprshinichinanjurkoshunantankhmelnitskiyamarylandkosugekotohirad" + + "omainshinshinotsurgerykotourakouhokutamakis-a-studentalkounosupp" + + "lieshinshirokouyamashikekouzushimashikis-a-teacherkassymantechno" + + "logykozagawakozakis-a-techietis-a-photographerokuappharmacyshimo" + + "kitayamakozowindmillkpnkppspdnshintokushimakrasnodarkredstonekri" + + "stiansandcatshintomikasaharakristiansundkrodsheradkrokstadelvald" + + "aostarnbergkryminamisanrikubetsurfastpanelblagrarchaeologyeongbu" + + "klugsmileasinglest-mon-blogueurovisionionjukudoyamaceratabusebas" + + "topologyeonggiehtavuoatnagaivuotnagaokakyotambabydgoszczecinemad" + + "ridvagsoygardendoftheinternetflixilovecollegefantasyleaguernseyk" + + "umatorinokumejimasoykumenantokonamegatakasugais-a-therapistoiaku" + + "nisakis-an-accountantshimonitayanagithubusercontentrentino-s-tir" + + "olkunitachiarailwaykunitomigusukumamotoyamashikokuchuokunneppugl" + + "iakunstsammlungkunstunddesignkuokgrouphotographysiokurehabmerkur" + + "gankurobelaudiblebtimnetzkurogiminamiashigarakuroisoftwarendalen" + + "ugkuromatsunais-an-actorkurotakikawasakis-an-actresshimonosekika" + + "wakushirogawakustanais-an-anarchistoricalsocietykusupplykutchane" + + "lkutnokuzumakis-an-artisteigenkvafjordkvalsundkvamlidlugolekafjo" + + "rdkvanangenkvinesdalkvinnheradkviteseidskogkvitsoykwpspectrumina" + + "mitanekzmissilezajskmpspbarrell-of-knowledgeometre-experts-compt" + + 
"ables3-sa-east-1misugitokuyamatsumaebashikshacknetrentinoaadigem" + + "itourismolangevagrigentomologyeongnamegawakayamagazineat-urlmito" + + "yoakemiuramiyazurewebsiteshikagamiishibukawamiyotamanomjondalenm" + + "lbfanmonstermontrealestatefarmequipmentrentinoalto-adigemonza-br" + + "ianzaporizhzhiamonza-e-della-brianzapposhioyanaizumonzabrianzapt" + + "okyotangotsukitahatakahatakaishimogosenmonzaebrianzaramonzaedell" + + "abrianzamoonscalemoparachutingmordoviamoriyamatsumotofukemoriyos" + + "himinamiawajikis-certifiedogawarabikomaezakirunordreisa-geekddie" + + "lddanuorrikuzentakataiwanairguardiannakadomarinebraskaunjargalsa" + + "certmgretachikawakeisenbahnmormonmouthaebaruericssonyoursidegree" + + "moroyamatsunomortgagemoscowindowshirahamatonbetsurnadalmoseushis" + + "torymosjoenmoskeneshirakofuefukihaborokunohealthcareershiranukan" + + "agawamosshiraois-foundationmosviknx-serverrankoshigayanagawamote" + + "ginowaniihamatamakawajimanxn--2scrj9choshibuyachiyodattorelaymov" + + "iemovimientolgamovistargardmozilla-iotrentinoaltoadigemtranbymue" + + "nstermuginozawaonsenmuikamisunagawamukodairamulhouservehalflifes" + + "tylemunakatanemuncienciamuosattemupictetrentinos-tirolmurmanskol" + + "obrzegersundmurotorcraftrentinostirolmusashimurayamatsusakahogin" + + "ankokubunjis-gonemusashinoharamuseetrentinosued-tirolmuseumveren" + + "igingmusicargodaddyn-vpndnshiraokananiimihoboleslawiechoyodobash" + + "ichikashukujitawaravennakaiwamizawatchandclockashibatakasakiyosa" + + "tokigawamutsuzawamy-vigorgemy-wanggouvicenzamyactivedirectorymya" + + "sustor-elvdalmycdn77-sslattuminamiuonumassa-carrara-massacarrara" + + "massabusinessebyklegalloanshinyoshitomiokamogawamydattolocalhist" + + "orymyddnskingmydissentrentinosuedtirolmydroboehringerikemydshira" + + "takahagitlabormyeffectrentinsued-tirolmyfirewallonieruchomoscien" + + "ceandindustrynmyfritzmyftpaccesshishikuis-into-animeiwamarshalls" + + "tatebankfhappousrlmyhome-servermyjinomykolaivarggatrentinsuedtir" + + "olmymailermymediapchristiansburgriwataraidyndns-homednsamsungrok" + + "s-thisayamanobeokakudamatsuemyokohamamatsudamypepictureshisognem" + + "ypetshisuifuelveruminamiyamashirokawanabelembetsukubankhmelnytsk" + + "yivaporcloudnshinjournalismailillehammerfeste-iphilipsynology-di" + + "skstationmyphotoshibalestrandabergamoarekeymachinewhampshirebung" + + "oonoipifonyminanomypiagetmyiphostfoldnavymypsxn--30rr7ymysecurit" + + "ycamerakermyshopblockshitaramamytis-a-bookkeeperugiamytuleapiemo" + + "ntemyvnchristmasakinderoymywireitrentoyonezawapippulawypiszpitts" + + "burghofficialpiwatepixolinopizzapkomakiyosunndalplanetariumincom" + + "mbanklabudhabikinokawabarthadselfipatriaplantationplantshizuokan" + + "azawaplatformshangrilanshoujis-into-cartoonshimotsukeplaystation" + + "plazaplchromedicinakamagayachtsandnessjoenishiokoppegardyndns-ip" + + "armatta-varjjatoyotaparocherkasyno-dsandoyplumbingoplurinacional" + + "podlasiellaktyubinskiptveterinairealtorlandpodzonepohlpoivronpok" + + "erpokrovskomatsushimasfjordenpoliticartierpolitiendapolkowicepol" + + "tavalle-aostarostwodzislawinnershowapomorzeszowioshowtimemergenc" + + "yahabahcavuotnagareyamakeupowiathletajimabaridagawalbrzycharityd" + + "alceshriramsterdamnserverbaniapordenonepornporsangerporsangugepo" + + "rsgrunnanyokoshibahikariwanumatakazakis-into-gamessinazawapoznan" + + "praxis-a-bruinsfanprdpreservationpresidioprgmrprimelhusdecorativ" + + "eartsienarutomobellevuelosangelesjabbottrevisohughesigdalprincip" + + "eprivatizehealthinsuranceprochowiceproductionsilkomforbarsycente" + + 
"rtainmentaxihuanhktcp4profesionalprogressivenneslaskerrylogistic" + + "simple-urlpromombetsurgeonshalloffameldalpropertyprotectionproto" + + "netritonprudentialpruszkowitdkommunalforbundprzeworskogptplusgar" + + "denpupilotshizukuishimofusaitamatsukuris-into-carshimosuwalkis-a" + + "-playerpvhagakhanamigawapvtroandinosaurepaircraftingvollombardyn" + + "amisches-dnsirdalpwchryslerpzqldqponpesaro-urbino-pesarourbinope" + + "saromasvuotnaritakurashikis-leetnedalqslgbtrogstadquicksytesting" + + "quipelementslingqvchungnamdalseidfjordyndns-mailottestorfjordsto" + + "rjdevcloudcontrolledstpetersburgstreamuneuesokaneyamazoestudiost" + + "udyndns-at-homedepotenzamamidsundstuff-4-salestufftoread-booksne" + + "sokndalstuttgartrusteesusakis-lostrodawarasusonosuzakaniepcesuzu" + + "kanmakiwiensuzukis-not-certifieducatorahimeshimamateramobilysval" + + "bardunloppacifichurcharternidyndns-office-on-the-weberlincolnish" + + "itosashimizunaminamibosogndalottokorozawasveiosvelvikongsbergsvi" + + "zzerasvn-reposolarssonswedenswidnicasacamdvrcampinagrandebugatti" + + "pschlesischesologneswiebodzindianapolis-a-bloggerswiftcoverswino" + + "ujscienceandhistoryswisshikis-savedunetbankhakassiasynology-dsol" + + "undbeckomonowtvareservehttphoenixn--1qqw23atushuissier-justicetu" + + "valle-daostatic-accessootuxfamilytwmailvestre-slidrepbodynathome" + + "builtrvbashkiriautomotiveconomiasakuchinotsuchiurakawalmartatesh" + + "inanomachimkentateyamaustevollavangenaval-d-aosta-valleyboltatar" + + "antoyakokonoehimejibestaddnslivelanddnss3-ap-southeast-2ix4432-b" + + "ananarepublicaseihicampobassociatest-iservecounterstrike12hpaleo" + + "bihirosakikamijimatsuurabogadocscbgdyniabruzzoologicalvinklein-a" + + "ddrammenuernberggfarmerseine164-barcelonagasukeastcoastaldefence" + + "atonsbergjemnes3-ap-northeast-1337vestre-totennishiawakuravestva" + + "goyvevelstadvibo-valentiavibovalentiavideovillasnesoddenmarkhang" + + "elskjakdnepropetrovskiervaapsteiermarkoninjambylvinnicasadelamon" + + "edatingvinnytsiavipsinaappimientakayamattelekommunikationvirgini" + + "avirtual-userveexchangevirtualuserveftpinkomaganevirtueeldomein-" + + "vigorlicevirtuelvisakegawaviterboknowsitallvivoldavixn--32vp30ha" + + "gebostadvlaanderenvladikavkazimierz-dolnyvladimirvlogoipioneervo" + + "lkswagentsor-odalvologdanskonskowolayangrouphonefosshinjukumanov" + + "olvolkenkundenvolyngdalvossevangenvotevotingvotoyonowiwatsukiyon" + + "oticiaskoyabearalvahkijobserveronagarahkkeravjuegoshikikonaikawa" + + "chinaganoharamcoachampionshiphoptobishimaintenancebetsuikidsmyna" + + "sushiobarackmazerbaijan-mayenebakkeshibechambagriculturennebudap" + + "est-a-la-masionthewifiat-band-campaniawloclawekonsulatrobeepilep" + + "sydneywmflabsor-varangerworldworse-thandawowithgoogleapisa-hocke" + + "ynutsiracusakataketomisatotalwpdevcloudyclusterwritesthisblogsyt" + + "ewroclawithyoutuberspacekitagatakinouewtcminnesotaketakatoris-an" + + "-engineeringwtfastvps-serverisignwuozuwzmiuwajimaxn--3pxu8konyve" + + "lombardiamondshinkamigotoyohashimotottoris-a-rockstarachowicexn-" + + "-42c2d9axn--45br5cylxn--45brj9circustomerxn--45q11cistrondheimmo" + + "bilienishiwakis-a-democratoyotomiyazakis-a-designerxn--4gbrimini" + + "ngxn--4it168dxn--4it797kooris-a-socialistcgrouphdxn--4pvxs4allxn" + + "--54b7fta0ccitadeliveryggeexn--55qw42gxn--55qx5dxn--5js045dxn--5" + + "rtp49citichernihivgubarclays3-external-1xn--5rtq34kopervikherson" + + "xn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6q" + + "q986b3xlxn--7t0a264civilaviationissandiegoxn--80adxhksorfoldxn--" + + 
"80ao21axn--80aqecdr1axn--80asehdbasilicataniautoscanadaejeonbuk1" + + "2xn--80aswgxn--80audnedalnxn--8ltr62koryokamikawanehonbetsurutah" + + "araxn--8pvr4uxn--8y0a063axn--90a3academiamicaaarborteaches-yogas" + + "awaracingxn--90aeroportalabamagasakishimabaraogakibichuoxn--90ai" + + "shobarakawagoexn--90azhytomyravendbasketballyngenvironmentalcons" + + "ervationhlfanhs3-us-east-2xn--9dbhblg6dietcimdbatodayolasiteu-2x" + + "n--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexn--" + + "3bst00minternationalfirearmshiojirishirifujiedaxn--asky-iraxn--a" + + "urskog-hland-jnbatsfjordiscountysvardolls3-us-gov-west-1xn--aver" + + "y-yuasakuhokkaidoomdnsiskinkyotobetsumidatlanticivilisationissay" + + "okkaichiropractichernivtsiciliaxn--b-5gaxn--b4w605ferdxn--balsan" + + "-sudtirol-rqis-slickharkivanylvenicexn--bck1b9a5dre4civilization" + + "issedalouvreisenisshingucciprianiigataishinomakindlegnicagliarib" + + "eiraokinawashirosatochiokinoshimaizuruhrxn--bdddj-mrabdxn--beara" + + "lvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn-" + + "-bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bjdd" + + "ar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-graingerxn--bod" + + "-2natalxn--bozen-sudtirol-76haibarakitahiroshimapartmentservepic" + + "servequakexn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8ac" + + "cident-investigation-aptibleaseating-organicbcieszynxn--brum-voa" + + "gatrysiljanxn--btsfjord-9zaxn--bulsan-sudtirol-rqis-uberleetrent" + + "ino-stirolxn--c1avgxn--c2br7gxn--c3s14misakis-an-entertainerxn--" + + "cck2b3bauhausposts-and-telecommunicationsncfdiscoveryombolzano-a" + + "ltoadigeu-3xn--cesena-forli-c2gxn--cesenaforli-0jgoraxn--cg4bkis" + + "-very-badajozxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2o" + + "xn--correios-e-telecomunicaes-ghc29axn--czr694bbcn-north-1xn--cz" + + "rs0tulanxessolutionslupskommunexn--czru2dxn--czrw28bbtjmaxxxboxe" + + "napponazure-mobileu-4xn--d1acj3bbvacationswatch-and-clockerxn--d" + + "1alfaromeoxn--d1atunesomaxn--d5qv7z876civilwarmanagementoyotsuka" + + "idoxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-gr" + + "ajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4claimsandvikcor" + + "omantovalle-d-aostathellexn--eckvdtc9dxn--efvn9sorocabalsfjordxn" + + "--efvy88hair-surveillancexn--ehqz56nxn--elqq16hakatanortonxn--es" + + "tv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429kosakaerodromegallupi" + + "nbarsyonlinewhollandevelopmentjeldsundgcanonoichinomiyakeu-1xn--" + + "fhbeiarnxn--finny-yuaxn--fiq228c5hsorreisahayakawakamiichikawami" + + "satourslzxn--fiq64beneventoeidsvollillesandefjordishakotanikkoeb" + + "enhavnikolaevents3-us-west-1xn--fiqs8sortlandxn--fiqz9soruminise" + + "rversicherungxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--f" + + "lw351exn--forli-cesena-41gxn--forlicesena-ujgxn--fpcrj9c3dxn--fr" + + "de-grandrapidsoundcastronomy-routerxn--frna-woaraisaijosoyroroso" + + "uthcarolinarvikomorotsukamiokamikitayamatsuris-a-republicancerre" + + "searchaeologicaliforniaxn--frya-hraxn--fzc2c9e2clanbibaidarmenia" + + "xn--fzys8d69uvgmailxn--g2xx48cldmailowiczest-le-patroniyodogawax" + + "n--gckr3f0fauskedsmokorsetagayasells-for-ufcfanxn--gecrj9clickas" + + "hiharaxn--ggaviika-8ya47hakodatexn--gildeskl-g0axn--givuotna-8ya" + + "sakaiminatoyookannamilanotteroyxn--gjvik-wuaxn--gk3at1exn--gls-e" + + "lacaixaxn--gmq050is-very-evillagexn--gmqw5axn--h-2failxn--h1aegh" + + "akonexn--h2breg3evenesouthwestfalenxn--h2brj9c8clinichernovtsykk" + + "ylvenetogakushimotoganewyorkshirecipescaravantaarparisor-fronish" + + 
"imeraxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7ya35bentleyomi" + + "tanoceanographiqueverbankarasjohkamikoaniikappueblockbustermezgo" + + "rzeleccoffeedbackplaneapplegodoesntexisteingeekarasjokarasuyamar" + + "ugame-hostrolekamiminers3-us-west-2xn--hery-iraxn--hgebostad-g3a" + + "xn--hmmrfeasta-s4accident-prevention-webhostingxn--hnefoss-q1axn" + + "--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1a" + + "xn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugiving" + + "xn--io0a7is-very-goodyearxn--j1aefbsbxn--12cfi8ixb8luzernxn--j1a" + + "mhakubahccavuotnagasakikuchikuseikarugamvikaufenxn--j6w193gxn--j" + + "lq61u9w7beppublishproxyzjampagefrontappalmaseratiitatebayashiiba" + + "jddarchitecturealtychyattorneyagawakuyabukihokumakogenglandisrec" + + "htrainingjesdalillyonagoyaveroykeniwaizumiotsukumiyamazonawsadod" + + "gemologicallaziobiraustinnavigationavoibigawaukraanghkepnogataij" + + "i234lima-cityeatselinogradultarnobrzegyptian4tarumizusawaetnagah" + + "amaroyereportashkentatamotors3-ap-northeast-20001wwwebredirectme" + + "msettsupport3l3p0rtargets-itargivestbytomaritimekeeping12038xn--" + + "jlster-byasuokanraxn--jrpeland-54axn--jvr189misasaguris-byxn--k7" + + "yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klb" + + "u-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3ds443gxn--kolu" + + "okta-7ya57hakuis-a-liberalxn--kprw13dxn--kpry57dxn--kpu716fbx-os" + + "arufutsunomiyawakasaikaitakoelnxn--kput3is-very-nicexn--krager-g" + + "yatomitamamuraxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn" + + "--krjohka-hwab49jdfastlylbarefootballfinanzgoraustrheimatunduhre" + + "nnesoyokosukanzakiyokawaraurskog-holandingjerdrumetacentrumeteor" + + "appalermomahachijolstereviewskrakowebspacebizenakasatsunairlined" + + "re-eikerevistanbulsan-suedtirol-o-g-i-natuurwetenschappenaumburg" + + "jerstadotsuruokakegawaugustowadaeguambulancebinordre-landd-dnsho" + "me-webservercelliguriagrocerybnikahokutobamagentositecnologiajud" + - "aicable-modemocraciaugustowadaeguambulancebizenakatombetsumitaka" + - "giizehimeji234lima-cityeatselinogradultarnobrzegyptian4tarumizus" + - "awaetnagahamaroyereportashkentatamotors3-ap-northeast-20001wwweb" + - "redirectmemsettsupport3l3p0rtargets-itargivestbytomaritimekeepin" + - "g12038biomutashinaindigenamsosnowiecatholicaxiascolipicenodumetl" + - "ifeinsurancebirdartcenterprisesakimobetsuitainairforceoppdalimol" + - "iserniabirkenesoddtangenovaraholtalenikonanporovnobirthplacebitb" + - "allooningjovikariyaltakasagotembaixadabjarkoyukuhashimoichinosek" + - "igaharabjerkreimbarclaycards3-eu-west-1bjugnieznord-aurdalpha-my" + - "qnapcloud66blackfridayurihonjournalisteinkjerusalembroideryusuha" + - "rablancomedicaltanissettaipeiheijindustriabloombergbauernuoroche" + - "sterbloxcms3-website-sa-east-1bluedancebmoattachments3-website-u" + - "s-east-1bms3-website-us-west-1bmweddingladefensells-for-less3-we" + - "bsite-us-west-2bnpparibaselburglassassinationalheritagebnrwedepl" + - "oyusuisserveirchattanooganordkappanamatsuzakindustriesteamfamber" + - "keleyuu2-localhostrowwlkpmgleezebomloabathsbcheltenham-radio-ope" + - "nairbusantiquest-a-la-maisondre-landroidivttasvuotnakanojohanama" + - "kinoharabondiyuzawabonninohekinannestadnpanasonichernigovernment" + - "jmaxxxboxenapponazure-mobilebookingliwiceboomladbrokes5yboschaef" + - "flerdalvdalaskanittedallasallebesbyglandroverhalla-speziabostika" + - "rlsoybostonakijinsekikogentinglobalashovhachinohedmarkarmoybotan" + - "icalgardeninomiyakonojorpelandrangedalindaskvollindesnesakyotana" + - 
"bellunombresciabotanicgardenirasakinfinitintuitjomemorialinkyard" + - "-cloudeitybotanybouncemerckmsdnipropetrovskjervoyagebounty-fulle" + - "nsakerrypropertiesalangenishiazainuyamashinatsukigatakarazukameo" + - "kameyamatotakadaboutiquebechernihivgubarclays3-eu-west-2bozen-su" + - "edtirolkuszczytnord-frontierbplacedekagaminord-odalwaysdatabaseb" + - "allangenoamishirasatochigiessensiositelekommunikationishigovtjxf" + - "initybrandywinevalleybrasiliabrindisibenikebristoloseyouripirang" + - "ap-northeast-3britishcolumbialowiezachpomorskienishiharabroadcas" + - "tlefrakkestadray-dnstracebroadwaybroke-itkmaxxjavald-aostaplesal" + - "ondonetskarpaczeladzlgloboavistaprintelligencebrokerbronnoysundr" + - "ayddnsfreebox-osascoli-picenordlandraydnsupdaterbrothermesaverde" + - "alstahaugesunderseaportsinfolldalivornobrowsersafetymarketsaltda" + - "lomzaporizhzhegurinvestmentsaludrivefsnillfjordrobaknoluoktagajo" + - "bojinzais-a-candidatebrumunddalondrinaplesalvadordalibabalsan-su" + - "edtirollagdenesnaaseralingenkainanaejrietisalatinabenonichernivt" + - "siciliabrunelasticbeanstalkaruizawabrusselsalzburglogowegroweibo" + - "lognagatorockartuzybruxellesamegawabryanskleppgafanpachigasakiev" + - "ennodesaarlandrudunsamnangerbrynewjerseybuskerudinewportlligatks" + - "atxn--0trq7p7nnishiizunazukis-a-catererbuzentsujiiebuzzparaglidi" + - "ngloppenzaolbia-tempio-olbiatempioolbialystokkembuchikumagayagaw" + - "akuyabukihokumakogenglandupontariodejaneirodoybweirbzhitomirumal" + - "atvuopmicrolightinglugmbhartiffanycoloradoplateaudiocolumbusheyc" + - "ommunitysvardoharuovatozsdeloittemp-dnsanokashiwaracomobaracompa" + - "remarkerryhotelsantabarbaracompute-1computerhistoryofscience-fic" + - "tioncomsecuritytacticsantacruzsantafedjejuifminamidaitomandaluce" + - "rnecondoshichinohealth-carereformitakeharaconferenceconstruction" + - "consuladollsantamariakeconsultanthropologyconsultingrossetouchih" + - "ayaakasakawaharacontactrani-andria-barletta-trani-andriacontagem" + - "atsubaracontemporaryarteducationalchikugojomedio-campidano-medio" + - "campidanomediocontractorskenconventureshinodearthdfcbankashiwaza" + - "kiyosemitecookingchannelsdvrdnsdojoetsuwanouchikujogaszkolahppia" + - "cenzagancoolukowfashioncooperativano-frankivskoleikangercopenhag" + - "encyclopedichitachinakagawatchandclockarumaifarsundyndns-blogdns" + - "amsclubindalorenskogrimstadyndns-freeboxosloftranakasatsunairpor" + - "tland-4-salernoboribetsucksamsungripescaravantaacorsicagliaribei" + - "raokinawashirosatochiokinoshimaizuruhrcorvettemasekasukabedzin-t" + - "he-bandaioiraseeklogesurancechirealmpmncosenzakopanerairguardian" + - "nakadomarinebraskaunjargalsacertmgretachikawakeisenbahncosidnsfo" + - "r-better-thanawatchesantoandreamhostersanukis-a-democratraniandr" + - "iabarlettatraniandriacostumedizinhistorischesaobernardownloadynd" + - "ns-remotewdyndns-serverdaluroycouchpotatofriesaogoncartoonartdec" + - "ologiacouncilutskasumigaurawa-mazowszextraspace-to-rentalstomako" + - "maibaracouponsaotomeloyalistjordalshalsencoursesapporocq-acranbr" + - "ookuwanalyticsardegnaroycreditcardyndns-webhopencraftranoycredit" + - "unioncremonashgabadaddjaguarqhachiojiyahoooshikamaishimodatecrew" + - "halingroundhandlingroznycricketrzyncrimeast-kazakhstanangercroto" + - "nexus-3crowniparsardiniacrsvpartis-a-designercruisesarluxembourg" + - "rpartsarpsborgruecryptonomichigangwoncuisinellair-traffic-contro" + - "lleyculturalcenternopilawawhoswhokksundyndns-wikiracuneocupcakec" + - "uritibaghdadyndns-workisboringuidefinimakanegasakinkobayashikaoi" + - 
"rminamiechizencxn--12c1fe0bradescorporationcyberlevagangaviikano" + - "njis-a-doctoraycymrussiacyonabaruminamifuranocyoutheworkpccwiihe" + - "yakageferrarissagamiharaferreroticanonoichinomiyakefetsundynnsar" + - "ufutsunomiyawakasaikaitakoelnfguitarsaudafhvalerfidonnakanotodde" + - "nfieldynservebbsasayamafigueresinstagingujoinvillevangerfilateli" + - "afilegearfilminamiizukamishihoronobeauxartsandcraftsauheradynuco" + - "nnectransportefinalfinancefineartsavannahgafinlandynv6finnoyfire" + - "baseappartyfirenzefirestonefirmdaleirvikasuyamelbournefishingolf" + - "fansaves-the-whalessandria-trani-barletta-andriatranibarlettaand" + - "riafitjarfitnessettlementravelchannelfjalerflesbergulenflickrage" + - "rotikakamigaharaflightsavonarusawaflirflogintogurafloraflorencef" + - "loridavvenjargaulardalfloripaderbornfloristanohatajirittohmalvik" + - "aszubyflorogersaxoflowersbschokokekschokoladenfltravelersinsuran" + - "ceflynnhosting-clusterflynnhubargainstitutelemarkarasjohkamikoan" + - "iikappueblockbustermezgorzeleccoffeedbackplaneapplegodoesntexist" + - "eingeekarasjokarasuyamarugame-hostrolekamiminers3-eu-west-3utili" + - "tiesquare7fndynvpnplus-4for-ourfor-someeresistancefor-theaterfor" + - "exrothachirogatakamatsukawaforgotdnscholarshipschoolforsaleitung" + - "senforsandasuololfortalfortmissoulancashireggio-calabriafortwort" + - "hadanorthwesternmutualforumzwildlifedorainfracloudcontrolappasad" + - "enaritakurashikis-a-geekatowicefosneschulefotarivnefoxfordeatnur" + - "embergunmapartmentschwarzgwangjuniperfozorafredrikstadtvschweizf" + - "reeddnsgeekgalaxyfreedesktoperauniteroizumizakirovogradoyfreemas" + - "onryfreesitevadsoccertificationfreetlsciencecentersciencehistory" + - "freiburguovdageaidnulvikatsushikabeeldengeluidyroyfreightrdfrese" + - "niuscountryestateofdelawarezzoologyfribourgushikamifuranorth-kaz" + - "akhstanfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-vene" + - "zia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriuli" + - "ve-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafr" + - "iulivgiuliafrlfroganscientistockholmestrandfrognfrolandfrom-akre" + - "hamnfrom-alfrom-arfrom-azfrom-capebretonamicrosoftbankatsuyamaru" + - "morimachidafrom-codynaliasdaburfrom-ctrentin-sued-tirolfrom-dchi" + - "tosetogitsuldalotenkawafrom-debianfrom-flanderscjohnsonfrom-gaus" + - "dalfrom-hichisochildrensgardenfrom-iafrom-idfrom-ilfrom-incheonf" + - "rom-kscotlandfrom-kyowariasahikawafrom-lancasterfrom-mangonoheji" + - "s-a-greenfrom-mdfrom-meethnologyfrom-mifunefrom-mnfrom-modalenfr" + - "om-mscrapper-sitefrom-mtnfrom-nchocolatelevisionishikawazukamits" + - "uefrom-ndfrom-nefrom-nh-serveblogsitexashorokanaiefrom-njaworzno" + - "togawafrom-nminamimakis-a-gurunzenfrom-nv-infoodnetworkshoppingw" + - "iddlewismillerfrom-nyfrom-ohkurafrom-oketohnoshooguyfrom-orfrom-" + - "padovaksdalfrom-pratohobby-sitextileksvikaufenfrom-ris-a-hard-wo" + - "rkerfrom-schoenbrunnfrom-sdfrom-tnfrom-txn--12co0c3b4evalleaosta" + - "ticscrappingxn--1ck2e1barreauctionavigationavoibmdeportenrightat" + - "homeftpalmaseratiitatebayashiibajddarchitecturealtydalces3-exter" + - "nal-1from-utazuerichardlikescandynamic-dnscrysechofunatoriginsur" + - "ecreationishimerafrom-val-daostavalleyfrom-vtrentin-suedtirolfro" + - "m-wafrom-wielunnerfrom-wvalled-aostatoilfrom-wyfrosinonefrostalo" + - "wa-wolawafroyahikobeardubaiduckdnserveminecraftrentino-a-adigefs" + - "tavernfujiiderafujikawaguchikonefujiminokamoenairtelecitychyatto" + - "rneyagawakkanaibetsubamericanfamilydscloudapplinzis-a-hunterfuji" + - 
"nomiyadavvesiidattowebcampinashikiminohosteroyrvikingfujiokayama" + - "ngyshlakasamatsudontexistmein-iservebeerfujisatoshonairtrafficpl" + - "exus-1fujisawafujishiroishidakabiratoridedyn-ip24fujitsurugashim" + - "aniwakuratefujixeroxn--1ctwolominamatakkokaminoyamaxunusualperso" + - "nfujiyoshidazaifudaigokaseljordfukayabeatservemp3fukuchiyamadafu" + - "kudominichonanbuildingriwataraidyndns-homednsandnessjoenishinomi" + - "yashironofukuis-a-knightpointtokamachintaifun-dnsaliasiafukumits" + - "ubishigakiryuohtawaramotoineppuboliviajessheimperiafukuokazakisa" + - "razurecontainerdpolicefukuroishikarikaturindalfukusakishiwadafuk" + - "uyamagatakaharuslivinghistoryfunabashiriuchinadafunagatakahashim" + - "amakisofukushimannore-og-uvdalfunahashikamiamakusatsumasendaisen" + - "nangoodyearfundaciofuoiskujukuriyamansionservep2passagenservepic" + - "servequakefuosskoczowilliamhillfurnitureggio-emilia-romagnakatsu" + - "gawafurubirafurudonostiaarpassenger-associationfurukawais-a-land" + - "scaperfusodegaurafussaikisosakitagawafutabayamaguchinomigawafutb" + - "oldlygoingnowhere-for-morenakayamanxn--1lqs03nfuttsurugiminamimi" + - "nowafuturecmservesarcasmatartanddesignfuturehostingfuturemailing" + - "fvgfylkesbiblackbaudcdn77-securecifederationfyresdalhannanmokuiz" + - "umodenaklodzkobierzycehannosegawahanyuzenhapmirhareidsbergenhars" + - "tadharvestcelebrationhasamarcheapaviancarrierhasaminami-alpssell" + - "s-itrentino-aadigehashbanghasudahasura-appfizerhasvikazohatogaya" + - "itakamoriokalmykiahatoyamazakitakamiizumisanofidelityhatsukaichi" + - "kaiseis-a-libertarianhattfjelldalhayashimamotobungotakadaplierne" + - "wmexicoalhazuminobusellsyourhomegoodsevenassisicilyhbodoes-itved" + - "estrandhelsinkitakatakanabeautysnesewinbarrel-of-knowledgeologyo" + - "kozeu-1hembygdsforbundhemnesharis-a-linux-useranishiaritabashijo" + - "nawatehemsedalhepforgeherokussldheroyhgtvalledaostavangerhigashi" + - "agatsumagoianiahigashichichibunkyonanaoshimageandsoundandvisionh" + - "igashihiroshimanehigashiizumozakitakyushuaiahigashikagawahigashi" + - "kagurasoedahigashikawakitaaikitamihamadahigashikurumeguromskoghi" + - "gashimatsushimaritimodernhigashimatsuyamakitaakitadaitoigawahiga" + - "shimurayamamotorcyclesharpgfoggiahigashinarusembokukitamotosumy-" + - "gatewayhigashinehigashiomihachimanaustdalhigashiosakasayamanakak" + - "ogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikit" + - "anakagusukumoduminamiogunicomcastresindeviceshawaiijimarnardalhi" + - "gashitsunoshiroomurahigashiurausukitashiobarahigashiyamatokoriya" + - "manashifteditchyouripharmacienshellaspeziahigashiyodogawahigashi" + - "yoshinogaris-a-llamarriottrentino-alto-adigehiraizumisatokaizuka" + - "luganskypehirakatashinagawahiranais-a-musicianhirarahiratsukagaw" + - "ahirayaizuwakamatsubushikusakadogawahistorichouseshimojis-a-nasc" + - "arfanhitachiomiyagildeskaliszhitachiotagooglecodespotaruis-a-nur" + - "servegame-serverhitraeumtgeradellogliastradinghjartdalhjelmeland" + - "holeckochikushinonsenergyholidayhomeipharmacyshimokawahomelinkit" + - "oolsztynsettlershimokitayamahomelinuxn--1lqs71dhomeofficehomesec" + - "uritymacaparecidahomesecuritypchoseiroumuenchenishinoomotegohome" + - "senseminehomeunixn--1qqw23ahondahoneywellbeingzonehongopocznorfo" + - "lkebiblelhonjyoitakaokamakurazakitaurayasudahornindalhorseoullen" + - "svanguardhorteneis-a-painteractivegaskimitsubatamibudejjuedische" + - "sapeakebayernrtrentino-altoadigehospitalhoteleshimonitayanagithu" + - "busercontentrentino-s-tirolhotmailhoyangerhoylandetroitskazunowr" + - 
"uzhgorodeohumanitieshimonosekikawahurdalhurumajis-a-patsfanhylle" + - "stadhyogoris-a-personaltrainerhyugawarahyundaiwafunejfkharkovaoj" + - "lchoshibuyachiyodattorelayjlljmphilipsynology-diskstationjnjcphi" + - "latelyjoyentrentinoa-adigejoyokaichibalatinogiftshinjournalismai" + - "lillehammerfeste-iphoenixn--2m4a15ejpmorganjpnchoyodobashichikas" + - "hukujitawaravennakamagayachtsandoyjprshinjukumanojurkoshunantank" + - "hmelnitskiyamarylandkosugekotohiradomainshintokushimakotourakouh" + - "okutamakis-a-teacherkassymantechnologykounosupplieshintomikasaha" + - "rakouyamashikekouzushimashikis-a-techietis-a-photographerokuapph" + - "dkozagawakozakis-a-therapistoiakozowindmillkpnkppspdnshinyoshito" + - "miokamogawakrasnodarkredstonekristiansandcatshiojirishirifujieda" + - "kristiansundkrodsheradkrokstadelvaldaostarnbergkryminamisanrikub" + - "etsurfastpanelblagrarchaeologyeongbuklugsmileasinglest-mon-blogu" + - "eurovisionionjukudoyamaceratabusebastopologyeonggiehtavuoatnagai" + - "vuotnagaokakyotambabydgoszczecinemadridvagsoygardendoftheinterne" + - "tflixilovecollegefantasyleaguernseykumatorinokumejimasoykumenant" + - "okonamegatakasugais-an-accountantshimosuwalkis-a-playerkunisakis" + - "-an-actorkunitachiarailwaykunitomigusukumamotoyamashikokuchuokun" + - "neppugliakunstsammlungkunstunddesignkuokgroupictetrentinoaadigek" + - "urehabmerkurgankurobelaudiblebtimnetzkurogiminamiashigarakuroiso" + - "ftwarendalenugkuromatsunais-an-actresshimotsukekurotakikawasakis" + - "-an-anarchistoricalsocietykushirogawakustanais-an-artisteigenkus" + - "upplykutchanelkutnokuzumakis-an-engineeringkvafjordkvalsundkvaml" + - "idlugolekafjordkvanangenkvinesdalkvinnheradkviteseidskogkvitsoyk" + - "wpspectruminamitanekzmissilezajskmpspbarrell-of-knowledgeometre-" + - "experts-comptables3-fips-us-gov-west-1misugitokuyamatsumaebashik" + - "shacknetrentinoalto-adigemitourismolangevagrigentomologyeongname" + - "gawakayamagazineat-urlmitoyoakemiuramiyazurewebsiteshikagamiishi" + - "bukawamiyotamanomjondalenmlbfanmonstermontrealestatefarmequipmen" + - "trentinoaltoadigemonza-brianzaporizhzhiamonza-e-della-brianzappo" + - "shirakofuefukihaborokunohealthcareershiranukanagawamonzabrianzap" + - "tokyotangotpantheonsitemonzaebrianzaramonzaedellabrianzamoonscal" + - "emoparachutingmordoviamoriyamatsumotofukemoriyoshiminamiawajikis" + - "-foundationmormonmouthaebaruericssonyoursidegreemoroyamatsunomor" + - "tgagemoscowindowshiraois-gonemoseushistorymosjoenmoskeneshiraoka" + - "naniimihoboleslawiechristmasakinderoymosshiratakahagitlabormosvi" + - "knx-serverrankoshigayanagawamoteginowaniihamatamakawajimaoris-in" + - "to-animeiwamarshallstatebankfhappousrlmoviemovimientolgamovistar" + - "gardmozilla-iotrentinos-tirolmtranbymuenstermuginozawaonsenmuika" + - "misunagawamukodairamulhouservehalflifestylemunakatanemuncienciam" + - "uosattemupictureshishikuis-into-carshimotsumamurmanskolobrzegers" + - "undmurotorcraftrentinostirolmusashimurayamatsusakahoginankokubun" + - "jis-into-cartoonshinichinanmusashinoharamuseetrentinosued-tirolm" + - "useumverenigingmusicargodaddyn-vpndnshisognemutsuzawamy-vigorgem" + - "y-wanggouvicenzamyactivedirectorymyasustor-elvdalmycdn77-sslattu" + - "minamiuonumassa-carrara-massacarraramassabusinessebyklegalloansh" + - "ioyanaizumydattolocalhistorymyddnskingmydissentrentinosuedtirolm" + - "ydroboehringerikemydshisuifuelveruminamiyamashirokawanabelembets" + - "ukubankhmelnytskyivaporcloudnshinkamigotoyohashimotottoris-a-roc" + - "kstarachowicemyeffectrentinsued-tirolmyfirewallonieruchomoscienc" + - 
"eandindustrynmyfritzmyftpaccesshitaramamyhome-servermyjinomykola" + - "ivarggatrentinsuedtirolmymailermymediapchromedicinakamurataishin" + - "omakindlegnicafedexhibitionishinoshimatsushigemyokohamamatsudamy" + - "pepiemontemypetshizukuishimofusaitamatsukuris-into-gamessinazawa" + - "myphotoshibalestrandabergamoarekeymachinewhampshirebungoonoipifo" + - "nyminanomypiagetmyiphostfoldnavymypsxn--30rr7ymysecuritycamerake" + - "rmyshopblockshizuokanazawamytis-a-bookkeeperugiamytuleapilotshou" + - "jis-leetnedalmyvnchryslermywireitrentoyonezawapiszpittsburghoffi" + - "cialpiwatepixolinopizzapkomakiyosunndalplanetariumincommbanklabu" + - "dhabikinokawabarthadselfipatriaplantationplantshowaplatformshang" + - "rilanshowtimemergencyahabahcavuotnagareyamakeupowiathletajimabar" + - "idagawalbrzycharitysfjordplaystationplazaplchungnamdalseidfjordy" + - "ndns-iparliamentmparmatta-varjjatoyosatoyonakagyokutoyokawaplumb" + - "ingoplurinacionalpodlasiellaktyubinskiptveterinairealtorlandpodz" + - "onepohlpoivronpokerpokrovskomatsushimasfjordenpoliticartierpolit" + - "iendapolkowicepoltavalle-aostarostwodzislawinnershriramsterdamns" + - "erverbaniapomorzeszowiosienarutomobellevuelosangelesjabbottrevis" + - "ohughesigdalpordenonepornporsangerporsangugeporsgrunnanyokoshiba" + - "hikariwanumatakazakis-lostrodawarapoznanpraxis-a-bruinsfanprdpre" + - "servationpresidioprgmrprimelhusdecorativeartsilkomforbarsycenter" + - "tainmentattooceanographics3-sa-east-1principeprivatizehealthinsu" + - "ranceprochowiceproductionsimple-urlprofesionalprogressivenneslas" + - "kerrylogisticsirdalpromombetsurgeonshalloffameldalpropertyprotec" + - "tionprotonetritonprudentialpruszkowitdkommunalforbundprzeworskog" + - "ptplusgardenpupimientakayamattelefonicarbonia-iglesias-carboniai" + - "glesiascarboniapvhagakhanamigawapvtroandinosaurepaircraftingvoll" + - "ombardynamisches-dnslingpwchurcharternidyndns-mailottepzqldqponq" + - "slgbtrogstadquicksytestingquipelementslupskommuneqvcircleverapps" + - "potagerstorfjordstorjdevcloudcontrolledstpetersburgstreamuneueso" + - "kndalstudiostudyndns-at-homedepotenzamamidsundstuff-4-salestufft" + - "oread-booksnesolarssonstuttgartrusteesusakis-not-certifieducator" + - "ahimeshimamateramobilysusonosuzakaniepcesuzukanmakiwiensuzukis-s" + - "avedunetbankhakassiasvalbardunloppacificircustomersveiosvelvikon" + - "gsbergsvizzerasvn-reposologneswedenswidnicasacamdvrcampinagrande" + - "bugattipschlesischesolundbeckomonowtvareservehttphonefosshinshin" + - "otsurgeryswiebodzindianapolis-a-bloggerswiftcoverswinoujsciencea" + - "ndhistoryswisshikis-slickharkivanylvenicesynology-dsolutionslztu" + - "shuissier-justicetuvalle-daostatic-accessopotromsakakinokiatuxfa" + - "milytwmailvestre-slidrepbodynathomebuiltrvbashkiriautoscanadaeje" + - "onbuk12vestre-totennishiawakuravestvagoyvevelstadvibo-valentiavi" + - "bovalentiavideovillasnesoddenmarkhangelskjakdnepropetrovskiervaa" + - "psteiermarkoninjambylvinnicasadelamonedatingvinnytsiavipsinaappi" + - "nkomaganevirginiavirtual-userveexchangevirtualuserveftpioneervir" + - "tueeldomein-vigorlicevirtuelvisakegawaviterboknowsitallvivoldavi" + - "xn--32vp30hagebostadvlaanderenvladikavkazimierz-dolnyvladimirvlo" + - "goipippulawyvolkswagentsor-varangervologdanskonskowolayangroupho" + - "tographysiovolvolkenkundenvolyngdalvossevangenvotevotingvotoyono" + - "wiwatsukiyonoticiaskoyabearalvahkijobserveronagarahkkeravjuegosh" + - "ikikonaikawachinaganoharamcoachampionshiphoptobishimaintenancebe" + - "tsuikidsmynasushiobarackmazerbaijan-mayenebakkeshibechambagricul" + - 
"turennebudapest-a-la-masionthewifiat-band-campaniawloclawekonsul" + - "atrobeepilepsydneywmflabsorfoldworldworse-thandawowithgoogleapis" + - "a-hockeynutsiracusakataketomisatotalwpdevcloudyclusterwritesthis" + - "blogsytewroclawithyoutuberspacekitagatakinouewtcminnesotaketakat" + - "oris-an-entertainerwtfastvps-serverisignwuozuwzmiuwajimaxn--3pxu" + - "8konyvelombardiamondshinshiroxn--42c2d9axn--45br5cylxn--45brj9ci" + - "tadeliveryggeelvinckasaokaminokawanishiaizubangexn--45q11citiche" + - "rnovtsykkylvenetogakushimotoganewyorkshirecipesaro-urbino-pesaro" + - "urbinopesaromasvuotnakaiwamizawassamukawataricohdatsunanjoburgmi" + - "nakamichiharaxn--4gbriminingxn--4it168dxn--4it797kooris-a-soxfan" + - "xn--4pvxs4allxn--54b7fta0ccivilaviationishiwakis-a-conservativeg" + - "arsheis-a-cpadualstackashibatakasakiyosatokigawaxn--55qw42gxn--5" + - "5qx5dxn--5js045dxn--5rtp49civilisationissandiegoxn--5rtq34koperv" + - "ikhersonxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx" + - "2rxn--6qq986b3xlxn--7t0a264civilizationissayokkaichiropractichir" + - "urgiens-dentistes-en-francexn--80adxhksorocabalsfjordxn--80ao21a" + - "xn--80aqecdr1axn--80asehdbasilicataniaveroykeniwaizumiotsukumiya" + - "mazonawsadodgemologicallavangenaval-d-aosta-valleyokotemrevistan" + - "bulsan-suedtirolaziobninskaragandaustrheimatunduhrennesoyboltate" + - "shinanomachimkentateyamaustevoll-o-g-i-naturhistorisches3-ap-sou" + - "theast-1kappchizippodhaleangaviikadenaamesjevuemielno-ip6xn--80a" + - "swgxn--80audnedalnxn--8ltr62koryokamikawanehonbetsurutaharaxn--8" + - "pvr4uxn--8y0a063axn--90a3academiamicaaarborteaches-yogasawaracin" + - "gxn--90aeroportalabamagasakishimabaraogakibichuoxn--90aishobarak" + - "awagoexn--90azhytomyravendbasketballyngenvironmentalconservation" + - "ayorovigotsukitahatakahatakaishimogosenflfanfshostrowiecasinordd" + - "alillesandefjordgcahcesuolocus-2xn--9dbhblg6dietcimdbatodayolasi" + - "teu-3xn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byan" + - "dexn--3bst00minternationalfirearmshirahamatonbetsurnadalxn--asky" + - "-iraxn--aurskog-hland-jnbatsfjordiscountyombolzano-altoadigeu-4x" + - "n--avery-yuasakuhokkaidoomdnsiskinkyotobetsulikes-piedmonticello" + - "dingenxn--b-5gaxn--b4w605ferdxn--balsan-sudtirol-rqis-uberleetre" + - "ntino-sued-tirolxn--bck1b9a5dre4civilwarmanagementoyotaparocherk" + - "asyno-dsandvikcoromantovalle-d-aostathellexn--bdddj-mrabdxn--bea" + - "ralvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7ax" + - "n--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bj" + - "ddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-graingerxn--b" + - "od-2natalxn--bozen-sudtirol-76haibarakitahiroshimarburgxn--brnny" + - "-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigatio" + - "n-aptibleaseating-organicbcieszynxn--brum-voagatrysiljanxn--btsf" + - "jord-9zaxn--bulsan-sudtirol-rqis-very-badajozxn--c1avgxn--c2br7g" + - "xn--c3s14misakis-byxn--cck2b3bauhausposts-and-telecommunications" + - "ncfdiscoveryomitanoddavocatanzarownproviderhcloudfunctions3-ca-c" + - "entral-1xn--cesena-forli-c2gxn--cesenaforli-0jgoraxn--cg4bkis-ve" + - "ry-evillagexn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn" + - "--correios-e-telecomunicaes-ghc29axn--czr694bbcn-north-1xn--czrs" + - "0tulanxessomaxn--czru2dxn--czrw28bbtcp4xn--d1acj3bbvacationswatc" + - "h-and-clockerxn--d1alfaromeoxn--d1atunesomnarviikamitondabayashi" + - "ogamagoriziaxn--d5qv7z876claimsanfranciscofreakunemurorangeiseiy" + - "oichippubetsubetsugarugbyengerdalaheadjudygarlandyndns-picsangox" + - 
"n--davvenjrga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajew" + - "olterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4clanbibaidarmeniaxn" + - "--eckvdtc9dxn--efvn9sorreisahayakawakamiichikawamisatoursnoasait" + - "oshimayfirstjohnxn--efvy88hair-surveillancexn--ehqz56nxn--elqq16" + - "hakatanortonxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429kos" + - "akaerodromegallupinbarsyonlinewhollandevelopmentaxihuanavuotnara" + - "shinoceanographiqueu-2xn--fhbeiarnxn--finny-yuaxn--fiq228c5hsort" + - "landxn--fiq64beneventoeidsvollillyonagoyavoues3-eu-central-1xn--" + - "fiqs8soruminiserversicherungxn--fiqz9soundcastronomy-routerxn--f" + - "jord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forli-ce" + - "sena-41gxn--forlicesena-ujgxn--fpcrj9c3dxn--frde-grandrapidsouth" + - "carolinarvikomorotsukamiokamikitayamatsuris-a-socialistcgrouphil" + - "adelphiaareadmyblogspotrentino-stirolxn--frna-woaraisaijosoyroro" + - "southwestfalenxn--frya-hraxn--fzc2c9e2cldmailouvreisenissedalowi" + - "czest-le-patronisshingucciprianiigataitogliattiresanjotoyotomiya" + - "zakis-a-cubicle-slavellinotairestaurantoyotsukaidoxn--fzys8d69uv" + - "gmailxn--g2xx48clickashiharaxn--gckr3f0fauskedsmokorsetagayasell" + - "s-for-ufcfanxn--gecrj9clinichiryukyuragifuchungbukharaumalopolsk" + - "anlandurbanamexnetlifyis-a-celticsfanishikatakatsukis-a-chefarms" + - "teadurhamburgmodellingmxn--11b4c3dyndns-at-workinggrouparisor-fr" + - "onishikatsuragit-repostre-totendofinternet-dnsampagespeedmobiliz" + - "eroxn--ggaviika-8ya47hakodatexn--gildeskl-g0axn--givuotna-8yasak" + - "aiminatoyookannamilanotteroyxn--gjvik-wuaxn--gk3at1exn--gls-elac" + - "aixaxn--gmq050is-very-goodhandsonxn--gmqw5axn--h-2failxn--h1aegh" + - "akonexn--h2breg3evenesowaxn--h2brj9c8cliniquenoharaxn--h3cuzk1di" + - "gitalxn--hbmer-xqaxn--hcesuolo-7ya35bentleyonaguniversityoriikar" + - "ateverbankaratsuginamikatagamilitaryoshiokaracoldwarmiastagevje-" + - "og-hornnes3-us-east-2xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta" + - "-s4accident-prevention-webhostingxn--hnefoss-q1axn--hobl-iraxn--" + - "holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-5" + - "4axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugivingxn--io0a7is-ve" + - "ry-nicexn--j1aefbsbxn--12cfi8ixb8luxuryxn--j1amhakubahccavuotnag" + - "asakikuchikuseikarugamvikautokeinow-dnservicesevastopolexn--j6w1" + - "93gxn--jlq61u9w7beppublishproxyzjampagefrontappalmspringsakerxn-" + - "-jlster-byasuokanraxn--jrpeland-54axn--jvr189misasaguris-certifi" + - "edogawarabikomaezakirunordreisa-geekddielddanuorrikuzentakatajim" + - "idoriopretogoldpoint2thisamitsukexn--k7yn95exn--karmy-yuaxn--kbr" + - "q7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7" + - "dxn--kltx9axn--klty5xn--3ds443gxn--koluokta-7ya57hakuis-a-lawyer" + - "xn--kprw13dxn--kpry57dxn--kpu716fbx-osasebofagexn--kput3is-very-" + - "sweetpepperxn--krager-gyatomitamamuraxn--kranghke-b0axn--krdsher" + - "ad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfastlylbarefootballfina" + - "nzgorautomotiveconomiasakuchinotsuchiurakawalmartatsunobiraustra" + - "liaisondriobranconagawalesundds3-ap-southeast-2ix4432-bananarepu" + - "blicaseihicampobassociatest-iservecounterstrike12hpaleobihirosak" + - "ikamijimatsuurabogadocscbgdyniabruzzoologicalvinklein-addrammenu" + - "ernberggfarmerseine164-barcelonagasukeastcoastaldefenceatonsberg" + - "jemnes3-ap-northeast-1337xn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-" + - "fyatsukanumazuryxn--kvnangen-k0axn--l-1fairwindspeedpartnersokan" + - "eyamazoexn--l1accentureklamborghinikis-with-thebandovre-eikerxn-" + - 
"-laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn" + - "--leagaviika-52beskidyn-o-saurlandes3-us-gov-west-1xn--lesund-hu" + - "axn--lgbbat1ad8jelenia-goraxn--lgrd-poacctunkongsvingerxn--lhppi" + - "-xqaxn--linds-pramericanarturystykanoyakumoldelmenhorstalbansoox" + - "n--lns-qlapyxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-lia" + - "clintonoshoesannaniyodogawaxn--lten-granexn--lury-iraxn--m3ch0j3" + - "axn--mely-iraxn--merker-kuaxn--mgb2ddespiegelxn--mgb9awbfbxosask" + - "atchewanxn--mgba3a3ejtuscanyxn--mgba3a4f16axn--mgba3a4franamizuh" + - "oldingspjelkavikomvuxn--2scrj9christiansburgroks-thisayamanobeok" + - "akudamatsuexn--mgba7c0bbn0axn--mgbaakc7dvfedorapeopleirfjordyndn" + - "s1xn--mgbaam7a8hakusanagochijiwadell-ogliastraderxn--mgbab2bdxn-" + - "-mgbai9a5eva00bestbuyshouses3-us-west-1xn--mgbai9azgqp6jeonnamer" + - "ikawauexn--mgbayh7gpalacexn--mgbb9fbpobanazawaxn--mgbbh1a71exn--" + - "mgbc0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn-" + - "-mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskydivingxn--mgbqly7c0a67" + - "fbclothingdustkagoshimalselvendrellucaniaxn--mgbqly7cvafranziska" + - "nerimaringatlantakahamamurogawaxn--mgbt3dhdxn--mgbtf8flatangerxn" + - "--mgbtx2betainaboxfusejnynysagaeroclubmedecincinnationwidealerim" + - "o-i-ranadexeterxn--mgbx4cd0abbvieeexn--mix082fedoraprojectransur" + - "luzernxn--mix891feiraquarelleborkangerxn--mjndalen-64axn--mk0axi" + - "ndianmarketingxn--mk1bu44cngrondarxn--mkru45isleofmanchesterxn--" + - "mlatvuopmi-s4axn--mli-tlaquilanciaxn--mlselv-iuaxn--moreke-juaxn" + - "--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlarvikoseis-a-stude" + - "ntalxn--mre-og-romsdal-qqbhzcateringebuilderschmidtre-gauldalima" + - "nowarudaxauthordalandemoneyokosukanzakiyokawaraustinnatuurwetens" + - "chappenaumburgjerstadotsuruokakegawaurskog-holandingjerdrumetace" + - "ntrumeteorappalermomahachijolstereviewskrakowebspacempresashibet" + - "sukuibigawaukraanghkepnogataijibestaddnslivelanddnss3-ap-south-1" + - "6-bambleclerc66xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompan" + - "ycnpyatigorskodjeffersonxn--muost-0qaxn--mxtq1misawaxn--ngbc5azd" + - "xn--ngbe9e0axn--ngbrxn--3e0b707exn--nit225kosherbrookegawaxn--nm" + - "esjevuemie-tcbaltimore-og-romsdalipayxn--nnx388axn--nodessakurai" + - "ssmarterthanyoutwentexn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3ax" + - "n--ntsq17gxn--nttery-byaeservehumourxn--nvuotna-hwaxn--nyqy26axn" + - "--o1achaseljeepsongdalenviknaharimalborkdalxn--o3cw4halsaintloui" + - "s-a-anarchistoireggiocalabriaxn--o3cyx2axn--od0algxn--od0aq3biei" + - "gersundishakotanhktjeldsundisrechtrainingjesdalimitedivtasvuodna" + - "kaniikawatanaguraxn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-" + - "fyawatahamaxn--osyro-wuaxn--otu796dxn--p1acfermochizukirkenesass" + - "aris-a-financialadvisor-aurdalvivanovodkamisatokashikiwakunigami" + - "harulminamiiselectrapaniizaxn--p1aixn--pbt977cnsannohelplfinanci" + - "aluccapitalonewspaperxn--pgbs0dhlxn--porsgu-sta26ferraraxn--pssu" + - "33lxn--pssy2uxn--q9jyb4cntoyouraxn--qcka1pmckinseyxn--qqqt11misc" + - "onfusedxn--qxamusementdllcube-serversaillespreadbettingxn--rady-" + - "iraxn--rdal-poaxn--rde-ulavagiskexn--rdy-0nabarixn--rennesy-v1ax" + - "n--rhkkervju-01aflakstadaokagakicks-assedicoguchikuzenxn--rholt-" + - "mragowoodsideltaiwanairlinedre-eikerxn--rhqv96gxn--rht27zxn--rht" + - "3dxn--rht61exn--risa-5nativeamericanantiquespydebergxn--risr-ira" + - "xn--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastaf" + - "ricapetownnews-stagingxn--rovu88bielawalterxn--rros-granvindafjo" + - 
"rdxn--rskog-uuaxn--rst-0naturalhistorymuseumcenterxn--rsta-franc" + - "aiseharaxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruh" + - "eredumbrellajollamericanexpressexyxn--s9brj9collectionxn--sandne" + - "ssjen-ogbizxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratang" + - "enxn--skierv-utazassnasabaerobaticketsrtromsojamisonxn--skjervy-" + - "v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5naturalscie" + - "ncesnaturellesrvaroyxn--slt-elabcgxn--smla-hraxn--smna-gratis-a-" + - "bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--snes-poaxn--" + - "snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-va" + - "ranger-ggbiellaakesvuemieleccexn--srfold-byaxn--srreisa-q1axn--s" + - "rum-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbieszc" + - "zadygeyachimataikikugawarszawashingtondclkareliancexn--stre-tote" + - "n-zcbstoragexn--sudtirol-y0emmafann-arboretumbriamallamaceioxn--" + - "t60b56axn--tckweatherchannelxn--tiq49xqyjetztrentino-suedtirolxn" + - "--tjme-hraxn--tn0agrinet-freakstordalxn--tnsberg-q1axn--tor131ox" + - "n--trany-yuaxn--trentin-sud-tirol-tsjcbnlxn--trentin-sudtirol-b9" + - "ixn--trentino-sud-tirol-dckoshimizumakizunokunimimatakashimarylh" + - "urstgoryxn--trentino-sudtirol-usjevnakershuscultureggioemiliarom" + - "agnamsskoganeis-a-republicancerresearchaeologicaliforniaxn--tren" + - "tinosud-tirol-tsjewelryxn--trentinosudtirol-b9ixn--trentinsud-ti" + - "rol-98ixn--trentinsudtirol-rqixn--trgstad-r1axn--trna-woaxn--tro" + - "ms-zuaxn--tysvr-vraxn--uc0atvestfoldxn--uc0ay4axn--uist22hamurak" + - "amigoris-a-liberalxn--uisz3gxn--unjrga-rtaobaomoriguchiharagusar" + - "tstoregontrailroadxn--unup4yxn--uuwu58axn--vads-jraxn--vallee-ao" + - "ste-i2gxn--vallee-d-aoste-43hangglidingxn--valleeaoste-6jgxn--va" + - "lleedaoste-i2gxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ct" + - "bievatmallorcadaques3-us-west-2xn--vermgensberatung-pwbifukagawa" + - "shtenawdev-myqnapcloudaccesscambridgestoneustarhubs3-website-ap-" + - "northeast-1xn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgs" + - "y-qoa0jewishartgalleryxn--vgu402colognextdirectoystre-slidrettoz" + - "awaxn--vhquvestnesor-odalxn--vler-qoaxn--vre-eiker-k8axn--vrggt-" + - "xqadxn--vry-yla5gxn--vuq861bihorologyukiiyamanouchikuhokuryugasa" + - "kitchenhlfanhs3-website-ap-southeast-1xn--w4r85el8fhu5dnraxn--w4" + - "rs40lxn--wcvs22dxn--wgbh1colonialwilliamsburgrongaxn--wgbl6axn--" + - "xhq521bikedagestangeorgeorgiaxn--xkc2al3hye2axn--xkc2dl3a5ee0han" + - "goutsystemscloudfrontdoorxn--y9a3aquariumishimasudaxn--yer-znatu" + - "rbruksgymnxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--3hcrj9cist" + - "rondheimmobilienishiokoppegardyndns-office-on-the-weberlincolnis" + - "hitosashimizunaminamibosogndalottokorozawaxn--ystre-slidre-ujbil" + - "baogashimadachicagoboats3-website-ap-southeast-2xn--zbx025dxn--z" + - "f0ao64axn--zf0avxn--3oq18vl8pn36axn--zfr164billustrationikkoeben" + - "havnikolaevents3-website-eu-west-1xnbayxz" + "aicadaques3-ap-southeast-1kappchizippodhaleangaviikadenaamesjevu" + + "emielno-ip6xn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazu" + + "ryxn--kvnangen-k0axn--l-1fairwindsowaxn--l1accentureklamborghini" + + "kis-very-sweetpepperxn--laheadju-7yatsushiroxn--langevg-jxaxn--l" + + "cvr32dxn--ldingen-q1axn--leagaviika-52beskidyn-o-saurlandes3-web" + + "site-ap-northeast-1xn--lesund-huaxn--lgbbat1ad8jelenia-goraxn--l" + + "grd-poacctunkongsvingerxn--lhppi-xqaxn--linds-pramericanarturyst" + + "ykanoyakumoldelmenhorstalbansomnarviikamitondabayashiogamagorizi" + + 
"axn--lns-qlapyxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-l" + + "iacliniquenoharaxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-i" + + "raxn--merker-kuaxn--mgb2ddespeedpartnersnoasaitoshimayfirstjohnx" + + "n--mgb9awbfbxosasayamaxn--mgba3a3ejtuscanyxn--mgba3a4f16axn--mgb" + + "a3a4franamizuholdingspiegelxn--mgba7c0bbn0axn--mgbaakc7dvfedorap" + + "eopleirfjordyndns1xn--mgbaam7a8hakusanagochijiwadell-ogliastrade" + + "rxn--mgbab2bdxn--mgbai9a5eva00bestbuyshouses3-website-ap-southea" + + "st-1xn--mgbai9azgqp6jeonnamerikawauexn--mgbayh7gpalacexn--mgbb9f" + + "bpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp" + + "4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbp" + + "l2fhskydivingxn--mgbqly7c0a67fbclintonoshoesanfranciscofreakunem" + + "urorangeiseiyoichippubetsubetsugarugbyengerdalaheadjudygarlandyn" + + "dns-picsangoxn--mgbqly7cvafranziskanerimaringatlantakahamamuroga" + + "waxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2betainaboxfusejnynysa" + + "gaeroclubmedecincinnationwidealerimo-i-ranadexeterxn--mgbx4cd0ab" + + "bvieeexn--mix082fedoraprojectransurlvivanovodkamisatokashikiwaku" + + "nigamiharulminamiiselectrapaniizaxn--mix891feiraquarelleborkange" + + "rxn--mjndalen-64axn--mk0axindianmarketingxn--mk1bu44clothingdust" + + "kagoshimalselvendrellucaniaxn--mkru45is-with-thebandovre-eikerxn" + + "--mlatvuopmi-s4axn--mli-tlaquilanciaxn--mlselv-iuaxn--moreke-jua" + + "xn--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlarvikoseis-a-sox" + + "fanxn--mre-og-romsdal-qqbhzcasinorddalimanowarudavocatanzarownpr" + + "oviderhcloudfunctions3-eu-west-1xn--msy-ula0haldenxn--mtta-vrjja" + + "t-k7afamilycompanycn-northwest-1xn--muost-0qaxn--mxtq1misawaxn--" + + "ngbc5azdxn--ngbe9e0axn--ngbrxn--3e0b707exn--nit225kosherbrookega" + + "waxn--nmesjevuemie-tcbaltimore-og-romsdalipayxn--nnx388axn--node" + + "ssakuraisleofmanchesterxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3" + + "axn--ntsq17gxn--nttery-byaeservehumourxn--nvuotna-hwaxn--nyqy26a" + + "xn--o1achaseljeepsongdalenviknaharimalborkdalxn--o3cw4halsaintlo" + + "uis-a-anarchistoireggiocalabriaxn--o3cyx2axn--od0algxn--od0aq3bi" + + "eigersundivtasvuodnakamuratajimidoriopretogoldpoint2thisamitsuke" + + "vje-og-hornnes3-website-ap-southeast-2xn--ogbpf8flekkefjordxn--o" + + "ppegrd-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--otu796dxn--p1ac" + + "fermochizukirkenesasebofagexn--p1aissmarterthanyoutwentexn--pbt9" + + "77cngrondarxn--pgbs0dhlxn--porsgu-sta26ferraraxn--pssu33lxn--pss" + + "y2uxn--q9jyb4cnpyatigorskodjeffersonxn--qcka1pmckinseyxn--qqqt11" + + "misconfusedxn--qxamusementdllcube-serversaillespjelkavikomvuxn--" + + "2m4a15exn--rady-iraxn--rdal-poaxn--rde-ulavagiskexn--rdy-0nabari" + + "xn--rennesy-v1axn--rhkkervju-01aflakstadaokagakicks-assedicnsanj" + + "otoyouraxn--rholt-mragowoodsideltaitogliattirespreadbettingxn--r" + + "hqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5nativeamericanantiq" + + "uespydebergxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-by" + + "axn--rny31hammarfeastafricapitalonewspaperxn--rovu88bielawalterx" + + "n--rros-granvindafjordxn--rskog-uuaxn--rst-0naturalhistorymuseum" + + "centerxn--rsta-francaiseharaxn--rvc1e0am3exn--ryken-vuaxn--ryrvi" + + "k-byaxn--s-1faithruheredumbrellajollamericanexpressexyxn--s9brj9" + + "cntoystre-slidrettozawaxn--sandnessjen-ogbizxn--sandy-yuaxn--ser" + + "al-lraxn--ses554gxn--sgne-gratangenxn--skierv-utazassnasabaeroba" + + "ticketsrtromsojamisonxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn-" + + "-sknland-fxaxn--slat-5naturalsciencesnaturellesrvaroyxn--slt-ela" + + 
"bcgxn--smla-hraxn--smna-gratis-a-bulls-fanxn--snase-nraxn--sndre" + + "-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr" + + "-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbiellaakesvuemieleccex" + + "n--srfold-byaxn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stjrd" + + "al-s1axn--stjrdalshalsen-sqbieszczadygeyachimataikikugawarszawas" + + "hingtondclkaratexn--stre-toten-zcbstoragexn--sudtirol-y0emmafann" + + "-arboretumbriamallamaceioxn--t60b56axn--tckweatherchannelxn--tiq" + + "49xqyjetztrentino-suedtirolxn--tjme-hraxn--tn0agrinet-freakstord" + + "alxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sud-tirol-t" + + "sjcbnlxn--trentin-sudtirol-b9ixn--trentino-sud-tirol-dckoshimizu" + + "makizunokunimimatakashimarylhurstgoryxn--trentino-sudtirol-usjev" + + "nakershuscultureggioemiliaromagnamsosnowiechonanbuildingripexn--" + + "trentinosud-tirol-tsjewelryxn--trentinosudtirol-b9ixn--trentinsu" + + "d-tirol-98ixn--trentinsudtirol-rqixn--trgstad-r1axn--trna-woaxn-" + + "-troms-zuaxn--tysvr-vraxn--uc0atvestfoldxn--uc0ay4axn--uist22ham" + + "urakamigoris-a-libertarianxn--uisz3gxn--unjrga-rtaobaomoriguchih" + + "aragusartstoregontrailroadxn--unup4yxn--uuwu58axn--vads-jraxn--v" + + "allee-aoste-i2gxn--vallee-d-aoste-43handsonxn--valleeaoste-6jgxn" + + "--valleedaoste-i2gxn--vard-jraxn--vegrshei-c0axn--vermgensberate" + + "r-ctbievatmallorcafederationikonanporovnoddavoues3-eu-west-2xn--" + + "vermgensberatung-pwbifukagawashtenawdev-myqnapcloudaccesscambrid" + + "gestoneustarhubs3-website-eu-west-1xn--vestvgy-ixa6oxn--vg-yiabk" + + "haziaxn--vgan-qoaxn--vgsy-qoa0jewishartgalleryxn--vgu402coguchik" + + "uzenxn--vhquvestnesopotromsakakinokiaxn--vler-qoaxn--vre-eiker-k" + + "8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bihorologyonaguniversityo" + + "riikaratsuginamikatagamilitaryoshiokaracoldwarmiastagexn--w4r85e" + + "l8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1collectionxn--wgbl6axn-" + + "-xhq521bikedagestangeorgeorgiaxaustraliaisondriobranconagawalesu" + + "ndds3-ca-central-1xn--xkc2al3hye2axn--xkc2dl3a5ee0hangglidingxn-" + + "-y9a3aquariumishimasudaxn--yer-znaturbruksgymnxn--yfro4i67oxn--y" + + "garden-p1axn--ygbi2ammxn--3hcrj9circleverappspotagerxn--ystre-sl" + + "idre-ujbilbaogashimadachicagoboats3-website-sa-east-1xn--zbx025d" + + "xn--zf0ao64axn--zf0avxn--3oq18vl8pn36axn--zfr164billustrationino" + + "hekinannestadivttasvuotnakaniikawatanaguraxnbayxz" // nodes is the list of nodes. 
Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into @@ -512,8696 +512,8700 @@ const text = "9guacuiababia-goracleaningroks-theatreebinagisobetsumidatlantica" // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x329903, - 0x28a5c4, - 0x2ea306, - 0x2f1d43, - 0x2f1d46, - 0x3896c6, - 0x3af783, - 0x2030c4, - 0x372387, - 0x2e9f48, - 0x1a000c2, - 0x1f390c7, - 0x376389, - 0x2bf5ca, - 0x2bf5cb, - 0x22fc83, - 0x2ac706, - 0x2364c5, - 0x22036c2, - 0x3d0244, - 0x262343, - 0x204885, - 0x2603742, - 0x203743, - 0x2b2a1c4, - 0x205085, - 0x2e24bc2, - 0x393cce, - 0x2553c3, - 0x3a7f86, - 0x3200a82, - 0x2fa487, - 0x238f86, - 0x3601102, - 0x281483, - 0x281484, - 0x213046, - 0x208b88, - 0x27bb86, - 0x309a84, - 0x3a08e82, - 0x3424c9, - 0x226c87, - 0x3967c6, - 0x36ee49, - 0x2d4408, - 0x32b844, - 0x23e606, - 0x242dc6, - 0x3e02a42, - 0x3ab40f, - 0x27c54e, - 0x3572c4, - 0x211e05, - 0x329805, - 0x2f0d49, - 0x244149, - 0x213847, - 0x201286, - 0x2011c3, - 0x4221d42, - 0x22c243, - 0x26024a, - 0x4611783, - 0x259b85, - 0x323182, - 0x38a509, - 0x4a01742, - 0x20b104, - 0x311586, - 0x2768c5, - 0x369c44, - 0x5222244, - 0x208403, - 0x235504, - 0x5600fc2, - 0x26a604, - 0x5a83d04, - 0x37140a, - 0x5e00882, - 0x2ebc87, - 0x27bf08, - 0x6e034c2, - 0x2756c7, - 0x22ebc4, - 0x2c2587, - 0x22ebc5, - 0x33c847, - 0x38f246, - 0x307c84, - 0x307c85, - 0x290847, - 0x7e05002, - 0x324143, - 0x207ac2, - 0x38f1c3, - 0x8215402, - 0x215405, - 0x8600202, - 0x2bd444, - 0x27a605, - 0x357207, - 0x370d8e, - 0x23d0c4, - 0x236d04, - 0x20b403, - 0x3735c9, - 0x20b40b, - 0x21c948, - 0x36ec08, - 0x258748, - 0x21dec8, - 0x32b68a, - 0x33c747, - 0x2ad1c6, - 0x8a4f342, - 0x340b03, - 0x341843, - 0x341c44, - 0x3af7c3, - 0x340b43, - 0x1734b82, - 0x8e00bc2, - 0x281945, - 0x295646, - 0x27e904, - 0x35e907, - 0x3ced06, - 0x382384, - 0x382387, - 0x200bc3, - 0x92cb6c2, - 0x9720542, - 0x9a2e5c2, - 0x22e5c6, - 0x9e00282, - 0x2ab0c5, - 0x3360c3, - 0x3cb184, - 0x2edfc4, - 0x2edfc5, - 0x207183, - 0xa203a83, - 0xa60a982, - 0x20c2c5, - 0x20c2cb, - 0x20d086, - 0x25920b, - 0x239e44, - 0x20da89, - 0x20e784, - 0xaa0e9c2, - 0x20f203, - 0x20f783, - 0x1602742, - 0x3b8983, - 0x2103ca, - 0xae12802, - 0x3d04c5, - 0x2de10a, - 0x36ca44, - 0x212803, - 0x214204, - 0x215703, - 0x215704, - 0x215707, - 0x215e05, - 0x216e46, - 0x217146, - 0x217ec3, - 0x21c408, - 0x215183, - 0xb204ac2, - 0x24b688, - 0x3c47cb, - 0x221648, - 0x222b86, - 0x223c07, - 0x2288c8, - 0xc202142, - 0xc6c2742, - 0x3131c8, - 0x303007, - 0x282385, - 0x38e948, - 0x2dca48, - 0x2b11c3, - 0x22bcc4, - 0x341c82, - 0xca2db82, - 0xce06b82, - 0xd62dcc2, - 0x22dcc3, - 0xda00f82, - 0x203083, - 0x2e3304, - 0x20d303, - 0x324504, - 0x372a8b, - 0x235c43, - 0x2e6e06, - 0x235c44, - 0x3bb64e, - 0x24d845, - 0x3a8088, - 0x3a0107, - 0x3a010a, - 0x20b5c3, - 0x23b987, - 0x20b5c5, - 0x2325c4, - 0x2cd786, - 0x2cd787, - 0x2d7804, - 0x2efd07, - 0x302a44, - 0x200f84, - 0x3710c6, - 0x25d904, - 0x32cdc6, - 0x2078c3, - 0x38e708, - 0x2078c8, - 0x236cc3, - 0x3b8943, - 0x3b0984, - 0x3b50c3, - 0xde4ce42, - 0xe28e502, - 0x203a03, - 0x2084c6, - 0x208d03, - 0x231f84, - 0xe73d042, - 0x356203, - 0x33d043, - 0x219542, - 0xea0ae02, - 0x2c5206, - 0x237447, - 0x2ec307, - 0x399145, - 0x211384, - 0x290705, - 0x2838c7, - 0x2d24c9, - 0x2e29c6, - 0x2e8448, - 0x2fb686, - 0xee03442, - 0x353608, - 0x2fccc6, - 0x343945, - 0x318f87, - 0x319e44, - 0x319e45, - 0x2044c4, - 0x2044c8, - 0xf20c382, - 0xf600482, - 0x343646, - 0x200488, - 0x3553c5, - 0x356586, - 0x35d948, - 0x386088, - 0xfa0c105, - 
0xfe3a0c4, - 0x388887, - 0x1020e202, - 0x10602d42, - 0x11a07c02, - 0x311685, - 0x2a5b45, - 0x259786, - 0x2be287, - 0x3c6807, - 0x1220d183, - 0x29f687, - 0x2e9d08, - 0x1b62efc9, - 0x393e87, - 0x22fec7, - 0x230908, - 0x231106, - 0x2320c6, - 0x232d0c, - 0x233a8a, - 0x234407, - 0x23638b, - 0x237287, - 0x23728e, - 0x1ba38204, - 0x2385c4, - 0x23bc07, - 0x264107, - 0x2431c6, - 0x2431c7, - 0x243a47, - 0x1be03382, - 0x244606, - 0x24460a, - 0x244e8b, - 0x246607, - 0x2471c5, - 0x247603, - 0x247b46, - 0x247b47, - 0x275083, - 0x1c200102, - 0x24894a, - 0x1c777a42, - 0x1ca4ce82, - 0x1ce4b382, - 0x1d239082, - 0x24c3c5, - 0x24cb44, - 0x1da1d2c2, - 0x26a685, - 0x245483, - 0x20e885, - 0x21ddc4, - 0x223ac4, - 0x30a986, - 0x31bc06, - 0x20c4c3, - 0x3b4984, - 0x370703, - 0x1ea03282, - 0x223f84, - 0x388e06, - 0x223f85, - 0x2d0a86, - 0x319088, - 0x2a6744, - 0x22f648, - 0x3a52c5, - 0x23fc08, + 0x32bb03, + 0x35ab84, + 0x2ea546, + 0x2f5883, + 0x2f5886, 0x38df86, - 0x322207, - 0x247944, - 0x247946, - 0x29f983, - 0x3a0783, - 0x318348, - 0x32db04, - 0x35df87, - 0x1fe06086, - 0x2db609, - 0x330808, - 0x33d0c8, - 0x39b184, - 0x2142c3, - 0x231942, - 0x20211702, - 0x20617c42, - 0x214983, - 0x20a22082, - 0x3724c4, - 0x24c1c6, - 0x324245, - 0x2a1cc3, - 0x22f304, - 0x2b5887, - 0x390503, - 0x240d48, - 0x2257c5, - 0x260d43, - 0x27a585, - 0x27a6c4, - 0x3016c6, - 0x22a404, - 0x22d606, - 0x357146, - 0x2bd984, - 0x237643, - 0x20e22482, - 0x236b05, - 0x200843, - 0x21202d02, - 0x232083, - 0x21d845, - 0x2355c3, - 0x2355c9, - 0x21600942, - 0x21e03782, - 0x28de05, - 0x21a546, - 0x2a74c6, - 0x2c5808, - 0x2c580b, - 0x20850b, - 0x359485, - 0x399345, - 0x2cbe09, - 0x1601042, - 0x2d0708, - 0x209084, - 0x22606ac2, - 0x25b083, - 0x22e642c6, - 0x23d508, - 0x23200c02, - 0x2279c8, - 0x2360b782, - 0x2bc00a, - 0x23ad1803, - 0x3d2246, - 0x35f088, - 0x30b7c8, - 0x2c8006, - 0x385507, - 0x3ab607, - 0x24294a, - 0x36cac4, - 0x35cc84, - 0x3759c9, - 0x243aae05, - 0x27c746, - 0x229c03, - 0x253004, - 0x246cbd04, - 0x373207, - 0x238407, - 0x2bb144, - 0x2e3385, - 0x259848, - 0x24d187, - 0x24d607, - 0x24a19902, - 0x313804, - 0x292b48, - 0x24ec04, - 0x250244, - 0x251385, - 0x2514c7, - 0x39e1c9, - 0x251f04, - 0x252489, - 0x2526c8, - 0x252d84, - 0x252d87, - 0x24e540c3, - 0x254247, - 0x1625a82, - 0x16b0d42, - 0x254dc6, - 0x255407, - 0x255884, - 0x256907, - 0x257487, - 0x258083, - 0x231ac2, - 0x208c42, - 0x271b03, - 0x271b04, - 0x271b0b, - 0x36ed08, - 0x25fd84, - 0x25bf45, - 0x25cd07, - 0x25e585, - 0x2d004a, - 0x25fcc3, - 0x25201482, - 0x223184, - 0x263ec9, - 0x2682c3, - 0x268387, - 0x3cc409, - 0x21d508, - 0x23ab03, - 0x27fc47, - 0x2802c9, - 0x26e9c3, - 0x2882c4, - 0x2897c9, - 0x28bf06, - 0x357503, - 0x2051c2, - 0x23e5c3, - 0x3c63c7, - 0x2dcdc5, - 0x34a2c6, - 0x25a644, - 0x2e4cc5, - 0x21ffc3, - 0x218106, - 0x20dc82, - 0x3ac1c4, - 0x25627242, - 0x25a39f03, - 0x25e02b02, - 0x250143, - 0x202b04, - 0x2175c7, - 0x3cb486, - 0x27dd02, - 0x2625dac2, - 0x319284, - 0x2662e342, - 0x26a00ac2, - 0x2b2b04, - 0x2b2b05, - 0x206a05, - 0x363b06, - 0x26e0f982, - 0x20f985, - 0x210785, - 0x212683, - 0x217746, - 0x222545, - 0x22e542, - 0x355005, - 0x22e544, - 0x2cf343, - 0x358543, - 0x2720ba82, - 0x2da587, - 0x3692c4, - 0x3692c9, - 0x252f04, - 0x2880c3, - 0x35c589, - 0x2880c8, - 0x276a59c4, - 0x2a59c6, - 0x2aad43, - 0x20a683, - 0x214d83, - 0x27af8fc2, - 0x2fc642, - 0x27e00642, - 0x337cc8, - 0x2f5488, - 0x3afdc6, - 0x26e105, - 0x23b805, - 0x202587, - 0x2c1a05, - 0x25cfc2, - 0x28297982, - 0x28600042, - 0x23de08, - 0x353545, - 0x2f2a04, - 0x24a745, - 0x24e787, - 0x271684, - 0x248842, - 
0x28a04bc2, - 0x348684, - 0x358187, - 0x3cbf47, - 0x33c804, - 0x294cc3, - 0x236c04, - 0x236c08, - 0x232406, - 0x2cd60a, - 0x39fd04, - 0x2951c8, - 0x28c344, - 0x223d06, - 0x297944, - 0x311986, - 0x369589, - 0x239307, - 0x21c803, - 0x28e05642, - 0x39b403, - 0x20ebc2, - 0x2923ed42, - 0x315086, - 0x37efc8, - 0x2a7647, - 0x2fe609, - 0x294709, - 0x2a8e45, - 0x2a9f09, - 0x2aa6c5, - 0x2aa809, - 0x2abd45, - 0x2ad808, - 0x29612384, - 0x29a581c7, - 0x230283, - 0x2ada07, - 0x230286, - 0x2ae6c7, - 0x2a4b05, - 0x2e01c3, - 0x29e33842, - 0x212a44, - 0x2a22e382, - 0x2a6586c2, - 0x2f2046, - 0x27be85, - 0x2b09c7, - 0x275b83, - 0x33a5c4, - 0x207d43, - 0x312f03, - 0x2aa00d42, - 0x2b207842, - 0x3897c4, - 0x231a83, - 0x24bec5, - 0x2b615642, - 0x2be04182, - 0x3001c6, - 0x32dc44, - 0x3ce184, - 0x3ce18a, - 0x2c6005c2, - 0x26ce03, - 0x211b0a, - 0x219888, - 0x2ca24604, - 0x2005c3, - 0x20c9c3, - 0x258889, - 0x2080c9, - 0x277ec6, - 0x2ce19a43, - 0x222885, - 0x32f34d, - 0x219a46, - 0x22544b, - 0x2d2071c2, - 0x21fe48, - 0x2fa13282, - 0x2fe01142, - 0x2b9c45, - 0x30200b02, - 0x39f2c7, - 0x2b2ec7, - 0x20c8c3, - 0x324ec8, - 0x30602c82, - 0x2ab804, - 0x294ec3, - 0x36f305, - 0x245586, - 0x221b84, - 0x3b8903, - 0x2b1ec3, - 0x30a0b202, - 0x3992c4, - 0x3b6a05, - 0x3bc407, - 0x27ddc3, - 0x2b0fc3, - 0x2b1683, - 0x1615002, - 0x2b1743, - 0x2b1e43, - 0x30e0a242, - 0x310884, - 0x31be06, - 0x353d03, - 0x2b2183, - 0x312b38c2, - 0x2b38c8, - 0x2b4884, - 0x310e46, - 0x260787, - 0x273a06, - 0x3688c4, - 0x3ee05682, - 0x23014b, - 0x2f700e, - 0x21b0cf, - 0x2d46c3, - 0x3f661282, - 0x1646c02, - 0x3fa08802, - 0x2923c3, - 0x208803, - 0x2d2746, - 0x2e30c6, - 0x3c9007, - 0x300c44, - 0x3fe1a682, - 0x40225cc2, - 0x24e605, - 0x2ef687, - 0x395a86, - 0x406125c2, - 0x2125c4, - 0x2b8ac3, - 0x40a0b2c2, - 0x40f6de43, - 0x2b9504, - 0x2c1909, - 0x412c6fc2, - 0x41618e42, - 0x331885, - 0x41ac74c2, - 0x41e00e42, - 0x35bf47, - 0x215b49, - 0x37660b, - 0x3ab3c5, - 0x26d9c9, - 0x38c706, - 0x20d0c7, - 0x42201d44, - 0x216909, - 0x3410c7, - 0x2165c7, - 0x227b03, - 0x2b2986, - 0x313fc7, - 0x249583, - 0x36be86, - 0x42a10682, - 0x42e35842, - 0x39b543, - 0x33a1c5, - 0x393187, - 0x221046, - 0x2dcd45, - 0x259c44, - 0x27efc5, - 0x2fc044, - 0x432023c2, - 0x370007, - 0x2c5fc4, - 0x207fc4, - 0x207fcd, - 0x2d6889, - 0x22e2c8, - 0x2775c4, - 0x34c745, - 0x39bb87, - 0x208ec4, - 0x3cedc7, - 0x218bc5, - 0x43619e04, - 0x2b1885, - 0x266f84, - 0x286406, - 0x2be085, - 0x43a12582, - 0x3a25c3, - 0x2dce84, - 0x2dce85, - 0x3421c6, - 0x32d645, - 0x23aa84, - 0x25d383, - 0x221fc6, - 0x2fbc45, - 0x3cf745, - 0x2be184, - 0x2e9983, - 0x39fd8c, - 0x43f4e142, - 0x4420fc42, - 0x44604942, - 0x224f03, - 0x224f04, - 0x44a0e7c2, - 0x303548, - 0x34a385, - 0x247d84, - 0x364846, - 0x44e16182, - 0x4522c9c2, - 0x45602a82, - 0x2a7a45, - 0x2bd846, - 0x239a44, - 0x213586, - 0x2eba46, - 0x231dc3, - 0x45b33a0a, - 0x26dd45, - 0x260203, - 0x224d06, - 0x38b149, - 0x224d07, - 0x2a2488, - 0x2d42c9, - 0x276248, - 0x2f9686, - 0x20a783, - 0x45e9f702, - 0x3a1a88, - 0x46252782, - 0x46602002, - 0x20d203, - 0x2e2845, - 0x26e544, - 0x252b49, - 0x2eb2c4, - 0x219c48, - 0x20ec03, - 0x46f72f04, - 0x21a588, - 0x207f07, - 0x47212642, - 0x23f7c2, - 0x329785, - 0x269a49, - 0x272203, - 0x282104, - 0x32f304, - 0x203e83, - 0x283f0a, - 0x47726302, - 0x47a12882, - 0x2cb643, - 0x38c983, - 0x161ea82, - 0x3a7d43, - 0x47e2a542, - 0x48203202, - 0x48615584, - 0x215586, - 0x300406, - 0x245d84, - 0x27ba83, - 0x20ad43, - 0x2f7583, - 0x245206, - 0x38f585, - 0x2cb7c7, - 0x2cf045, - 0x2d02c6, - 0x2d0f48, - 0x2d1146, - 0x205844, - 0x29c2cb, - 0x2d4983, - 
0x2d4985, - 0x2d4e08, - 0x22c482, - 0x35c242, - 0x48a4c442, - 0x48e06282, - 0x21a6c3, - 0x4926f582, - 0x26f583, - 0x2d5743, - 0x49a07242, - 0x49ed9fc6, - 0x25e406, - 0x4a2da102, - 0x4a60f7c2, - 0x4ab58582, - 0x4ae0bf82, - 0x4b225082, - 0x4b600a42, - 0x218f83, - 0x389185, - 0x34c8c6, - 0x4babf0c4, - 0x388c0a, - 0x3a9606, - 0x2e6704, - 0x23ec83, - 0x4c6039c2, - 0x201402, - 0x232043, - 0x4ca22103, - 0x301147, - 0x2bdf87, - 0x4e271c07, - 0x3c4a07, - 0x22a7c3, - 0x34a70a, - 0x3a0304, - 0x3c6944, - 0x3c694a, - 0x247005, - 0x4e6198c2, - 0x254d83, - 0x4ea00602, - 0x252ec3, - 0x39b3c3, - 0x4f200582, - 0x29f604, - 0x21f9c4, - 0x209905, - 0x30a305, - 0x31f946, - 0x321986, - 0x4f640e02, - 0x4fa01a42, - 0x306d85, - 0x25e112, - 0x349186, - 0x207783, - 0x2aef06, - 0x303805, - 0x1609a82, - 0x57e10e02, - 0x367403, - 0x210e03, - 0x2836c3, - 0x5820de02, - 0x22e803, - 0x58601202, - 0x209c43, - 0x3108c8, - 0x2597c3, - 0x2a8cc6, - 0x23c087, - 0x30ecc6, + 0x3b0fc3, + 0x27d304, + 0x30e5c7, + 0x2ea188, + 0x1a000c2, + 0x1f3b587, + 0x379ac9, + 0x2bc2ca, + 0x2bc2cb, + 0x22ce83, + 0x2aaf06, + 0x2360c5, + 0x220a5c2, + 0x3d04c4, + 0x256c83, + 0x368605, + 0x2610c02, + 0x358f03, + 0x2b2c3c4, + 0x368e05, + 0x2e1ebc2, + 0x39610e, + 0x251343, + 0x3a9546, + 0x3200a82, + 0x2fb287, + 0x238a46, + 0x3601cc2, + 0x22f703, + 0x27fa44, + 0x222006, + 0x204248, + 0x27d006, + 0x312144, + 0x3a04542, + 0x345049, + 0x220947, + 0x3989c6, + 0x371889, + 0x2dff08, + 0x32da44, + 0x2cdf06, + 0x247c46, + 0x3e01702, + 0x3ac90f, + 0x22854e, + 0x226044, + 0x209cc5, + 0x32ba05, + 0x2f2249, + 0x23fbc9, + 0x222807, + 0x2755c6, + 0x275503, + 0x4227442, + 0x227443, + 0x33abca, + 0x4616583, + 0x35d585, + 0x329342, + 0x38fbc9, + 0x4a03902, + 0x203904, + 0x31a286, + 0x2c1705, + 0x36c284, + 0x5218bc4, + 0x203ac3, + 0x235104, + 0x5601b82, + 0x265fc4, + 0x5a73f44, + 0x30d64a, + 0x5e00882, + 0x2f1787, + 0x365588, + 0x6e07b82, + 0x274d07, + 0x22e484, + 0x2bf287, + 0x22e485, + 0x33f287, + 0x256006, + 0x28eac4, + 0x329b05, + 0x2903c7, + 0x7e0c8c2, + 0x366e43, + 0x20dec2, + 0x3cc743, + 0x820f642, + 0x282e85, + 0x8600202, + 0x2b9b44, + 0x279185, + 0x225f87, + 0x30cfce, + 0x23dec4, + 0x236904, + 0x206ec3, + 0x30f809, + 0x206ecb, + 0x326508, + 0x371648, + 0x255308, + 0x217f88, + 0x32d88a, + 0x33f187, + 0x2ab9c6, + 0x8a4b382, + 0x342b83, + 0x343cc3, + 0x3447c4, + 0x3b1003, + 0x342bc3, + 0x1736b02, + 0x8e03fc2, + 0x27ff05, + 0x2947c6, + 0x27cc04, + 0x35bd87, + 0x303cc6, + 0x30c4c4, + 0x385787, + 0x203fc3, + 0x92c8042, + 0x970e842, + 0x9a2bf02, + 0x22bf06, + 0x9e00282, + 0x2a3f45, + 0x338043, + 0x3cc1c4, + 0x2edb84, + 0x2edb85, + 0x201a03, + 0xa373a83, + 0xa605fc2, + 0x208145, + 0x20814b, + 0x209206, + 0x35cc0b, + 0x26dec4, + 0x20af89, + 0x20bc84, + 0xaa0bec2, + 0x20c703, + 0x20c983, + 0xae0d1c2, + 0x3ba0c3, + 0x20d1ca, + 0xb20d9c2, + 0x3d0745, + 0x2ddaca, + 0x3a0544, + 0x20d9c3, + 0x20e704, + 0x210103, + 0x210104, + 0x210107, + 0x210ac5, + 0x211b06, + 0x212346, + 0x213103, + 0x215f08, + 0x21cd03, + 0xb6068c2, + 0x246688, + 0x3c5f0b, + 0x21c008, + 0x21c606, + 0x21d687, + 0x224c48, + 0xc60abc2, + 0xcabf442, + 0x31bec8, + 0x305e47, + 0x208945, + 0x208948, + 0x2dc148, + 0x2d2a83, + 0x22b404, + 0x344802, + 0xce2c1c2, + 0xd211502, + 0xda2c302, + 0x22c303, + 0xde00dc2, + 0x27d2c3, + 0x3c4404, + 0x209483, + 0x367204, 0x30eccb, - 0x2e6647, - 0x2faf44, - 0x58e01fc2, - 0x34a205, - 0x592220c3, - 0x2aacc3, - 0x2bc205, - 0x34a603, - 0x5974a606, - 0x2d088a, - 0x245a03, - 0x212f44, - 0x2003c6, - 0x343d46, - 0x59a482c3, - 0x33a487, - 0x277dc7, - 0x29dc05, - 0x350406, - 0x2a25c3, - 
0x5c617983, - 0x5ca10482, - 0x359c04, - 0x214a09, - 0x23dc07, - 0x358d85, - 0x247344, - 0x375d08, + 0x235843, + 0x2e7046, + 0x235844, + 0x352e0e, + 0x34b9c5, + 0x2653c8, + 0x3a9647, + 0x3a964a, + 0x207083, + 0x35a987, + 0x207085, + 0x231c04, + 0x2d0c06, + 0x2d0c07, + 0x2fab84, + 0x2ef8c7, + 0x305884, + 0x2752c4, + 0x30d306, + 0x259ec4, + 0x3946c6, + 0x200dc3, + 0x208708, + 0x20dcc8, + 0x2368c3, + 0x3ba083, + 0x3b21c4, + 0x3b6803, + 0xe200bc2, + 0xe68be42, + 0x205883, + 0x203b86, + 0x2043c3, + 0x237f44, + 0xeb3fa82, + 0x355ac3, + 0x33fa83, + 0x213842, + 0xee01242, + 0x2c1ec6, + 0x237047, + 0x2f1e07, + 0x39b1c5, + 0x216184, + 0x28e705, + 0x273b07, + 0x2e81c9, + 0x2ec1c6, + 0x2fc4c8, + 0x3033c6, + 0xf20f382, + 0x335648, + 0x3cf806, + 0x389c85, + 0x3252c7, + 0x326144, + 0x326145, + 0x368244, + 0x368248, + 0xf608202, + 0xfa00482, + 0x347c46, + 0x200488, + 0x355e45, + 0x359bc6, + 0x380088, + 0x390d08, + 0xfe07f85, + 0x1026e144, + 0x38d147, + 0x1060b702, + 0x10b42c02, + 0x11e09302, + 0x31a385, + 0x286085, + 0x35d186, + 0x2ba987, + 0x22cec7, + 0x12609303, + 0x29de87, + 0x2e9f48, + 0x1ba2e889, + 0x3962c7, + 0x22fd47, + 0x2307c8, + 0x230fc6, + 0x231706, + 0x23234c, + 0x23378a, + 0x234107, + 0x235f8b, + 0x236e87, + 0x236e8e, + 0x1be37e04, + 0x238084, + 0x239547, + 0x260147, + 0x23e7c6, + 0x23e7c7, + 0x23ef87, + 0x1c22c842, + 0x23ff86, + 0x23ff8a, + 0x24080b, + 0x241f87, + 0x242a05, + 0x2439c3, + 0x243c06, + 0x243c07, + 0x271d83, + 0x1c600102, + 0x24448a, + 0x1cb7b002, + 0x1ce48b42, + 0x1d246382, + 0x1d638b42, 0x248045, - 0x5ce52185, - 0x288c09, - 0x396883, - 0x24ce04, - 0x5d20d682, - 0x21a8c3, - 0x5d691a42, - 0x291a46, - 0x1629b42, - 0x5da0be82, - 0x2a7948, - 0x2b7f83, - 0x2b17c7, - 0x303c05, - 0x2b7b45, - 0x30ef4b, - 0x2e5086, - 0x30f146, - 0x2e6286, - 0x288f44, - 0x2c1b06, - 0x5ded7248, - 0x235d03, - 0x206f43, - 0x206f44, - 0x30af84, - 0x30bd87, - 0x2e9485, - 0x5e2e95c2, - 0x5e609dc2, - 0x209dc5, - 0x2bfc44, - 0x2ec64b, - 0x2edec8, - 0x25b484, - 0x5ee12602, - 0x5f25b402, - 0x2b3b03, - 0x2ef0c4, - 0x2ef385, - 0x2efec7, - 0x2f2544, - 0x33c904, - 0x5f608642, - 0x37a389, - 0x2f3a05, - 0x3ab685, - 0x2f4585, - 0x5fa1a803, - 0x2f63c4, - 0x2f63cb, + 0x248804, + 0x1de17382, + 0x266045, + 0x240e03, + 0x20bd85, + 0x217e84, + 0x21b6c4, + 0x313046, + 0x26d486, + 0x208343, + 0x3b61c4, + 0x3cd043, + 0x1ee069c2, + 0x21da04, + 0x38d6c6, + 0x21da05, + 0x2cee86, + 0x3253c8, + 0x26d884, + 0x22d348, + 0x3a6745, + 0x323488, + 0x2b2c46, + 0x239087, + 0x28f304, + 0x28f306, + 0x29e183, + 0x3a1503, + 0x321188, + 0x32e984, + 0x35b407, + 0x2022d106, + 0x2dad09, + 0x3315c8, + 0x33fb08, + 0x34dc04, + 0x2029c3, + 0x23a182, + 0x20616502, + 0x20a12e42, + 0x204703, + 0x20e15c02, + 0x30e704, + 0x23c5c6, + 0x366f45, + 0x2a05c3, + 0x232804, + 0x2b1fc7, + 0x375183, + 0x23cdc8, + 0x21ef85, + 0x25d7c3, + 0x279105, + 0x279244, + 0x3030c6, + 0x222a44, + 0x223fc6, + 0x225ec6, + 0x2ba084, + 0x237243, + 0x21202dc2, + 0x236705, + 0x200843, + 0x21601802, + 0x232303, + 0x217905, + 0x2351c3, + 0x2351c9, + 0x21a00942, + 0x2221e5c2, + 0x28b745, + 0x214bc6, + 0x2031c6, + 0x320048, + 0x32004b, + 0x203bcb, + 0x220045, + 0x39b3c5, + 0x2c8789, + 0x1600c42, + 0x2ceb08, + 0x2090c4, + 0x22a012c2, + 0x207643, + 0x23260306, + 0x23e308, + 0x23604002, + 0x221688, + 0x23a07242, + 0x2b870a, + 0x23ecfe83, + 0x34e2c6, + 0x35c448, + 0x3143c8, + 0x2c4cc6, + 0x388c47, + 0x3acb07, + 0x2477ca, + 0x3a05c4, + 0x358c84, + 0x379649, + 0x247ac305, + 0x228746, + 0x21fb83, + 0x24fc84, + 0x24a23dc4, + 0x30f447, + 0x23a887, + 0x2b7844, + 0x28c1c5, + 0x35d248, + 0x248e47, + 
0x2492c7, + 0x24e00d42, + 0x31c504, + 0x291648, + 0x24a9c4, + 0x24ce84, + 0x24dd05, + 0x24de47, + 0x22ee09, + 0x24eb04, + 0x24f309, + 0x24f548, + 0x24fa04, + 0x24fa07, + 0x25250043, + 0x2501c7, + 0x161f242, + 0x16ae502, + 0x250d46, + 0x251387, + 0x251784, + 0x252807, + 0x2542c7, + 0x254c43, + 0x23a302, + 0x204302, + 0x271243, + 0x271244, + 0x27124b, + 0x371748, + 0x25c484, + 0x258845, + 0x2592c7, + 0x25ab45, + 0x2d144a, + 0x25c3c3, + 0x25608282, + 0x21cc04, + 0x25ff09, + 0x264303, + 0x2643c7, + 0x28cbc9, + 0x2175c8, + 0x240643, + 0x27e207, + 0x27e889, + 0x26bf83, + 0x286604, + 0x2874c9, + 0x289a06, + 0x226283, + 0x202242, + 0x25dd03, + 0x3c79c7, + 0x2dc4c5, + 0x34ae46, + 0x2aa444, + 0x2f3b45, + 0x21a383, + 0x213346, + 0x20b182, + 0x3ada04, + 0x25a20f02, + 0x25e6df83, + 0x26202c02, + 0x24cd83, + 0x2127c4, + 0x2127c7, + 0x3cc4c6, + 0x2795c2, + 0x2665a082, + 0x3255c4, + 0x26a2c982, + 0x26e00ac2, + 0x2b00c4, + 0x2b00c5, + 0x36a785, + 0x361e86, + 0x2720a542, + 0x20a545, + 0x20cb85, + 0x20d583, + 0x212946, + 0x218ec5, + 0x22be82, + 0x354385, + 0x22be84, + 0x26d7c3, + 0x26da03, + 0x27607902, + 0x2d8307, + 0x39da84, + 0x39da89, + 0x24fb84, + 0x285f03, + 0x362448, + 0x27a85f04, + 0x285f06, + 0x2a3bc3, + 0x211f83, + 0x22b883, + 0x27ef9d82, + 0x2fdfc2, + 0x28200642, + 0x339c48, + 0x275c88, + 0x3b1606, + 0x24dbc5, + 0x3bcdc5, + 0x376587, + 0x2677c5, + 0x2049c2, + 0x28695b82, + 0x28a00042, + 0x2cd708, + 0x335585, + 0x2f2e84, + 0x24b605, + 0x24a387, + 0x25cb44, + 0x244382, + 0x28e032c2, + 0x349204, + 0x2270c7, + 0x28c707, + 0x33f244, + 0x293e43, + 0x236804, + 0x236808, + 0x231a46, + 0x2d0a8a, + 0x22ecc4, + 0x294348, + 0x289e44, + 0x21d786, + 0x295b44, + 0x31a686, + 0x39dd49, + 0x26ccc7, + 0x3263c3, + 0x29272302, + 0x2f7403, + 0x208b82, + 0x2966bc02, + 0x31dec6, + 0x383348, + 0x2a5087, + 0x3002c9, + 0x2937c9, + 0x2a6b05, + 0x2a7e09, + 0x2a85c5, + 0x2a8709, + 0x2a9a45, + 0x2aa708, + 0x29a0a244, + 0x29e54d87, + 0x230103, + 0x2aa907, + 0x230106, + 0x2ac007, + 0x2a2f05, + 0x2f0803, + 0x2a233542, + 0x20dc04, + 0x2a62c9c2, + 0x2aa55282, + 0x2f5b86, + 0x365505, + 0x2ae187, + 0x2569c3, + 0x33ca84, + 0x20e143, + 0x31bc03, + 0x2ae06982, + 0x2b607602, + 0x38e084, + 0x23a2c3, + 0x246ec5, + 0x2ba07502, + 0x2c203502, + 0x302b46, + 0x32eac4, + 0x322f04, + 0x322f0a, + 0x2ca005c2, + 0x269e83, + 0x2099ca, + 0x20f708, + 0x2ce1e084, + 0x2005c3, + 0x2065c3, + 0x255449, + 0x20e4c9, + 0x2a7746, + 0x2d20f8c3, + 0x219205, + 0x3301cd, + 0x20f8c6, + 0x21690b, + 0x2d600e82, + 0x21a208, + 0x2fe16002, + 0x30203a02, + 0x330805, + 0x30600b02, + 0x38f447, + 0x2e4607, + 0x201083, + 0x374288, + 0x30a02382, + 0x2a9504, + 0x294043, + 0x30a585, + 0x240f06, + 0x229684, + 0x3ba043, + 0x2aeb83, + 0x30e06682, + 0x39b344, + 0x3b8145, + 0x3bd507, + 0x27c0c3, + 0x2ae783, + 0x16ae842, + 0x2ae843, + 0x2aeb03, + 0x312027c2, + 0x319584, + 0x26d686, + 0x3a5fc3, + 0x2af743, + 0x316b0442, + 0x2b0448, + 0x2b1004, + 0x319b46, + 0x25f507, + 0x363a86, + 0x2ccec4, + 0x3f204a82, + 0x22ffcb, + 0x2f7cce, + 0x21574f, + 0x2e01c3, + 0x3fa5dcc2, + 0x1642582, + 0x3fe02342, + 0x290ec3, + 0x203ec3, + 0x2e8446, + 0x335b86, + 0x202347, + 0x302144, + 0x40214d02, + 0x4061f482, + 0x36f685, + 0x2ef247, + 0x397c86, + 0x40a0a482, + 0x20a484, + 0x2b5503, + 0x40e06d82, + 0x41370883, + 0x2b5d04, + 0x2be749, + 0x416c3c82, + 0x41a0eec2, + 0x3326c5, + 0x41ec4182, + 0x42202902, + 0x358007, + 0x210549, + 0x379d4b, + 0x3ac8c5, + 0x26ae09, + 0x392786, + 0x209247, + 0x42602904, + 0x2115c9, + 0x343147, + 0x211287, + 0x2217c3, + 0x2aff46, + 0x31ccc7, + 0x2450c3, + 0x286486, + 0x42e0d482, + 
0x43235442, + 0x34b803, + 0x33c685, + 0x2017c7, + 0x21ba06, + 0x2dc445, + 0x35d644, + 0x288ac5, + 0x2fd9c4, + 0x43604582, + 0x3cc947, + 0x2c2c84, + 0x20e3c4, + 0x20e3cd, + 0x2d4ec9, + 0x22c908, + 0x256ec4, + 0x366385, + 0x204587, + 0x208f04, + 0x303d87, + 0x20ec45, + 0x43a0fc84, + 0x2e4c85, + 0x262fc4, + 0x284246, + 0x2ba785, + 0x43e0a442, + 0x3a36c3, + 0x2dc584, + 0x2dc585, + 0x344d46, + 0x239885, + 0x26eb04, + 0x259943, + 0x215b46, + 0x2febc5, + 0x304705, + 0x2ba884, + 0x22ed43, + 0x22ed4c, + 0x4434f5c2, + 0x4460a802, + 0x44a05142, + 0x216c03, + 0x216c04, + 0x44e0bcc2, + 0x307fc8, + 0x34af05, + 0x243344, + 0x24a1c6, + 0x45210e42, + 0x45627bc2, + 0x45a01e02, + 0x2b4dc5, + 0x2b9f46, + 0x226dc4, + 0x222546, + 0x2f1546, + 0x201e03, + 0x45f45b8a, + 0x26b185, + 0x33ab83, + 0x21ed06, + 0x390809, + 0x21ed07, + 0x29e608, + 0x2dfdc9, + 0x364a48, + 0x313946, + 0x206e83, + 0x4629df02, + 0x3a2b88, + 0x4664f602, + 0x46a09382, + 0x209383, + 0x2e1305, + 0x26bb04, + 0x249c49, + 0x2f0dc4, + 0x20fac8, + 0x20c103, + 0x4730f144, + 0x214c08, + 0x20e307, + 0x4760a502, + 0x23c182, + 0x32b985, + 0x2497c9, + 0x2287c3, + 0x280c04, + 0x330184, + 0x204603, + 0x281d4a, + 0x47b0b282, + 0x47e0da42, + 0x2c7fc3, + 0x392a03, + 0x162e902, + 0x3a9303, + 0x48225282, + 0x48603542, + 0x48a29d44, + 0x344306, + 0x302d86, + 0x241704, + 0x27a103, + 0x203543, + 0x2f8343, + 0x240b86, + 0x256345, + 0x2c8147, + 0x2cb445, + 0x2ce6c6, + 0x2cf348, + 0x2cf546, + 0x282384, + 0x29a4cb, + 0x2d2f43, + 0x2d2f45, + 0x2d33c8, + 0x227682, + 0x358302, + 0x48e480c2, + 0x49204842, + 0x214d43, + 0x4966ca42, + 0x26ca43, + 0x2d3d83, + 0x49e01ac2, + 0x4a2d7d46, + 0x25a9c6, + 0x4a6d7e82, + 0x4aa0c9c2, + 0x4ae6da42, + 0x4b207e02, + 0x4b61e302, + 0x4ba00a42, + 0x20f003, + 0x38da45, + 0x366506, + 0x4be26004, + 0x38d4ca, + 0x3aab06, + 0x2e6944, + 0x29a843, + 0x4ca05f02, + 0x203202, + 0x238003, + 0x4ce15c83, + 0x307907, + 0x2ba687, + 0x4e671347, + 0x3c6147, + 0x22a083, + 0x34b28a, + 0x2655c4, + 0x22d004, + 0x22d00a, + 0x23ad05, + 0x4ea0f742, + 0x250d03, + 0x4ee00602, + 0x24fb43, + 0x2f73c3, + 0x4f600582, + 0x29de04, + 0x219d84, + 0x3c46c5, + 0x3129c5, + 0x3287c6, + 0x332e86, + 0x4fa3ce82, + 0x4fe01e82, + 0x3c8805, + 0x25a6d2, + 0x349d06, + 0x289d83, + 0x2ac846, + 0x308285, + 0x1604742, + 0x5820d742, + 0x36b3c3, + 0x20d743, + 0x273903, + 0x5860b302, + 0x241f03, + 0x58a1b382, + 0x229d83, + 0x3195c8, + 0x2a6983, + 0x2a6986, + 0x336107, + 0x317846, + 0x31784b, + 0x2e6887, + 0x2fbd44, + 0x59201a82, + 0x34ad85, + 0x59615c43, + 0x22e043, + 0x2b8905, + 0x34b183, + 0x59b4b186, + 0x2cec8a, + 0x241383, + 0x221f04, + 0x2003c6, + 0x38a086, + 0x59e4b583, + 0x33c947, + 0x2a7647, + 0x29c405, + 0x345e86, + 0x29e743, + 0x5ca12b83, + 0x5ce04782, + 0x229b04, + 0x22b509, + 0x35ac45, + 0x22d8c4, + 0x381808, + 0x243605, + 0x5d243ac5, + 0x25b489, + 0x398a83, + 0x248ac4, + 0x5d602882, + 0x214f43, + 0x5da95602, + 0x2a0206, + 0x16256c2, + 0x5de07d02, + 0x2b4cc8, + 0x324b83, + 0x2e4bc7, + 0x317ac5, + 0x2b4885, + 0x2be94b, + 0x2e52c6, + 0x2beb46, + 0x2e64c6, + 0x33af44, + 0x2d5886, + 0x5e2e2c08, + 0x235903, + 0x271603, + 0x271604, + 0x314984, + 0x316dc7, + 0x2e96c5, + 0x5e6e9802, + 0x5ea057c2, + 0x2057c5, + 0x2ebd44, + 0x2ebd4b, + 0x2eda88, + 0x257d84, + 0x5f20a4c2, + 0x5f657d02, + 0x2b0683, + 0x2eec84, + 0x2eef45, + 0x2efa87, + 0x2f29c4, + 0x220084, + 0x5fa03d02, + 0x37db89, + 0x2f4005, + 0x3acb85, + 0x2f4b85, + 0x5fe14e83, 0x2f68c4, - 0x2f6b8b, - 0x2f74c5, - 0x21b20a, - 0x2f7c88, - 0x2f7e8a, - 0x2f8443, - 0x2f844a, - 0x60248202, - 0x60604c42, - 0x60a84743, - 0x60efb602, - 0x2fb603, - 
0x6137b282, - 0x61736842, - 0x2fbec4, - 0x21c546, - 0x2132c5, - 0x2fcc43, - 0x329ec6, - 0x212dc5, - 0x282704, - 0x61a00902, - 0x2ff044, - 0x2cba8a, - 0x2eec47, - 0x276b46, - 0x31aa07, - 0x206003, - 0x2b9548, - 0x3ab04b, - 0x2c2045, - 0x352c05, - 0x352c06, + 0x2f68cb, + 0x2f7584, + 0x2f784b, + 0x2f8285, + 0x21588a, + 0x2f8a48, + 0x2f8c4a, + 0x2f9203, + 0x2f920a, + 0x6064b4c2, + 0x60a44042, + 0x60e82583, + 0x612fc442, + 0x2fc443, + 0x6177f302, + 0x61b387c2, + 0x2fc804, + 0x216046, + 0x222285, + 0x2fe403, + 0x32c0c6, + 0x221d85, + 0x2e1984, + 0x61e00902, + 0x2aef44, + 0x2c840a, + 0x2ee807, + 0x365346, + 0x35a7c7, + 0x23ffc3, + 0x2b5d48, + 0x3ac54b, + 0x2bed45, + 0x335285, + 0x335286, 0x2e8744, - 0x3b4ac8, - 0x232bc3, - 0x242cc4, - 0x242cc7, - 0x2fab86, - 0x205446, - 0x3bb48a, - 0x252504, - 0x35474a, - 0x61f91806, - 0x391807, - 0x25bfc7, - 0x277404, - 0x277409, - 0x31bac5, - 0x275e4b, - 0x2eb003, - 0x22d7c3, - 0x6221fe03, - 0x2327c4, - 0x62600682, - 0x333e46, - 0x62adff45, - 0x2af145, - 0x257246, - 0x2a0344, - 0x62e05942, - 0x247644, - 0x63204e42, - 0x344505, - 0x23c884, - 0x63e28203, - 0x64210e42, - 0x210e43, - 0x356786, - 0x64604fc2, - 0x22a188, - 0x224b84, - 0x224b86, - 0x38d206, - 0x20cb84, - 0x221f45, - 0x239fc8, - 0x23b687, - 0x3341c7, - 0x3341cf, - 0x292a46, - 0x243bc3, - 0x247cc4, - 0x210883, - 0x223e44, - 0x257384, - 0x64a12a82, - 0x28e203, - 0x257603, - 0x64e07bc2, - 0x23b943, - 0x372583, - 0x215e8a, - 0x38eb07, - 0x25c8cc, - 0x25cb86, - 0x25f906, - 0x260487, - 0x65230d47, - 0x26bb89, - 0x24b7c4, - 0x26d144, - 0x6561a702, - 0x65a01002, - 0x3bb846, - 0x33a284, - 0x28e686, - 0x2311c8, - 0x23d344, - 0x39f306, - 0x2a7485, - 0x365988, - 0x208703, - 0x294905, - 0x295883, - 0x3ab783, - 0x3ab784, - 0x223143, - 0x65e61182, - 0x66201f42, - 0x2eaec9, - 0x29c545, - 0x29fb04, - 0x2a1785, - 0x21b684, - 0x2c8f07, - 0x37c205, - 0x66671dc4, - 0x271dc8, - 0x2e7f06, - 0x2eab84, - 0x2eb148, - 0x2f0c07, - 0x66a02c42, - 0x2f4b04, - 0x210944, - 0x2bbb87, - 0x66e02c44, - 0x258d02, - 0x67216302, - 0x220ac3, - 0x2dd844, - 0x2a3143, - 0x2a3145, - 0x6763aa42, - 0x2fb4c5, - 0x2721c2, - 0x397045, - 0x2bba05, - 0x67a07742, - 0x33cfc4, - 0x67e00b42, - 0x2623c6, - 0x350b86, - 0x269b88, - 0x2c2f88, - 0x2f1fc4, - 0x2ff345, - 0x305cc9, - 0x3993c4, - 0x2d0844, - 0x217083, - 0x68242a85, - 0x37d987, - 0x251205, - 0x2a5c44, - 0x3a2a8d, - 0x2d48c2, - 0x2d48c3, - 0x3ad943, - 0x686035c2, - 0x3a4505, - 0x221dc7, - 0x2ba984, - 0x3c4ac7, - 0x2d44c9, - 0x2cbbc9, - 0x279847, - 0x290203, - 0x350d48, - 0x2686c9, - 0x3b5147, - 0x3c0045, - 0x2fdcc6, - 0x2fe146, + 0x205dc8, + 0x232203, + 0x247b44, + 0x247b47, + 0x2fb986, + 0x3691c6, + 0x352c4a, + 0x2292c4, + 0x2292ca, + 0x62373586, + 0x373587, + 0x2588c7, + 0x276644, + 0x276649, + 0x26d345, + 0x22d58b, + 0x2eb483, + 0x224183, + 0x6261a1c3, + 0x231e04, + 0x62a00682, + 0x2ed586, + 0x62f21b45, + 0x2aca85, + 0x253186, + 0x29f044, + 0x63208ac2, + 0x243a04, + 0x63606702, + 0x38a845, + 0x336904, + 0x64224583, + 0x6460d782, + 0x20d783, + 0x2669c6, + 0x64a022c2, + 0x225d08, + 0x21eb84, + 0x21eb86, + 0x393286, + 0x206784, + 0x215ac5, + 0x26e048, + 0x2e1d87, + 0x347d87, + 0x347d8f, + 0x291546, + 0x23fdc3, + 0x24a104, + 0x20cc83, + 0x21d8c4, + 0x2591c4, + 0x64e0dc42, + 0x28bb43, + 0x25b683, + 0x65201202, + 0x22f903, + 0x30e7c3, + 0x210b4a, + 0x208b07, + 0x25bd4c, + 0x25c006, + 0x25d886, + 0x25f207, + 0x65630c07, + 0x26c6c9, + 0x2467c4, + 0x271dc4, + 0x65a14d82, + 0x65e03182, + 0x353006, + 0x33c744, + 0x28bfc6, + 0x231088, + 0x23e144, + 0x38f486, + 0x203185, + 0x2939c8, + 0x203dc3, + 0x294fc5, + 
0x29a743, + 0x3acc83, + 0x3acc84, + 0x21cbc3, + 0x6625dbc2, + 0x66601bc2, + 0x2eb349, + 0x2a3045, + 0x2a5604, + 0x2a60c5, + 0x20f584, + 0x2c5987, + 0x3899c5, + 0x66a71504, + 0x271508, + 0x2eb5c6, + 0x2f07c4, + 0x2f0c48, + 0x2f2107, + 0x66e0ac02, + 0x2f6b44, + 0x20cd44, + 0x2b8287, + 0x6720ac04, + 0x2d0182, + 0x6760eb02, + 0x21ae83, + 0x2dd204, + 0x2a1503, + 0x2a1505, + 0x67a28f82, 0x2fe2c5, - 0x2d6985, - 0x68a00c82, - 0x23b585, - 0x2b6ac8, - 0x2c4fc6, - 0x68e063c7, - 0x2bb084, - 0x304087, - 0x300dc6, - 0x69213b42, - 0x341ec6, - 0x3049ca, - 0x305245, - 0x696e68c2, - 0x69a945c2, - 0x314306, - 0x2b65c8, - 0x69fcc107, - 0x6a21d242, - 0x21de43, - 0x20d5c6, - 0x2285c4, - 0x3bfa06, - 0x206706, - 0x20574a, - 0x201685, - 0x2f51c6, - 0x34abc3, - 0x34abc4, - 0x208482, - 0x32dbc3, - 0x6a624f42, - 0x2f8903, - 0x211d84, - 0x2b6704, - 0x2b670a, - 0x21e903, - 0x27bc48, - 0x2f974a, - 0x23cb07, - 0x308406, - 0x262284, - 0x293602, - 0x2a6602, - 0x6aa007c2, - 0x236bc3, - 0x25bd87, + 0x265ac2, + 0x3a0905, + 0x2b8105, + 0x67e0ed82, + 0x33fa04, + 0x68200b42, + 0x200b46, + 0x324806, + 0x249908, + 0x2bfc88, + 0x2f5b04, + 0x305345, + 0x3432c9, + 0x39b444, + 0x2cec44, + 0x213043, + 0x68647905, + 0x383507, + 0x24e805, + 0x286184, + 0x3a6b8d, + 0x2e03c2, + 0x2e03c3, + 0x3af183, + 0x68a010c2, + 0x3a5905, + 0x2298c7, + 0x2b7084, + 0x3c6207, + 0x2dffc9, + 0x2c8549, + 0x2783c7, + 0x28e203, + 0x3249c8, + 0x26a389, + 0x3b6887, + 0x3c1245, + 0x2ff806, + 0x2ffe06, + 0x2fff85, + 0x2d4fc5, + 0x68e04082, + 0x27a905, + 0x2b2f08, + 0x2c1c86, + 0x6936a147, + 0x2b7784, + 0x2b23c7, + 0x3022c6, + 0x69643a42, + 0x344a46, + 0x306c4a, + 0x3074c5, + 0x69ae6b02, + 0x69e8c8c2, + 0x31d006, + 0x2b3d48, + 0x6a28c8c7, + 0x6a617302, + 0x217f03, + 0x209746, + 0x224944, + 0x3c0c06, + 0x36a486, + 0x3694ca, + 0x30bec5, + 0x2759c6, + 0x2f7203, + 0x2f7204, + 0x2023c2, + 0x32ea43, + 0x6aa16c42, + 0x2f96c3, + 0x209c44, + 0x2b3e84, + 0x2b3e8a, + 0x2189c3, + 0x27d0ca, + 0x280ec7, + 0x310ac6, + 0x256bc4, + 0x2926c2, + 0x2a42c2, + 0x6ae007c2, + 0x2367c3, + 0x258687, 0x2007c7, - 0x28a544, - 0x3ad7c7, - 0x2effc6, - 0x22e6c7, - 0x303144, - 0x353b45, - 0x21d085, - 0x6ae14fc2, - 0x214fc6, - 0x21ef03, - 0x221a02, - 0x221a06, - 0x6b200e02, - 0x6b6061c2, - 0x3c3145, - 0x6ba05102, - 0x6be01842, - 0x32dd85, - 0x2ce005, - 0x2a7fc5, - 0x6c261643, - 0x24c285, - 0x2e5147, - 0x3157c5, - 0x347d05, - 0x3a8184, - 0x333c46, - 0x3c6b84, - 0x6c6008c2, - 0x6d381ac5, - 0x2a69c7, - 0x39b848, - 0x254606, - 0x25460d, - 0x257c09, - 0x257c12, - 0x2ff785, - 0x3079c3, - 0x6d606342, - 0x316544, - 0x219ac3, - 0x3428c5, - 0x305f85, - 0x6da2e042, - 0x260d83, - 0x6de5dec2, - 0x6e6c28c2, - 0x6ea00082, - 0x2df085, - 0x3c4c03, - 0x250f48, - 0x6ee03502, - 0x6f20ab42, - 0x29f5c6, - 0x35ebca, - 0x219103, - 0x25d303, - 0x2fc9c3, - 0x70203582, - 0x7e61b882, - 0x7ee11a02, - 0x209642, - 0x341cc9, - 0x2c6404, - 0x2ac048, - 0x7f2fcc82, - 0x7f602242, - 0x2ac805, - 0x2367c8, - 0x317808, - 0x34d40c, - 0x23ca43, - 0x7fa1c882, - 0x7fe0a2c2, - 0x2848c6, - 0x309285, - 0x2758c3, - 0x27dbc6, - 0x3093c6, - 0x286483, - 0x30ad43, - 0x30b246, - 0x30c704, - 0x26fa86, - 0x2226c5, - 0x2226ca, - 0x39e784, - 0x30cdc4, - 0x30d50a, - 0x80209bc2, - 0x39e905, - 0x30e30a, - 0x30f2c5, - 0x30fb84, - 0x30fc86, - 0x30fe04, - 0x21ab86, - 0x80613b82, - 0x2f19c6, - 0x370545, - 0x36fa87, - 0x3a8946, - 0x260684, - 0x2db087, - 0x333946, - 0x239645, - 0x23f387, - 0x3b6387, - 0x3b638e, - 0x27d4c6, - 0x3cec85, - 0x20e347, - 0x20f803, - 0x20f807, - 0x228ec5, - 0x22dbc4, - 0x2383c2, - 0x249687, - 0x300cc4, - 0x249b44, - 0x28950b, - 
0x21f2c3, - 0x2d1287, - 0x21f2c4, - 0x2f0a87, - 0x294003, - 0x345fcd, - 0x3a5148, - 0x24a484, - 0x271cc5, - 0x3147c5, - 0x314c03, - 0x80a24a82, - 0x316b43, - 0x3174c3, - 0x215144, - 0x2803c5, - 0x21ef87, - 0x34ac46, - 0x38af83, - 0x3585cb, - 0x27530b, - 0x2aa40b, - 0x37f50b, - 0x2e690a, - 0x32f08b, - 0x36b50b, - 0x39550c, + 0x288544, + 0x3af007, + 0x2efb86, + 0x22c007, + 0x305f84, + 0x3a6a85, + 0x217145, + 0x6b20fa02, + 0x343d46, + 0x21c3c3, + 0x229502, + 0x229506, + 0x6b60e382, + 0x6ba16a42, + 0x3c4245, + 0x6be17442, + 0x6c201102, + 0x32ec05, + 0x2c9fc5, + 0x2a5cc5, + 0x6c65e083, + 0x23c685, + 0x2e5387, + 0x31e605, + 0x34d085, + 0x2654c4, + 0x2ed386, + 0x3ad084, + 0x6ca008c2, + 0x6d784ec5, + 0x2a4687, + 0x366048, + 0x250586, + 0x25058d, + 0x2547c9, + 0x2547d2, + 0x3013c5, + 0x30a103, + 0x6da09702, + 0x31f384, + 0x20f943, + 0x345445, + 0x308ac5, + 0x6de2c682, + 0x25d803, + 0x6e25a482, + 0x6eabf5c2, + 0x6ee00082, + 0x2e3c85, + 0x3c6343, + 0x24e548, + 0x6f202202, + 0x6f602a82, + 0x29ddc6, + 0x3c9d4a, + 0x20f183, + 0x239803, + 0x343543, + 0x70602fc2, + 0x7ea13d82, + 0x7f20c842, + 0x204fc2, + 0x344849, + 0x2c30c4, + 0x2a9d48, + 0x7f6fe442, + 0x7fa08602, + 0x2ab005, + 0x2363c8, + 0x320648, + 0x34f04c, + 0x239b03, + 0x7fe62982, + 0x80205cc2, + 0x282706, + 0x311945, + 0x2559c3, + 0x27bec6, + 0x311a86, + 0x2842c3, + 0x313403, + 0x313e46, + 0x315404, + 0x255706, + 0x21904a, + 0x38e904, + 0x315ac4, + 0x31620a, + 0x8065c302, + 0x38ea85, + 0x316f8a, + 0x317fc5, + 0x318884, + 0x318986, + 0x318b04, + 0x215206, + 0x80a2c6c2, + 0x2f5506, + 0x3cce85, + 0x30ad07, + 0x3a9e46, + 0x25f404, + 0x2da787, + 0x345ac6, + 0x23b485, + 0x23b487, + 0x3b7ac7, + 0x3b7ace, + 0x27b7c6, + 0x303c45, + 0x20ab47, + 0x20ca03, + 0x20ca07, + 0x222f45, + 0x22c204, + 0x23a842, + 0x2451c7, + 0x3021c4, + 0x245684, + 0x28720b, + 0x219683, + 0x2cf687, + 0x219684, + 0x2f0647, + 0x2930c3, + 0x3470cd, + 0x3a65c8, + 0x245fc4, + 0x271405, + 0x31d605, + 0x31da43, + 0x80e1ea82, + 0x31f983, + 0x320303, + 0x343ec4, + 0x27e985, + 0x21c447, + 0x2f7286, + 0x390643, + 0x26da8b, + 0x27494b, + 0x30880b, + 0x2d268b, + 0x2e6b4a, + 0x32ff0b, + 0x36db4b, + 0x39770c, + 0x3cf58b, + 0x3d1551, + 0x320c4a, + 0x321f4b, + 0x32220c, + 0x32250b, + 0x322a4a, + 0x323cca, + 0x324f8e, + 0x3256cb, + 0x32598a, + 0x327011, + 0x32744a, + 0x32794b, + 0x327e8e, + 0x328a8c, + 0x328f0b, + 0x3291ce, + 0x32954c, + 0x32a04a, + 0x32b34c, + 0x8132b64a, + 0x32c248, + 0x32ce09, + 0x32efca, + 0x32f24a, + 0x32f4cb, + 0x333a0e, + 0x334911, + 0x33e289, + 0x33e4ca, + 0x33ef0b, + 0x340d4a, + 0x341596, + 0x34290b, + 0x342e8a, + 0x3437ca, + 0x34454b, + 0x344ec9, + 0x347a49, + 0x34864d, + 0x348f8b, + 0x349e8b, + 0x34a84b, + 0x34bf09, + 0x34c54e, + 0x34d24a, + 0x34e70a, + 0x34eb4a, + 0x34f68b, + 0x34fecb, + 0x350b4d, + 0x3538cd, + 0x354010, + 0x3544cb, + 0x354fcc, + 0x355bcb, + 0x357b0b, + 0x35914e, + 0x3598cb, + 0x3598cd, + 0x36098b, + 0x36140f, + 0x3617cb, + 0x36200a, + 0x362649, + 0x362e49, + 0x81763c0b, + 0x363ece, + 0x36b88b, + 0x36c70f, + 0x36e68b, + 0x36e94b, + 0x36ec0b, + 0x36f7ca, + 0x379949, + 0x37c84f, + 0x38140c, + 0x381fcc, + 0x38258e, + 0x382a8f, + 0x382e4e, + 0x3836d0, + 0x383acf, + 0x38448e, + 0x38504c, + 0x385352, + 0x386111, + 0x38690e, + 0x386d8e, + 0x3872cb, + 0x3872ce, + 0x38764f, + 0x387a0e, + 0x387d93, + 0x388251, + 0x38868c, + 0x38898e, + 0x388e0c, + 0x389353, + 0x38b310, + 0x38c08c, + 0x38c38c, + 0x38c84b, + 0x38dc8e, + 0x38e18b, + 0x38f84b, + 0x390a4c, + 0x396b4a, + 0x396f0c, + 0x39720c, + 0x397509, + 0x398b4b, + 0x398e08, + 0x3995c9, + 0x3995cf, + 0x39ad4b, + 
0x81b9bb4a, + 0x39e98c, + 0x39fb4b, + 0x39fe09, + 0x3a06c8, + 0x3a0e0b, + 0x3a12cb, + 0x3a1e4a, + 0x3a20cb, + 0x3a290c, + 0x3a32c8, + 0x3a6ecb, + 0x3a9a8b, + 0x3ab70e, + 0x3acd8b, + 0x3ae18b, + 0x3b764b, + 0x3b7909, + 0x3b7e4d, + 0x3c168a, + 0x3c3b97, + 0x3c4f18, + 0x3c8109, + 0x3c974b, + 0x3cad94, + 0x3cb28b, + 0x3cb80a, + 0x3cbcca, + 0x3cbf4b, + 0x3cd490, + 0x3cd891, + 0x3cdf4a, + 0x3ceb8d, + 0x3cf28d, + 0x3d198b, + 0x343e43, + 0x81f64543, + 0x2ec646, + 0x2412c5, + 0x27f187, + 0x32fdc6, + 0x16602c2, + 0x2d8e09, + 0x32bec4, + 0x2e2748, + 0x21a103, + 0x31f2c7, + 0x217402, + 0x2ae1c3, + 0x8220c882, + 0x2c9886, + 0x2cac84, + 0x229ec4, + 0x377843, + 0x377845, + 0x82ac41c2, + 0x82ea8a84, + 0x276587, + 0x8325ac82, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0xe9148, + 0x202543, + 0x2000c2, + 0xaf0c8, + 0x209302, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0x201203, + 0x33bf96, + 0x35f453, + 0x3aee89, + 0x38d048, + 0x34ac09, + 0x317106, + 0x349250, + 0x2446d3, + 0x2fba48, + 0x373e07, + 0x27a207, + 0x28880a, + 0x3758c9, + 0x3a3449, + 0x28b00b, + 0x256006, + 0x2059ca, + 0x21c606, + 0x32bac3, + 0x2d8245, + 0x208708, + 0x200c0d, + 0x31a44c, + 0x3034c7, + 0x3284cd, + 0x26e144, + 0x2320ca, + 0x2332ca, + 0x23378a, + 0x2449c7, + 0x23d807, + 0x241884, + 0x28f306, + 0x34b944, + 0x302788, + 0x2f0e09, + 0x320046, + 0x320048, + 0x2f6f0d, + 0x2c8789, + 0x3143c8, + 0x3acb07, + 0x3c448a, + 0x251386, + 0x25fcc7, + 0x2e3284, + 0x22bc47, + 0x22b88a, + 0x241ace, + 0x2677c5, 0x3cdc8b, - 0x3d1411, - 0x317e0a, - 0x31874b, - 0x318a0c, - 0x318d0b, - 0x319c0a, - 0x31c34a, - 0x31d34e, - 0x31ec0b, - 0x31eeca, - 0x320211, - 0x32064a, - 0x320b4b, - 0x32108e, - 0x3226cc, - 0x322d4b, - 0x32300e, - 0x32338c, - 0x327e4a, - 0x32914c, - 0x80f2944a, - 0x32a048, - 0x32ac09, - 0x32e14a, - 0x32e3ca, - 0x32e64b, - 0x3323ce, - 0x333411, - 0x33bb49, - 0x33bd8a, - 0x33c4cb, - 0x33ec8a, - 0x33f516, - 0x34088b, - 0x340e0a, - 0x34134a, - 0x3419cb, - 0x342349, - 0x346949, - 0x346f8d, - 0x34840b, - 0x34930b, - 0x349ccb, - 0x34b589, - 0x34bbce, - 0x34c10a, - 0x34cf0a, - 0x34d70a, - 0x34e20b, - 0x34ea4b, - 0x34f6cd, - 0x35268d, - 0x354c90, - 0x35514b, - 0x35570c, - 0x35630b, - 0x35ba4b, - 0x35d14e, - 0x35d64b, - 0x35d64d, - 0x36260b, - 0x36308f, - 0x36344b, - 0x363c8a, - 0x3641c9, - 0x3649c9, - 0x81365e0b, - 0x3660ce, - 0x3678cb, - 0x36a0cf, - 0x36c00b, - 0x36c2cb, - 0x36c58b, - 0x36cbca, - 0x376209, - 0x37904f, - 0x37d6cc, - 0x37db4c, - 0x37e20e, - 0x37e70f, - 0x37eace, - 0x3802d0, - 0x3806cf, - 0x38108e, - 0x381c4c, - 0x381f52, - 0x3829d1, - 0x3831ce, - 0x38364e, - 0x383b8b, - 0x383b8e, - 0x383f0f, - 0x3842ce, - 0x384653, - 0x384b11, - 0x384f4c, - 0x38524e, - 0x3856cc, - 0x385c13, - 0x386a50, - 0x3877cc, - 0x387acc, - 0x387f8b, - 0x3893ce, - 0x3898cb, - 0x38a18b, - 0x38b38c, - 0x39494a, - 0x394d0c, - 0x39500c, - 0x395309, - 0x39694b, - 0x396c08, - 0x397549, - 0x39754f, - 0x398ccb, - 0x81799aca, - 0x39c44c, - 0x39d60b, - 0x39d8c9, - 0x39f6c8, - 0x39f8cb, - 0x3a054b, - 0x3a10ca, - 0x3a134b, - 0x3a180c, - 0x3a21c8, - 0x3a590b, - 0x3a858b, - 0x3aa20e, - 0x3ab88b, - 0x3ac94b, - 0x3b5f0b, - 0x3b61c9, - 0x3b670d, - 0x3c048a, - 0x3c2a97, - 0x3c37d8, - 0x3c70c9, - 0x3c844b, - 0x3c9994, - 0x3c9e8b, - 0x3ca40a, - 0x3cac8a, - 0x3caf0b, - 0x3cb750, - 0x3cbb51, - 0x3cc64a, - 0x3cd28d, - 0x3cd98d, - 0x3d184b, - 0x2150c3, - 0x81b66743, - 0x2b45c6, - 0x245945, - 0x280bc7, - 0x32ef46, - 0x1604582, - 0x2b3c49, - 0x329cc4, - 0x2e3bc8, - 0x21fd43, - 0x316487, - 0x206bc2, - 0x2b0a03, - 0x81e01242, - 0x2ccec6, - 
0x2ce884, - 0x359fc4, - 0x3273c3, - 0x3273c5, - 0x826c7502, - 0x82aaab84, - 0x277347, - 0x82e5e6c2, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0xe8f08, - 0x2182c3, + 0x309f09, + 0x20e4c9, + 0x206307, + 0x20630a, + 0x2b81c7, + 0x2f7e09, + 0x2c6b88, + 0x31a9cb, + 0x2e1305, + 0x22c7ca, + 0x26d809, + 0x36764a, + 0x2cb4cb, + 0x22bb4b, + 0x28ad95, + 0x2fa145, + 0x3acb85, + 0x2f68ca, + 0x2a784a, + 0x309c87, + 0x20f2c3, + 0x352f88, + 0x2d638a, + 0x21eb86, + 0x26a1c9, + 0x2939c8, + 0x2f07c4, + 0x389109, + 0x2bfc88, + 0x2b2b87, + 0x384ec6, + 0x2a4687, + 0x2add87, + 0x240985, + 0x26760c, + 0x271405, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x2543, + 0x24b583, + 0x209302, + 0x209303, + 0x215c83, + 0x202543, + 0x24b583, + 0x209303, + 0x215c83, + 0x2543, + 0x2a6983, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0xaf0c8, + 0x209302, + 0x2046c2, + 0x2fbcc2, + 0x202382, + 0x212782, + 0x2c45c2, + 0x90146, + 0x4e09303, + 0x2351c3, + 0x210a43, + 0x22b883, + 0x20f8c3, + 0x2287c3, + 0x2d8146, + 0x215c83, + 0x24b583, + 0x200f83, + 0xaf0c8, + 0x30f6c4, + 0x30ef07, + 0x378203, + 0x330804, + 0x206183, + 0x206383, + 0x22b883, + 0xe41c7, + 0x10de04, + 0x10cdc3, + 0x1680c5, 0x2000c2, - 0x1513c8, - 0x207c02, - 0x214d83, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x215e83, - 0x339ad6, - 0x3610d3, - 0x3ad649, - 0x388788, - 0x34a089, - 0x30e486, - 0x3486d0, - 0x248b93, - 0x2fac48, - 0x3a5507, - 0x23ae87, - 0x27ed0a, - 0x203849, - 0x3a2349, - 0x28a90b, - 0x38f246, - 0x209fca, - 0x222b86, - 0x3298c3, - 0x2da4c5, - 0x38e708, - 0x26248d, - 0x31174c, - 0x2fb787, - 0x31d68d, - 0x23a0c4, - 0x232a8a, - 0x2335ca, - 0x233a8a, - 0x248e87, - 0x242187, - 0x245f04, - 0x247946, - 0x32d3c4, - 0x2ffe08, - 0x2eb309, - 0x2c5806, - 0x2c5808, - 0x2f4ecd, - 0x2cbe09, - 0x30b7c8, - 0x3ab607, - 0x28e88a, - 0x255406, - 0x263c87, - 0x2de684, - 0x22bec7, - 0x214d8a, - 0x24614e, - 0x2c1a05, - 0x3c170b, - 0x3077c9, - 0x2080c9, - 0x20c707, - 0x20c70a, - 0x2bbac7, - 0x2f7149, - 0x2ca208, - 0x311ccb, - 0x2e2845, - 0x22e18a, - 0x358349, - 0x27584a, - 0x2cf0cb, - 0x22bdcb, - 0x28a695, - 0x2e7dc5, - 0x3ab685, - 0x2f63ca, - 0x277fca, - 0x307547, - 0x219243, - 0x3bb7c8, - 0x2d814a, - 0x224b86, - 0x268509, - 0x365988, - 0x2eab84, - 0x3859c9, - 0x2c2f88, - 0x38dec7, - 0x381ac6, - 0x2a69c7, - 0x2b05c7, - 0x245005, - 0x358acc, - 0x271cc5, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x182c3, - 0x2482c3, - 0x207c02, - 0x20d183, - 0x222103, - 0x2182c3, - 0x2482c3, - 0x20d183, - 0x222103, - 0x182c3, - 0x2597c3, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x1513c8, - 0x207c02, - 0x206042, - 0x2faec2, - 0x202c82, - 0x202e42, - 0x2c7902, - 0x91806, - 0x4e0d183, - 0x2355c3, - 0x3d0443, - 0x214d83, - 0x219a43, - 0x272203, - 0x2da3c6, - 0x222103, - 0x2482c3, - 0x236883, - 0x1513c8, - 0x373484, - 0x372cc7, - 0x327d83, - 0x2b9c44, - 0x202483, - 0x20c783, - 0x214d83, - 0xdf5c7, - 0x171bc4, - 0x170b83, - 0x4345, - 0x2000c2, - 0x3a83, - 0x6207c02, - 0x648d109, - 0x8d98d, - 0x8dccd, - 0x2faec2, - 0x24604, - 0x4389, + 0x173a83, + 0x6209302, + 0x648a9c9, + 0x8b2cd, + 0x8b60d, + 0x2fbcc2, + 0x1e084, + 0x168109, 0x2003c2, - 0x6a24508, - 0xf5b44, - 0x1513c8, - 0x1442942, + 0x6a1df88, + 0xf6044, + 0xaf0c8, + 0x14260c2, 0x14005c2, - 0x1442942, - 0x150f506, - 0x231403, - 0x2b9343, - 0x720d183, - 0x232a84, - 0x76355c3, - 0x7a14d83, - 0x200d42, - 0x224604, - 0x222103, - 0x302883, - 0x200ec2, - 0x2482c3, - 
0x21ce42, - 0x2fbe03, - 0x204fc2, - 0x205583, - 0x29fa03, - 0x203682, - 0x1513c8, - 0x231403, - 0x302883, - 0x200ec2, - 0x2fbe03, - 0x204fc2, - 0x205583, - 0x29fa03, - 0x203682, - 0x2fbe03, - 0x204fc2, - 0x205583, - 0x29fa03, - 0x203682, - 0x20d183, - 0x203a83, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x219a43, - 0x272203, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x206442, - 0x21a803, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x203a83, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x222103, - 0x2482c3, - 0x3c0045, - 0x22e042, + 0x14260c2, + 0x1518206, + 0x2312c3, + 0x2b5b43, + 0x7209303, + 0x2320c4, + 0x76351c3, + 0x7a2b883, + 0x206982, + 0x21e084, + 0x215c83, + 0x305543, + 0x203c02, + 0x24b583, + 0x216f02, + 0x2fc743, + 0x2022c2, + 0x201b43, + 0x293a83, + 0x20c902, + 0xaf0c8, + 0x2312c3, + 0x305543, + 0x203c02, + 0x2fc743, + 0x2022c2, + 0x201b43, + 0x293a83, + 0x20c902, + 0x2fc743, + 0x2022c2, + 0x201b43, + 0x293a83, + 0x20c902, + 0x209303, + 0x373a83, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x20f8c3, + 0x2287c3, + 0x226004, + 0x215c83, + 0x24b583, + 0x204482, + 0x214e83, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x373a83, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x215c83, + 0x24b583, + 0x3c1245, + 0x22c682, 0x2000c2, - 0x1513c8, - 0x1589e48, - 0x1615ca, - 0x214d83, - 0x205dc1, - 0x205e81, - 0x2042c1, - 0x204301, - 0x204341, - 0x215d81, - 0x20c241, - 0x22b281, - 0x207141, + 0xaf0c8, + 0x158e708, + 0x15f94a, + 0x22b883, + 0x22aa41, + 0x201601, + 0x20a081, + 0x201341, + 0x257ec1, + 0x20c7c1, + 0x201641, + 0x207801, + 0x320e41, 0x200001, 0x2000c1, 0x200201, - 0xf4d45, - 0x1513c8, + 0xf6d85, + 0xaf0c8, 0x200101, - 0x200d81, + 0x2029c1, 0x200501, - 0x201481, + 0x200d41, 0x200041, 0x200801, 0x200181, - 0x202f41, + 0x2027c1, 0x200701, 0x2004c1, - 0x200d01, + 0x201741, 0x200581, 0x2003c1, - 0x204c41, - 0x201301, + 0x201401, + 0x2076c1, 0x200401, 0x200741, 0x2007c1, 0x200081, - 0x202241, - 0x2020c1, - 0x207b01, - 0x2018c1, - 0x201241, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x207c02, - 0x20d183, - 0x2355c3, + 0x204fc1, + 0x207301, + 0x20b6c1, + 0x201d81, + 0x202e01, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209302, + 0x209303, + 0x2351c3, 0x2003c2, - 0x2482c3, - 0xdf5c7, - 0x7c8c7, - 0x39c86, - 0x3ef8a, - 0x8c488, - 0x5b7c8, - 0x5bc87, - 0x1b5706, - 0xe1505, - 0x127cc5, - 0xea506, - 0x44a06, - 0x28a904, - 0x275587, - 0x1513c8, - 0x2db184, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x329648, - 0x202544, - 0x235504, - 0x239e44, - 0x2847c7, - 0x2d6e07, - 0x20d183, - 0x2385cb, - 0x20328a, - 0x275c47, - 0x241f88, - 0x36f388, - 0x2355c3, - 0x391cc7, - 0x3d0443, - 0x207488, - 0x20d849, - 0x224604, - 0x219a43, - 0x2e2ac8, - 0x272203, - 0x2d4aca, - 0x2da3c6, - 0x3a9607, - 0x222103, - 0x219146, - 0x3102c8, - 0x2482c3, - 0x2ea646, - 0x2ee10d, - 0x2efb88, - 0x2f68cb, - 0x259146, - 0x324e07, - 0x2256c5, - 0x202cca, - 0x22af45, - 0x25110a, - 0x22e042, - 0x2020c3, - 0x249b44, - 0x200006, - 0x3af783, - 0x3512c3, - 0x308bc3, - 0x202543, - 0x202f03, - 0x202a42, - 0x2d1fc5, - 0x2a9209, - 0x245683, - 0x208403, - 0x203c43, - 0x200201, - 0x2d0607, - 0x2dedc5, - 0x38e643, - 0x207183, - 0x239e44, - 0x275bc3, - 0x222608, - 0x364403, - 0x302ccd, - 0x27d588, - 0x207a86, - 0x32dc03, - 0x38b643, - 0x3a15c3, - 0xb60d183, - 0x234e08, - 0x2385c4, - 0x246603, - 0x200106, - 0x249fc8, - 0x20fa43, - 0x202d03, - 0x232083, - 0x2355c3, 
- 0x22c443, - 0x22d483, - 0x2a5c03, - 0x32db83, - 0x2279c3, - 0x239b03, - 0x38a405, - 0x255984, - 0x256587, - 0x231ac2, - 0x25b283, - 0x25d446, - 0x25fa83, - 0x260943, - 0x27af83, - 0x207003, - 0x373183, - 0x298207, - 0xba14d83, - 0x248343, - 0x20b383, - 0x207483, - 0x219883, - 0x2f1d03, - 0x366805, - 0x36e9c3, - 0x24fa49, - 0x218543, - 0x306283, - 0xbe500c3, - 0x2ab183, - 0x226788, - 0x2a9146, - 0x3b5d86, - 0x29d7c6, - 0x387107, - 0x211903, - 0x20d203, - 0x272203, - 0x28c586, - 0x22c482, - 0x2a3983, - 0x338105, - 0x222103, - 0x261807, - 0x16182c3, - 0x24ee43, - 0x236403, - 0x22da83, - 0x2482c3, - 0x223386, - 0x276186, - 0x379903, - 0x229b03, - 0x21a803, - 0x25cb43, - 0x30adc3, - 0x2fa3c3, - 0x2fbfc3, - 0x212dc5, - 0x206103, - 0x28e786, - 0x23bec8, - 0x22d7c3, - 0x370209, - 0x3690c8, - 0x226a48, - 0x359b45, - 0x2332ca, - 0x23f50a, - 0x240b0b, - 0x241b48, - 0x3b88c3, - 0x2fc003, - 0x305ec3, - 0x322988, - 0x3af343, - 0x34abc4, - 0x2643c3, - 0x2007c3, - 0x2ed3c3, - 0x263e03, - 0x236883, - 0x22e042, - 0x22ab83, - 0x23ca43, - 0x30cf83, - 0x30df44, - 0x249b44, - 0x2224c3, - 0x1513c8, - 0x2000c2, - 0x208e82, - 0x202a42, - 0x205a42, - 0x200202, - 0x202302, - 0x236c42, - 0x206ac2, - 0x200382, - 0x202a82, - 0x212642, - 0x206282, - 0x26f582, - 0x210482, - 0x2c7902, - 0x20d682, - 0x206f42, - 0x208642, - 0x2ed702, - 0x204a02, - 0x200682, - 0x21b142, - 0x205942, - 0x207bc2, - 0x201002, - 0x216cc2, - 0x201842, - 0xc2, - 0x8e82, - 0x2a42, - 0x5a42, - 0x202, - 0x2302, - 0x36c42, - 0x6ac2, - 0x382, - 0x2a82, - 0x12642, - 0x6282, - 0x6f582, - 0x10482, - 0xc7902, - 0xd682, - 0x6f42, - 0x8642, - 0xed702, - 0x4a02, - 0x682, - 0x1b142, - 0x5942, - 0x7bc2, - 0x1002, - 0x16cc2, - 0x1842, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x20c2, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x207c02, - 0x2482c3, - 0xd20d183, - 0x214d83, - 0x272203, - 0xe6003, - 0x230cc2, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0xe6003, - 0x2482c3, - 0x1242, - 0x2001c2, - 0x15c4145, - 0x212ac2, - 0x1513c8, - 0x7c02, - 0x237402, - 0x203882, - 0x23ec82, - 0x2198c2, - 0x240e02, - 0x127cc5, - 0x201f02, - 0x200ec2, - 0x20de02, - 0x201a02, - 0x20d682, - 0x3a1902, - 0x216302, - 0x292382, - 0xdf5c7, - 0xbe44d, - 0xe1589, - 0xa614b, - 0xe5008, - 0x750c9, - 0x107106, - 0x214d83, - 0x1513c8, - 0x171bc4, - 0x170b83, - 0x4345, - 0x1513c8, - 0xdd7c7, - 0x5c786, - 0x4389, - 0x19e0e, - 0x5b0c7, - 0x2000c2, - 0x28a904, - 0x207c02, - 0x20d183, - 0x206042, - 0x2355c3, - 0x200382, - 0x2db184, - 0x219a43, - 0x252782, - 0x222103, - 0x2003c2, - 0x2482c3, - 0x3ab686, - 0x32ec0f, - 0x605ec3, - 0x1513c8, - 0x207c02, - 0x3d0443, - 0x214d83, - 0x272203, - 0x182c3, - 0x19e08, - 0x1402a8b, - 0x141e70a, - 0x1474f47, - 0x7edcb, - 0xdf485, - 0xf4d45, - 0xdf5c7, - 0x207c02, - 0x20d183, - 0x214d83, - 0x222103, - 0x2000c2, - 0x202842, - 0x20a982, - 0x10a0d183, - 0x243d82, - 0x2355c3, - 0x225a82, - 0x227242, - 0x214d83, - 0x25cfc2, - 0x2752c2, - 0x2aab42, - 0x205242, - 0x291542, - 0x200802, - 0x204142, - 0x205642, - 0x27e102, - 0x23ed42, - 0x2b0fc2, - 0x2c7782, - 0x21ef42, - 0x24e6c2, - 0x272203, - 0x203202, - 0x222103, - 0x214ec2, - 0x28c882, - 0x2482c3, - 0x245702, - 0x207bc2, - 0x21a702, - 0x201f42, - 0x207742, - 0x2e68c2, - 0x214fc2, - 0x25dec2, - 0x221b42, - 0x31eeca, - 0x363c8a, - 0x39aa0a, - 0x3d29c2, - 0x22a782, - 0x3667c2, - 0x10f23fc9, - 0x11340b8a, - 0x14306c7, - 0x11600982, - 0x1410643, - 0x5982, - 0x140b8a, - 0x248084, - 0x11e0d183, - 0x2355c3, - 0x2526c4, - 0x214d83, - 0x224604, - 0x219a43, - 0x272203, - 
0xe6204, - 0x4d43, - 0x222103, - 0x7e05, - 0x2182c3, - 0x2482c3, - 0x1533d04, - 0x206103, - 0x2020c3, - 0x1513c8, - 0x5e06, - 0x15b5684, - 0x1271c5, - 0x5ae8a, - 0x1290c2, - 0x1a7f86, - 0xbe11, - 0x12723fc9, - 0x127248, - 0x7ca88, - 0xfcf07, - 0x1742, - 0xf4d4b, - 0x14a4cb, - 0x18820a, - 0x9f0a, - 0x39e47, - 0x1513c8, - 0x116008, - 0xe107, - 0x1901ad0b, - 0x1d707, - 0x4ac2, - 0x3d687, - 0x14394a, - 0x5eb4f, - 0xfd60f, - 0x2d42, - 0x7c02, - 0xa5b48, - 0xf0eca, - 0xdd2ca, - 0xb140a, - 0x7d388, - 0x23088, - 0x610c8, - 0xdd788, - 0x192908, - 0x3282, - 0x1c41cf, - 0xa128b, - 0x83b88, - 0x37707, - 0x13168a, - 0x59d4b, - 0x7e409, - 0x131587, - 0x22f88, - 0x3f6cc, - 0x112187, - 0x17850a, - 0x6a488, - 0xfe38e, - 0x15884e, - 0x39c8b, - 0x3a68b, - 0x827cb, - 0xecf89, - 0xfb9cb, - 0x1ce74d, - 0x144c0b, - 0x40f0d, - 0x4128d, - 0x4484a, - 0x4998b, - 0x4a2cb, - 0x4de45, - 0x194282d0, - 0x1334f, - 0x11330f, - 0x15308d, - 0xbbcd0, - 0xb782, - 0x19a29f88, - 0x7c748, - 0x11754e, - 0x19f643c5, - 0x51f0b, - 0x1392d0, - 0x58588, - 0x2318a, - 0x3a849, - 0x68dc7, - 0x69107, - 0x692c7, - 0x69647, - 0x6a7c7, - 0x6adc7, - 0x6b5c7, - 0x6b887, - 0x6bdc7, - 0x6c0c7, - 0x6c787, - 0x6c947, - 0x6cb07, - 0x6ccc7, - 0x6cfc7, - 0x6d347, - 0x6dc07, - 0x6e247, - 0x6e807, - 0x6eac7, - 0x6ec87, - 0x6ef87, - 0x6f447, - 0x6f647, - 0x70087, - 0x70247, - 0x70407, - 0x70c87, - 0x71187, - 0x71887, - 0x72547, - 0x72807, - 0x72d07, - 0x72ec7, - 0x732c7, - 0x73b87, - 0x74107, - 0x74507, - 0x746c7, - 0x74887, - 0x77147, - 0x78447, - 0x78987, - 0x78f47, - 0x79107, - 0x79487, - 0x79a07, - 0xdc82, - 0x611ca, - 0xe6347, - 0x4005, - 0xaded1, - 0x12686, - 0x114d8a, - 0xa59ca, - 0x5c786, - 0xce20b, - 0x642, - 0x32411, - 0xb7d89, - 0x97589, - 0x5642, - 0x7354a, - 0xa8709, - 0xa8e4f, - 0xa944e, - 0xaa248, - 0x586c2, - 0x1b5bc9, - 0x199fce, - 0x1046cc, - 0xe774f, - 0x1afece, - 0x2b6cc, - 0x157cc9, - 0x11db91, - 0x11e148, - 0x1540d2, - 0x3facd, - 0x4780d, - 0x4c08b, - 0x19e095, - 0x5f3c9, - 0x6dfca, - 0x71549, - 0x73d10, - 0x7cc4b, - 0x8adcf, - 0x16bd4b, - 0x1bf18c, - 0x93150, - 0xa228a, - 0xa384d, - 0xa4dce, - 0xa5e0a, - 0xac40c, - 0xb0294, - 0xb7a11, - 0xbfb0b, - 0x1bb34f, - 0xdfe0d, - 0x150a4e, - 0x18dd8c, - 0xb620c, - 0xb770b, - 0xb838e, - 0xbf450, - 0xbff8b, - 0xc364d, - 0xc3f8f, - 0xc4b0c, - 0xc568e, - 0x117091, - 0x167d8c, - 0xd1587, - 0xd3ecd, - 0xd760c, - 0xd9050, - 0xe4acd, - 0xe8047, - 0xffa90, - 0x105448, - 0x132ccb, - 0x17690f, - 0x168708, - 0x114f8d, - 0x196fd0, - 0xfd509, - 0x1a2b2186, - 0xb3ac3, - 0xb87c5, - 0xb2c2, - 0x131b09, - 0x775ca, - 0x1a63d884, - 0x10df86, - 0x1fc4a, - 0x1a991109, - 0x94043, - 0x14d24a, - 0xdb411, - 0xdb849, - 0xdd247, - 0xddfc7, - 0xe6408, - 0xbf8b, - 0x12b489, - 0xe6b90, - 0xe704c, - 0xe7c08, - 0xe8345, - 0xca388, - 0x1b754a, - 0x1573c7, - 0x12cac7, - 0x1a42, - 0x139fca, - 0x113649, - 0x72bc5, - 0x6168a, - 0x1cc04f, - 0x13d80b, - 0x1668cc, - 0x159c12, - 0x9c645, - 0xe9288, - 0x1693ca, - 0x1aef4445, - 0x1664cc, - 0x136843, - 0x1a1902, - 0xfc30a, - 0x14fc68c, - 0x112508, - 0x410c8, - 0x13da87, - 0x4e42, - 0x4fc2, - 0x530d0, - 0x77a47, - 0x311cf, - 0xea506, - 0xfb8e, - 0x155f0b, - 0x4f208, - 0x7e7c9, - 0x171752, - 0x10b68d, - 0x10bbc8, - 0xa6009, - 0xd664d, - 0x103a09, - 0x19864b, - 0x111c8, - 0x81a48, - 0x88a48, - 0x88e09, - 0x8900a, - 0x8d30c, - 0xf664a, - 0x10ae07, - 0x42a8d, - 0xfee8b, - 0x129acc, - 0x2f7c8, - 0x4cc49, - 0x1a8190, - 0xab42, - 0x80d0d, - 0x3582, - 0x1b882, - 0x10ad4a, - 0x114c8a, - 0x11638b, - 0x4a48c, - 0x11590a, - 0x115d8e, - 0x1520d, - 0x1b1d2885, - 0x12de88, - 0x1242, - 0x12b7420e, - 
0x13205a4e, - 0x13b92d8a, - 0x14326e8e, - 0x14b7084e, - 0x1533df0c, - 0x14306c7, - 0x14306c9, - 0x1410643, - 0x15b4cc0c, - 0x1620b789, - 0x16a1cbc9, - 0x1725ac89, - 0x5982, - 0x174151, - 0x5991, - 0x192ccd, - 0x126dd1, - 0x170791, - 0x13de4f, - 0x14cb4f, - 0xb6cc, - 0x1cb0c, - 0x5abcc, - 0x76e0d, - 0xc8455, - 0xf564c, - 0x16024c, - 0x1788d0, - 0x1826cc, - 0x1c34cc, - 0x1c4c99, - 0x1c9359, - 0x1d05d9, - 0x1d2394, - 0xe294, - 0xed14, - 0xf294, - 0xfed4, - 0x17a0e549, - 0x1800efc9, - 0x18b60309, - 0x12f1e349, - 0x5982, - 0x1371e349, - 0x5982, - 0xe28a, - 0x5982, - 0x13f1e349, - 0x5982, - 0xe28a, - 0x5982, - 0x1471e349, - 0x5982, - 0x14f1e349, - 0x5982, - 0x1571e349, - 0x5982, - 0xe28a, - 0x5982, - 0x15f1e349, - 0x5982, - 0xe28a, - 0x5982, - 0x1671e349, - 0x5982, - 0x16f1e349, - 0x5982, - 0xe28a, - 0x5982, - 0x1771e349, - 0x5982, - 0xe28a, - 0x5982, - 0x17f1e349, - 0x5982, - 0x1871e349, - 0x5982, - 0x18f1e349, - 0x5982, - 0xe28a, - 0x5982, - 0xbe05, - 0x188204, - 0x17420e, - 0x5a4e, - 0x2000e, - 0x192d8a, - 0x126e8e, - 0x17084e, - 0x13df0c, - 0x14cc0c, - 0xb789, - 0x1cbc9, - 0x5ac89, - 0xe549, - 0xefc9, - 0x160309, - 0xc864d, - 0xf549, - 0x10189, - 0x960c4, - 0x14b484, - 0x12e044, - 0x130ec4, - 0x7f084, - 0x12d704, - 0x470c4, - 0x5b504, - 0xfcf04, - 0x159fb43, - 0x11783, - 0xb782, - 0x15203, - 0x12182, - 0x12188, - 0x12b507, - 0x3282, - 0x2000c2, - 0x207c02, - 0x206042, - 0x219902, - 0x200382, - 0x2003c2, - 0x204fc2, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x219883, - 0x222103, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x222103, - 0x2482c3, - 0x7903, - 0x214d83, - 0x24604, - 0x2000c2, - 0x203a83, - 0x1d60d183, - 0x23d3c7, - 0x214d83, - 0x224f03, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x2ed18a, - 0x3ab685, - 0x21a803, - 0x2061c2, - 0x1513c8, - 0x1513c8, - 0x7c02, - 0x133782, - 0x1df90e8b, - 0x1e21eac4, - 0x3d7c5, - 0xc105, - 0x101a86, - 0x1e60c105, - 0x57b83, - 0xd4883, - 0x171bc4, - 0x170b83, - 0x4345, - 0xf4d45, - 0x1513c8, - 0x1d707, - 0xd183, - 0x1ee3edc7, - 0x1e06, - 0x1f192bc5, - 0x1ec7, - 0x2398a, - 0x21308, - 0x23887, - 0x7fa48, - 0xda6c7, - 0xfb14f, - 0x180ec7, - 0x5b306, - 0x1392d0, - 0x13730f, - 0x159349, - 0x10e004, - 0x1f401f8e, - 0x15988c, - 0x59f4a, - 0x7e587, - 0xe57ca, - 0x1267c9, - 0x1a260c, - 0xc26ca, - 0x5ce4a, - 0x4389, - 0x10df86, - 0x7e64a, - 0x10c1ca, - 0x9cf4a, - 0x14dc89, - 0xdad48, - 0xdafc6, - 0xe19cd, - 0xb8c45, - 0x1fb75bcc, - 0x5b0c7, - 0x102389, - 0x134047, - 0xb1954, - 0x105a4b, - 0x839ca, - 0x1715ca, - 0xa648d, - 0x1516589, - 0x10b44c, - 0x10b9cb, - 0x39c83, - 0x39c83, - 0x39c86, - 0x39c83, - 0x101a88, - 0xbb149, - 0x3a83, - 0x1513c8, - 0x7c02, - 0x526c4, - 0x5da43, - 0x1c0045, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x208403, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x297743, - 0x2020c3, - 0x208403, - 0x28a904, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x233903, - 0x20d183, - 0x2355c3, - 0x219903, - 0x3d0443, - 0x214d83, - 0x224604, - 0x308103, - 0x20d203, - 0x272203, - 0x222103, - 0x2482c3, - 0x21a803, - 0x20d603, - 0x21a0d183, - 0x2355c3, - 0x24f043, - 0x214d83, - 0x226cc3, - 0x20d203, - 0x2482c3, - 0x208643, - 0x35ee84, - 0x1513c8, - 0x2220d183, - 0x2355c3, - 0x2aa303, - 0x214d83, - 0x272203, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x2192c3, - 0x1513c8, - 0x22a0d183, - 0x2355c3, - 0x3d0443, - 0x2182c3, - 0x2482c3, - 0x1513c8, - 0x14306c7, - 0x203a83, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0xf4d45, - 0xdf5c7, - 0xb1b8b, - 
0xdbc44, - 0xb8c45, - 0x1589e48, - 0xaa94d, - 0x23e52185, - 0x96a84, - 0x15dc3, - 0xfd405, - 0x38f405, - 0x1513c8, - 0x1f242, - 0x49b83, - 0xf9006, - 0x32a1c8, - 0x3a4907, - 0x28a904, - 0x33d546, - 0x34ca06, - 0x1513c8, - 0x31d643, - 0x312d09, - 0x321f15, - 0x121f1f, - 0x20d183, - 0x2c8012, - 0x16c806, - 0x17de05, - 0x2318a, - 0x3a849, - 0x2c7dcf, - 0x2db184, - 0x23e845, - 0x306050, - 0x388987, - 0x2182c3, - 0x354608, - 0x161506, - 0x2a150a, - 0x2054c4, - 0x2f3e83, - 0x3ab686, - 0x2061c2, - 0x2eea0b, - 0x182c3, - 0x194044, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x2fa803, - 0x207c02, - 0xeb4c3, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224f03, - 0x203203, - 0x2482c3, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x222103, - 0x182c3, - 0x2482c3, - 0x2000c2, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0xc105, - 0x28a904, - 0x20d183, - 0x2355c3, - 0x215584, - 0x222103, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0xe6003, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x207483, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x2037c4, - 0x224604, - 0x222103, - 0x2482c3, - 0x2020c3, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0xe6003, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x2bd943, - 0x6ce03, - 0x24f03, - 0x222103, - 0x2482c3, - 0x31eeca, - 0x33f2c9, - 0x35c10b, - 0x35c84a, - 0x363c8a, - 0x37790b, - 0x38ad8a, - 0x39494a, - 0x39aa0a, - 0x39ac8b, - 0x3b7289, - 0x3be4ca, - 0x3be90b, - 0x3ca14b, - 0x3d11ca, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x1c444b, - 0x62108, - 0xd6784, - 0xc0c6, - 0x44b09, - 0x1513c8, - 0x20d183, - 0x268dc4, - 0x203ac2, - 0x2bf0c4, - 0x204885, - 0x208403, - 0x28a904, - 0x20d183, - 0x2385c4, - 0x2355c3, - 0x2526c4, - 0x2db184, - 0x224604, - 0x20d203, - 0x222103, - 0x2482c3, - 0x280ec5, - 0x233903, - 0x21a803, - 0x295a83, - 0x271dc4, - 0x205d84, - 0x2bb405, - 0x1513c8, - 0x325e44, - 0x32cdc6, - 0x2044c4, - 0x207c02, - 0x24d707, - 0x254fc7, - 0x250244, - 0x25e585, - 0x2e4cc5, - 0x230285, - 0x224604, - 0x3871c8, - 0x237c46, - 0x318048, - 0x27e145, - 0x2e2845, - 0x3a0304, - 0x2482c3, - 0x2f5b44, - 0x376546, - 0x3ab783, - 0x271dc4, - 0x251205, - 0x331a84, - 0x2428c4, - 0x2061c2, - 0x22f546, - 0x3ad346, - 0x309285, - 0x2000c2, - 0x203a83, - 0x2ae07c02, - 0x22a104, - 0x200382, - 0x272203, - 0x20bf82, - 0x222103, - 0x2003c2, - 0x215e83, - 0x2020c3, - 0xaab84, - 0x1513c8, - 0x1513c8, - 0x214d83, - 0xe6003, - 0x2000c2, - 0x2ba07c02, - 0x214d83, - 0x26ca83, - 0x308103, - 0x21eac4, - 0x222103, - 0x2482c3, - 0x1513c8, - 0x2000c2, - 0x2c207c02, - 0x20d183, - 0x222103, - 0x182c3, - 0x2482c3, - 0x682, - 0x206342, - 0x22e042, - 0x224f03, - 0x2eca43, - 0x2000c2, - 0xf4d45, - 0x1513c8, - 0xdf5c7, - 0x207c02, - 0x2355c3, - 0x2526c4, - 0x202b03, - 0x214d83, - 0x207483, - 0x272203, - 0x222103, - 0x214b83, - 0x2482c3, - 0x219243, - 0x959d3, - 0xc7954, - 0xf4d45, - 0xdf5c7, - 0x102c06, - 0x777cb, - 0x39c86, - 0x5b607, - 0x5e586, - 0x649, - 0x17faca, - 0x8c34d, - 0xbe14c, - 0x10cb4a, - 0x148108, - 0x127cc5, - 0x239c8, - 0xea506, - 0x71a06, - 0x44a06, - 0x20b782, - 0x7084, - 0x8504e, - 0x5994c, - 0xf4d45, - 0x1883c7, - 0x249d1, - 0xfcd8a, - 0x20d183, - 0x7f9c5, - 0x4a808, - 0x2a344, - 0x2d427cc6, - 0xadec6, - 0xd2cc6, - 0x9180a, - 0x18e683, - 0x2da48b44, - 0x605, - 0xfb943, - 0x2de36a47, - 0x7e05, - 0xce2cc, - 
0xf8108, - 0x9f84b, - 0x2e24f70c, - 0x1415d43, - 0xb9948, - 0xa1109, - 0x116688, - 0x141fb86, - 0x2e786149, - 0x197387, - 0xdf48a, - 0x10e88, - 0x101a88, - 0xfcf04, - 0x15fdc5, - 0x9f987, - 0x2ea9f983, - 0x2ef9b686, - 0x2f2f63c4, - 0x2f6fc4c7, - 0x101a84, - 0x101a84, - 0x101a84, - 0x101a84, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x2000c2, - 0x207c02, - 0x214d83, - 0x200d42, - 0x222103, - 0x2482c3, - 0x215e83, - 0x37e70f, - 0x37eace, - 0x1513c8, - 0x20d183, - 0x49e07, - 0x2355c3, - 0x214d83, - 0x219a43, - 0x222103, - 0x2482c3, - 0x192fc4, - 0x170cc4, - 0x173504, - 0x21f703, - 0x372787, - 0x200a82, - 0x2c9a49, - 0x208e82, - 0x2533cb, - 0x2a288a, - 0x2ad5c9, - 0x200542, - 0x370346, - 0x234715, - 0x253515, - 0x23ff13, - 0x253a93, - 0x221d42, - 0x221d45, - 0x32498c, - 0x2786cb, - 0x3c0f85, - 0x205a42, - 0x323182, - 0x38c606, - 0x201742, - 0x264606, - 0x22340d, - 0x207b8c, - 0x228344, - 0x200882, - 0x221bc2, - 0x354488, - 0x200202, - 0x226dc6, - 0x33284f, - 0x226dd0, - 0x2ebf44, - 0x2348d5, - 0x240093, - 0x210583, - 0x32364a, - 0x219007, - 0x34c349, - 0x2e5547, - 0x320542, - 0x200282, - 0x3b2246, - 0x203102, - 0x1513c8, - 0x202742, - 0x212802, - 0x228f87, - 0x330ac7, - 0x330ad1, - 0x21ce05, - 0x33834e, - 0x21ce0f, - 0x204ac2, - 0x219207, - 0x21f748, - 0x202142, - 0x2c2742, - 0x325006, - 0x33b30f, - 0x325010, - 0x22dcc2, - 0x200f82, - 0x23bd48, - 0x20d303, - 0x25dc08, - 0x20d30d, - 0x235c43, - 0x313a88, - 0x235c4f, - 0x23600e, - 0x37128a, - 0x2f9951, - 0x2f9dd0, - 0x2dc40d, - 0x2dc74c, - 0x200f87, - 0x3237c7, - 0x33d609, - 0x228442, - 0x202302, - 0x33e30c, - 0x33e80b, - 0x20ae02, - 0x2b78c6, - 0x203442, - 0x200482, - 0x202d42, - 0x207c02, - 0x22fcc4, - 0x23d9c7, - 0x203382, - 0x245147, - 0x246ec7, - 0x231502, - 0x206182, - 0x249cc5, - 0x21d2c2, - 0x38180e, - 0x2a674d, - 0x2355c3, - 0x289b4e, - 0x3badcd, - 0x324d83, - 0x202b42, - 0x287f84, - 0x236c82, - 0x208042, - 0x39dac5, - 0x3a0c47, - 0x24e742, - 0x219902, - 0x2522c7, - 0x255dc8, - 0x231ac2, - 0x29c6c6, - 0x34ee0c, - 0x34f30b, - 0x201482, - 0x2651cf, - 0x265590, - 0x26598f, - 0x265d55, - 0x266294, - 0x26678e, - 0x266b0e, - 0x266e8f, - 0x26724e, - 0x2675d4, - 0x267ad3, - 0x267f8d, - 0x279bc9, - 0x28e003, - 0x202b02, - 0x21e145, - 0x209546, - 0x200382, - 0x343147, - 0x214d83, - 0x200642, - 0x233cc8, - 0x2f9b91, - 0x2f9fd0, - 0x204182, - 0x284b07, - 0x200b02, - 0x209007, - 0x20b2c2, - 0x216c09, - 0x38c5c7, - 0x2a1888, - 0x227b06, - 0x2ec943, - 0x3253c5, - 0x235842, - 0x2004c2, - 0x3b2645, - 0x2596c5, - 0x2023c2, - 0x2023c3, - 0x2023c7, - 0x224807, - 0x202e02, - 0x331f84, - 0x225003, - 0x3181c9, - 0x2fafc8, - 0x204942, - 0x20e7c2, - 0x386887, - 0x3a0045, - 0x2bc548, - 0x334487, - 0x2052c3, - 0x290646, - 0x2dc28d, - 0x2dc60c, - 0x300286, - 0x203882, - 0x29f702, - 0x202002, - 0x235acf, - 0x235ece, - 0x2e4d47, - 0x200d02, - 0x35b405, - 0x35b406, - 0x22a542, - 0x203202, - 0x28f046, - 0x208f43, - 0x208f46, - 0x2cc445, - 0x2cc44d, - 0x2cca15, - 0x2cdccc, - 0x2ce5cd, - 0x2ce992, - 0x206282, - 0x26f582, - 0x200a42, - 0x226906, - 0x304586, - 0x201a42, - 0x2095c6, - 0x20de02, - 0x20de05, - 0x202e42, - 0x2a6849, - 0x22c28c, - 0x22c5cb, - 0x2003c2, - 0x256988, - 0x205882, - 0x210482, - 0x273006, - 0x31e2c5, - 0x391307, - 0x2ed445, - 0x290805, - 0x20c0c2, - 0x208e02, - 0x20d682, - 0x2e7a47, - 0x31ab0d, - 0x31ae8c, - 0x23b8c7, - 0x229b42, - 0x206f42, - 0x237f48, - 0x331c88, - 0x2e4088, - 0x314f44, - 0x2b8607, - 0x23d903, - 0x25b402, - 0x2054c2, - 0x2f2309, - 0x2fe787, - 0x208642, - 0x273405, - 0x204c42, - 0x230782, - 0x2c10c3, 
- 0x2c10c6, - 0x2fa3c2, - 0x2fbd82, - 0x200402, - 0x3bfe46, - 0x2b4287, - 0x2022c2, - 0x200902, - 0x25da4f, - 0x28998d, - 0x39a3ce, - 0x3bac4c, - 0x206782, - 0x2024c2, - 0x227945, - 0x31c506, - 0x217dc2, - 0x204a02, - 0x200682, - 0x289d04, - 0x2e2a44, - 0x32c0c6, - 0x204fc2, - 0x23b207, - 0x244343, - 0x244348, - 0x247408, - 0x39df07, - 0x255586, - 0x202c42, - 0x228003, - 0x228007, - 0x294b46, - 0x2f15c5, - 0x3152c8, - 0x200b42, - 0x370107, - 0x216cc2, - 0x2d48c2, - 0x20d4c2, - 0x21cf89, - 0x213b42, - 0x2010c2, - 0x23bb43, - 0x201707, - 0x202cc2, - 0x22c40c, - 0x22c70b, - 0x300306, - 0x2fb885, - 0x205102, - 0x201842, - 0x2be9c6, - 0x202bc3, - 0x307d07, - 0x276142, - 0x2008c2, - 0x234595, - 0x2536d5, - 0x23fdd3, - 0x253c13, - 0x39f447, - 0x3b8411, - 0x3b8b50, - 0x26a952, - 0x278b11, - 0x27ab88, - 0x27ab90, - 0x2910cf, - 0x2a2653, - 0x2ad392, - 0x2ae2d0, - 0x35158f, - 0x3b9152, - 0x3ba311, - 0x332f53, - 0x3b6b12, - 0x2b22cf, - 0x2c1c4e, - 0x2c91d2, - 0x2cc011, - 0x2d538f, - 0x2d838e, - 0x3bbf11, - 0x3bc6d0, - 0x2d9b52, - 0x2ddb91, - 0x3bccd0, - 0x3bd2cf, - 0x2e0511, - 0x2e21d0, - 0x2e8806, - 0x2f1e87, - 0x211c47, - 0x200c42, - 0x285985, - 0x37e547, - 0x22e042, - 0x202f82, - 0x22ab85, - 0x220603, - 0x3b5ac6, - 0x31accd, - 0x31b00c, - 0x209642, - 0x32480b, - 0x27858a, - 0x221c0a, - 0x2ba809, - 0x2f04cb, - 0x3345cd, - 0x3064cc, - 0x274d8a, - 0x27960c, - 0x297a0b, - 0x3c0dcc, - 0x3c12ce, - 0x3c1acb, - 0x3c1f8c, - 0x2b0f03, - 0x303906, - 0x307a42, - 0x2fcc82, - 0x2161c3, - 0x202242, - 0x229fc3, - 0x31a2c6, - 0x265f07, - 0x3058c6, - 0x2f1088, - 0x202248, - 0x310a06, - 0x20a2c2, - 0x308c4d, - 0x308f8c, - 0x2db247, - 0x30c987, - 0x237682, - 0x21aa02, - 0x213a42, - 0x256182, - 0x332757, - 0x338256, - 0x33b217, - 0x33e214, - 0x33e713, - 0x34ed14, - 0x34f213, - 0x3b5310, - 0x3b8319, - 0x3b8a58, - 0x3b905a, - 0x3ba219, - 0x3bbe19, - 0x3bc5d8, - 0x3bcbd8, - 0x3bd1d7, - 0x3c0cd4, - 0x3c11d6, - 0x3c19d3, - 0x3c1e94, - 0x207c02, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x215e83, - 0x2000c2, - 0x2036c2, - 0x31692ec5, - 0x31a8b705, - 0x31fbfb06, - 0x1513c8, - 0x322b2845, - 0x207c02, - 0x206042, - 0x32604105, - 0x32a83245, - 0x32e84cc7, - 0x332871c9, - 0x33756984, - 0x200382, - 0x200642, - 0x33a60145, - 0x33e99189, - 0x34331408, - 0x346b0105, - 0x34b38807, - 0x34e6fc08, - 0x352e9145, - 0x35639546, - 0x35b86389, - 0x35ecdac8, - 0x362c3dc8, - 0x366997ca, - 0x36a7a404, - 0x36ea7145, - 0x372c0988, - 0x37731885, - 0x2180c2, - 0x37a4b143, - 0x37ea5346, - 0x38353f08, - 0x387113c6, - 0x38b64f48, - 0x38f4c8c6, - 0x3934af04, - 0x201402, - 0x39753787, - 0x39aab384, - 0x39e7de47, - 0x3a23c087, - 0x2003c2, - 0x3a69dc05, - 0x3aa4d384, - 0x3aee4787, - 0x3b243bc7, - 0x3b687dc6, - 0x3ba840c5, - 0x3be99287, - 0x3c2e7308, - 0x3c61e4c7, - 0x3cab5b09, - 0x3cece005, - 0x3d2dfc87, - 0x3d692506, - 0x3daca088, - 0x22b08d, - 0x2804c9, - 0x28c8cb, - 0x2a890b, - 0x2b360b, - 0x30d00b, - 0x31c70b, - 0x31c9cb, - 0x31d009, - 0x31f14b, - 0x31f40b, - 0x31fc0b, - 0x3208ca, - 0x320e0a, - 0x32140c, - 0x32848b, - 0x328eca, - 0x33c00a, - 0x34330e, - 0x34544e, - 0x3457ca, - 0x3472ca, - 0x348acb, - 0x348d8b, - 0x349a0b, - 0x36984b, - 0x369e4a, - 0x36ab0b, - 0x36adca, - 0x36b04a, - 0x36b2ca, - 0x38b88b, - 0x39580b, - 0x397c4e, - 0x397fcb, - 0x3a0e0b, - 0x3a1c8b, - 0x3a5bca, - 0x3a5e49, - 0x3a608a, - 0x3a7a8a, - 0x3b7d0b, - 0x3bebcb, - 0x3bf48a, - 0x3c070b, - 0x3c658b, - 0x3d0c0b, - 0x3de86048, - 0x3e28bac9, - 0x3e6a0f89, - 0x3eae3bc8, - 0x351c85, - 0x202a43, - 0x31bd84, - 0x373845, - 
0x3566c6, - 0x2392c5, - 0x28b184, - 0x343048, - 0x314ac5, - 0x294244, - 0x214687, - 0x2a050a, - 0x24d9ca, - 0x2e4e47, - 0x21b807, - 0x3052c7, - 0x280087, - 0x300805, - 0x20a346, - 0x2bf2c7, - 0x249bc4, - 0x2e9706, - 0x2e9606, - 0x209985, - 0x39b2c4, - 0x29a806, - 0x29ea87, - 0x3ce3c6, - 0x353387, - 0x293b43, - 0x3a4ac6, - 0x22f345, - 0x284dc7, - 0x26d50a, - 0x233dc4, - 0x3590c8, - 0x318509, - 0x2e1787, - 0x328d46, - 0x3873c8, - 0x311b09, - 0x34c504, - 0x369244, - 0x2a2dc5, - 0x2befc8, - 0x2ca607, - 0x2df9c9, - 0x2edcc8, - 0x2e8906, - 0x333c46, - 0x29b1c8, - 0x36d5c6, - 0x28b705, - 0x287e86, - 0x27ea08, - 0x2359c6, - 0x25c14b, - 0x2d4746, - 0x29c84d, - 0x20c645, - 0x2ab246, - 0x23ea05, - 0x258e09, - 0x350507, - 0x35f588, - 0x2aee06, - 0x29ba49, - 0x349f46, - 0x26d485, - 0x2a2cc6, - 0x2c7346, - 0x2cfa49, - 0x202786, - 0x2a0207, - 0x245dc5, - 0x211583, - 0x25c2c5, - 0x29cb07, - 0x324c46, - 0x20c549, - 0x3bfb06, - 0x26de06, - 0x217a09, - 0x287889, - 0x2a36c7, - 0x371e48, - 0x2add09, - 0x285608, - 0x394b86, - 0x2dab05, - 0x23e00a, - 0x26de86, - 0x23d246, - 0x2d38c5, - 0x2cd488, - 0x39fbc7, - 0x23220a, - 0x252e06, - 0x280905, - 0x203606, - 0x3443c7, - 0x328c07, - 0x368205, - 0x26d645, - 0x282206, - 0x2a9786, - 0x2b1046, - 0x2c0e44, - 0x286749, - 0x28cb86, - 0x2fa58a, - 0x31c148, - 0x347a08, - 0x24d9ca, - 0x2260c5, - 0x29e9c5, - 0x2b4448, - 0x2b90c8, - 0x22fa87, - 0x316f46, - 0x336388, - 0x2ac947, - 0x284f08, - 0x2b8246, - 0x288608, - 0x2987c6, - 0x27e2c7, - 0x368fc6, - 0x29a806, - 0x3cf3ca, - 0x22fd46, - 0x2dab09, - 0x310b06, - 0x2eb84a, - 0x34af09, - 0x25f046, - 0x2b93c4, - 0x21e20d, - 0x28bd47, - 0x2bc2c6, - 0x2c3c85, - 0x349fc5, - 0x38d206, - 0x2e45c9, - 0x2cd007, - 0x27f486, - 0x2de506, - 0x28b209, - 0x28b644, - 0x24e584, - 0x326408, - 0x31a686, - 0x272a08, - 0x315648, - 0x2ab8c7, - 0x3b4189, - 0x2b1247, - 0x2b270a, - 0x2f2dcf, - 0x354a0a, - 0x227745, - 0x27ec45, - 0x357a05, - 0x2ebe87, - 0x237043, - 0x372048, - 0x22d1c6, - 0x22d2c9, - 0x2d2646, - 0x2d0d87, - 0x29b809, - 0x35f488, - 0x2d3987, - 0x317a83, - 0x351d05, - 0x343f05, - 0x2c0c8b, - 0x331944, - 0x241d84, - 0x27b606, - 0x317c47, - 0x39d04a, - 0x24b1c7, - 0x20d6c7, - 0x283245, - 0x3cb1c5, - 0x269d49, - 0x29a806, - 0x24b04d, - 0x2029c5, - 0x2b5743, - 0x201a03, - 0x3ad585, - 0x35af45, - 0x3873c8, - 0x27fd47, - 0x24e306, - 0x2a0c06, - 0x22b9c5, - 0x235887, - 0x3c8c07, - 0x237b07, - 0x2a71ca, - 0x3a4b88, - 0x2c0e44, - 0x281287, - 0x281c47, - 0x349006, - 0x297e47, - 0x2deb48, - 0x37fc88, - 0x250446, - 0x21ba48, - 0x202804, - 0x2bf2c6, - 0x2529c6, - 0x38fa46, - 0x23eb46, - 0x29dfc4, - 0x280146, - 0x2c2946, - 0x29abc6, - 0x231b06, - 0x218346, - 0x2de986, - 0x24e208, - 0x3b97c8, - 0x2d6ac8, - 0x2394c8, - 0x2b43c6, - 0x21b605, - 0x392586, - 0x2b0185, - 0x3a4647, - 0x2a7c45, - 0x215783, - 0x207205, - 0x3cab84, - 0x218485, - 0x205883, - 0x345a87, - 0x367bc8, - 0x353446, - 0x2b8d4d, - 0x27ec06, - 0x29a185, - 0x21cf83, - 0x2c0349, - 0x28b7c6, - 0x2953c6, - 0x2734c4, - 0x354987, - 0x312006, - 0x2cd2c5, - 0x200cc3, - 0x211084, - 0x281e06, - 0x252ac4, - 0x2d3508, - 0x20ab09, - 0x3199c9, - 0x2a2bca, - 0x2a418d, - 0x234147, - 0x23d0c6, - 0x223ac4, - 0x2871c9, - 0x28a308, - 0x28b946, - 0x23f286, - 0x297e47, - 0x2d2a46, - 0x2830c6, - 0x276406, - 0x23c10a, - 0x26fc08, - 0x31da85, - 0x261989, - 0x2cad8a, - 0x375f48, - 0x29e448, - 0x295348, - 0x2a084c, - 0x31cc45, - 0x2a0e88, - 0x3b9ac6, - 0x39ef86, - 0x3cca07, - 0x24b0c5, - 0x288005, - 0x319889, - 0x21ed07, - 0x22d285, - 0x2a9d47, - 0x201a03, - 0x2cb245, - 0x2296c8, - 0x32c747, - 0x29e309, - 0x2eab85, - 
0x341244, - 0x2a3e48, - 0x2e2e47, - 0x2d3b48, - 0x39c248, - 0x2ac305, - 0x22d0c6, - 0x21c106, - 0x350849, - 0x2dd087, - 0x2b07c6, - 0x229c47, - 0x203b43, - 0x356984, - 0x2d8d05, - 0x259cc4, - 0x24f9c4, - 0x286e87, - 0x26c207, - 0x26a1c4, - 0x29e150, - 0x392107, - 0x3cb1c5, - 0x25510c, - 0x216344, - 0x2b6e88, - 0x27e1c9, - 0x383046, - 0x3167c8, - 0x21da04, - 0x27b908, - 0x232806, - 0x3cf248, - 0x29cdc6, - 0x289e4b, - 0x32a945, - 0x2d8b88, - 0x20af44, - 0x20af4a, - 0x29e309, - 0x368ec6, - 0x2f9508, - 0x2a5d05, - 0x31ea04, - 0x2b6d86, - 0x2379c8, - 0x286048, - 0x336c06, - 0x32c044, - 0x23df86, - 0x2b12c7, - 0x27dd47, - 0x297e4f, - 0x2086c7, - 0x25f107, - 0x35b2c5, - 0x367385, - 0x2a3389, - 0x2ea246, - 0x284205, - 0x287b87, - 0x2c8dc8, - 0x2d4185, - 0x368fc6, - 0x31bf88, - 0x3113ca, - 0x213c88, - 0x28edc7, - 0x2f3206, - 0x261946, - 0x2003c3, - 0x217f03, - 0x2caf49, - 0x2adb89, - 0x2b5a06, - 0x2eab85, - 0x31e708, - 0x2f9508, - 0x36d748, - 0x27648b, - 0x2b8f87, - 0x310109, - 0x2980c8, - 0x35dac4, - 0x35f8c8, - 0x290b89, - 0x2b0ac5, - 0x2ebd87, - 0x356a05, - 0x285f48, - 0x292d4b, - 0x298fd0, - 0x2aac85, - 0x21738c, - 0x24e4c5, - 0x2832c3, - 0x2b4ac6, - 0x2c1fc4, - 0x24d486, - 0x29ea87, - 0x203b84, - 0x248588, - 0x371f0d, - 0x316d85, - 0x234184, - 0x227484, - 0x296ac9, - 0x2d9408, - 0x32ae07, - 0x232888, - 0x286808, - 0x27f785, - 0x27c347, - 0x27f707, - 0x312ac7, - 0x26d649, - 0x391e49, - 0x270986, - 0x2dc946, - 0x287c46, - 0x346c45, - 0x39af04, - 0x3c3d86, - 0x3c86c6, - 0x27f7c8, - 0x34408b, - 0x26af47, - 0x223ac4, - 0x311f46, - 0x2dee87, - 0x323b05, - 0x388f05, - 0x23f4c4, - 0x391dc6, - 0x3c3e08, - 0x2871c9, - 0x24c9c6, - 0x28a108, - 0x2cd386, - 0x35a548, - 0x32f98c, - 0x27f646, - 0x299e4d, - 0x29a2cb, - 0x2a02c5, - 0x3c8d47, - 0x202886, - 0x328ac8, - 0x270a09, - 0x250708, - 0x3cb1c5, - 0x24a947, - 0x285708, - 0x31b5c9, - 0x3a5406, - 0x264bca, - 0x328848, - 0x25054b, - 0x2d5f8c, - 0x27ba08, - 0x281846, - 0x22cb48, - 0x311047, - 0x208809, - 0x33894d, - 0x29a706, - 0x31e888, - 0x3b9689, - 0x2c0f48, - 0x288708, - 0x2c338c, - 0x2c4687, - 0x2c5147, - 0x26d485, - 0x2b71c7, - 0x2c8c88, - 0x2b6e06, - 0x24c84c, - 0x2f7588, - 0x2d1748, - 0x31b8c6, - 0x343c87, - 0x270b84, - 0x2394c8, - 0x23ac0c, - 0x203b8c, - 0x2277c5, - 0x209a07, - 0x32bfc6, - 0x343c06, - 0x258fc8, - 0x376844, - 0x3ce3cb, - 0x23b34b, - 0x2f3206, - 0x371d87, - 0x36fd85, - 0x272945, - 0x3ce506, - 0x2a5cc5, - 0x331905, - 0x2cf887, - 0x284589, - 0x2a9944, - 0x260985, - 0x30e0c5, - 0x2d3288, - 0x2e5c45, - 0x2baec9, - 0x2b9c87, - 0x2b9c8b, - 0x31b206, - 0x24df49, - 0x39b208, - 0x2966c5, - 0x312bc8, - 0x391e88, - 0x263707, - 0x24ae47, - 0x286f09, - 0x3b9707, - 0x2a7b49, - 0x303e0c, - 0x2b5a08, - 0x2cd909, - 0x2d1407, - 0x2868c9, - 0x200b47, - 0x2d6088, - 0x3b4345, - 0x2bf246, - 0x2c3cc8, - 0x316948, - 0x2cac49, - 0x331947, - 0x256dc5, - 0x242e89, - 0x206c06, - 0x201844, - 0x201846, - 0x353d88, - 0x3a1647, - 0x344288, - 0x21bb09, - 0x36f947, - 0x2a06c6, - 0x3c8e04, - 0x207289, - 0x27c1c8, - 0x31b787, - 0x37a946, - 0x343fc6, - 0x23d1c4, - 0x3b9d06, - 0x201903, - 0x32a4c9, - 0x32a906, - 0x20a5c5, - 0x2a0c06, - 0x2cfe05, - 0x285b88, - 0x310f07, - 0x399746, - 0x204146, - 0x347a08, - 0x2a3507, - 0x29a745, - 0x29df48, - 0x3befc8, - 0x328848, - 0x24e385, - 0x2bf2c6, - 0x319789, - 0x3506c4, - 0x2cfc8b, - 0x282dcb, - 0x31d989, - 0x201a03, - 0x25d185, - 0x37c306, - 0x24f3c8, - 0x321a44, - 0x353446, - 0x2a7309, - 0x2da905, - 0x2cf7c6, - 0x2e2e46, - 0x203f04, - 0x21bcca, - 0x20a508, - 0x316946, - 0x2bd385, - 0x36fc07, - 0x35b187, - 0x22d0c4, - 0x283007, - 
0x2b2704, - 0x2edd46, - 0x21d9c3, - 0x26d645, - 0x376bc5, - 0x3651c8, - 0x281445, - 0x27f389, - 0x239307, - 0x23930b, - 0x2a514c, - 0x2a574a, - 0x338807, - 0x200a03, - 0x27d688, - 0x24e545, - 0x2d4205, - 0x351dc4, - 0x2d5f86, - 0x27e1c6, - 0x3b9d47, - 0x24240b, - 0x29dfc4, - 0x2d2144, - 0x2c9644, - 0x2cf586, - 0x203b84, - 0x2bf0c8, - 0x351bc5, - 0x271045, - 0x36d687, - 0x3c8e49, - 0x35af45, - 0x38d20a, - 0x245cc9, - 0x2d78ca, - 0x23c249, - 0x35d544, - 0x2de5c5, - 0x2d2b48, - 0x2e484b, - 0x2a2dc5, - 0x2f1786, - 0x246f84, - 0x27f8c6, - 0x36f7c9, - 0x2def87, - 0x3bfcc8, - 0x2a4506, - 0x2b1247, - 0x286048, - 0x38d786, - 0x3c8904, - 0x37ee07, - 0x36bc45, - 0x3813c7, - 0x21d904, - 0x202806, - 0x2e7488, - 0x29a488, - 0x2ef687, - 0x24ed48, - 0x298885, - 0x2182c4, - 0x24d8c8, - 0x24ee44, - 0x245ac5, - 0x300a04, - 0x2aca47, - 0x28cc47, - 0x286a08, - 0x2d3cc6, - 0x2813c5, - 0x27f188, - 0x213e88, - 0x2a2b09, - 0x2830c6, - 0x232288, - 0x20adca, - 0x323b88, - 0x2e9145, - 0x225f86, - 0x245b88, - 0x24aa0a, - 0x239947, - 0x28acc5, - 0x292708, - 0x2b4084, - 0x2cd506, - 0x2c54c8, - 0x218346, - 0x204ec8, - 0x292147, - 0x214586, - 0x2b93c4, - 0x277c47, - 0x2b4d44, - 0x36f787, - 0x368c0d, - 0x22fb05, - 0x2e43cb, - 0x203e06, - 0x256a88, - 0x248544, - 0x2eb506, - 0x281e06, - 0x22ce87, - 0x299b0d, - 0x24bf07, - 0x2b5688, - 0x287385, - 0x25a548, - 0x2ca586, - 0x298908, - 0x23f986, - 0x390487, - 0x25c389, - 0x37c107, - 0x28bc08, - 0x277285, - 0x22ba48, - 0x343b45, - 0x2fe905, - 0x23c4c5, - 0x227a43, - 0x23ebc4, - 0x261985, - 0x386389, - 0x37a846, - 0x2dec48, - 0x24ac05, - 0x2b7087, - 0x2ab5ca, - 0x2cf709, - 0x2c724a, - 0x2d6b48, - 0x2a9b8c, - 0x287c0d, - 0x37af03, - 0x204dc8, - 0x211045, - 0x311186, - 0x35f306, - 0x355e45, - 0x229d49, - 0x200985, - 0x27f188, - 0x25e006, - 0x35cfc6, - 0x2a3d09, - 0x3aa047, - 0x293006, - 0x2ab548, - 0x38f948, - 0x2e3dc7, - 0x2c2ace, - 0x2ca7c5, - 0x31b4c5, - 0x218248, - 0x2f36c7, - 0x20ad82, - 0x2c2f04, - 0x24d38a, - 0x31b848, - 0x391fc6, - 0x29b948, - 0x21c106, - 0x38f688, - 0x2b07c8, - 0x2fe8c4, - 0x2b7445, - 0x6044c4, - 0x6044c4, - 0x6044c4, - 0x2087c3, - 0x343e46, - 0x27f646, - 0x29ff8c, - 0x201b43, - 0x21d906, - 0x250544, - 0x28b748, - 0x2a7145, - 0x24d486, - 0x2c0a88, - 0x2d80c6, - 0x3996c6, - 0x2e2c48, - 0x2d8d87, - 0x3cef49, - 0x37c44a, - 0x21e504, - 0x2a7c45, - 0x2df985, - 0x32c206, - 0x234186, - 0x29f1c6, - 0x2ff946, - 0x3cf084, - 0x3cf08b, - 0x2ca604, - 0x24e0c5, - 0x2afac5, - 0x2ab986, - 0x20ce88, - 0x287ac7, - 0x32a884, - 0x214ac3, - 0x2b3b85, - 0x2edb87, - 0x2879cb, - 0x3650c7, - 0x2c0988, - 0x2b7587, - 0x26e946, - 0x280788, - 0x29f3cb, - 0x373786, - 0x220bc9, - 0x29f545, - 0x317a83, - 0x2cf7c6, - 0x292048, - 0x21bb83, - 0x202943, - 0x286046, - 0x21c106, - 0x3772ca, - 0x281885, - 0x281c4b, - 0x2a0b4b, - 0x20cd83, - 0x21f303, - 0x2b2684, - 0x21bec7, - 0x27ba04, - 0x28b744, - 0x3b9944, - 0x323e88, - 0x2bd2c8, - 0x218e49, - 0x2ce088, - 0x23c747, - 0x231b06, - 0x2de88f, - 0x2ca906, - 0x2d6284, - 0x2bd10a, - 0x2eda87, - 0x2b4e46, - 0x292549, - 0x218dc5, - 0x365305, - 0x218f06, - 0x22bb83, - 0x2b40c9, - 0x26fd86, - 0x21b8c9, - 0x39d046, - 0x26d645, - 0x227bc5, - 0x2086c3, - 0x21c008, - 0x32afc7, - 0x22d1c4, - 0x28b5c8, - 0x39ed04, - 0x303706, - 0x2b4ac6, - 0x244086, - 0x2d8a49, - 0x2d4185, - 0x29a806, - 0x39f149, - 0x2c8986, - 0x2de986, - 0x3a3a86, - 0x23c685, - 0x300a06, - 0x390484, - 0x3b4345, - 0x2c3cc4, - 0x2b60c6, - 0x202984, - 0x200c43, - 0x28a3c5, - 0x2368c8, + 0x24b583, + 0xe41c7, + 0x288c7, + 0x3cf86, + 0x3b08a, + 0x89f88, + 0x580c8, + 0x58587, + 0x1b6e46, + 0xdf545, 
+ 0x178145, + 0xea746, + 0x40386, + 0x28b004, + 0x274bc7, + 0xaf0c8, + 0x2da884, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x32b848, + 0x376544, + 0x235104, + 0x26dec4, 0x282607, - 0x321ac9, - 0x28abc8, - 0x29af91, - 0x2e2eca, - 0x2f3147, - 0x37ffc6, - 0x250544, - 0x2c3dc8, - 0x269f08, - 0x29b14a, - 0x2bac8d, - 0x2a2cc6, - 0x2e2d46, - 0x277d06, - 0x368087, - 0x2b5745, - 0x370407, - 0x28b685, - 0x2b9dc4, - 0x2aa0c6, - 0x31e5c7, - 0x2b3dcd, - 0x245ac7, - 0x342f48, - 0x27f489, - 0x225e86, - 0x3a5385, - 0x243404, - 0x353e86, - 0x22cfc6, - 0x31b9c6, - 0x29c1c8, - 0x22c203, - 0x22ce83, - 0x201ac5, - 0x281506, - 0x2b0785, - 0x2a4708, - 0x29ec4a, - 0x310804, - 0x28b748, - 0x295348, - 0x2ab7c7, - 0x24acc9, - 0x2c0688, - 0x287247, - 0x3b9bc6, - 0x21834a, - 0x353f08, - 0x350349, - 0x2d94c8, - 0x278209, - 0x37fe87, - 0x347f05, - 0x276686, - 0x2b6c88, - 0x2861c8, - 0x2954c8, - 0x2f3308, - 0x24e0c5, - 0x217a44, - 0x234f08, - 0x246d04, - 0x23c044, - 0x26d645, - 0x294287, - 0x3c8c09, - 0x22cc87, - 0x20ae05, - 0x27b806, - 0x364bc6, - 0x20b0c4, - 0x2a4046, - 0x27d304, - 0x29fc46, - 0x3c89c6, - 0x22b486, - 0x3cb1c5, - 0x2a45c7, - 0x200a03, - 0x228689, - 0x347808, - 0x2870c4, - 0x2870cd, - 0x29a588, - 0x30b048, - 0x3502c6, - 0x25c489, - 0x2cf709, - 0x36f4c5, - 0x29ed4a, - 0x27130a, - 0x28ce0c, - 0x28cf86, - 0x27d086, - 0x2cb186, - 0x39db89, - 0x3113c6, - 0x2a3546, - 0x200a46, - 0x2394c8, - 0x213c86, - 0x2d5c0b, - 0x294405, - 0x271045, - 0x27de45, - 0x326186, - 0x218303, - 0x244006, - 0x245a47, - 0x2c3c85, - 0x25fac5, - 0x349fc5, - 0x306c06, - 0x331304, - 0x331306, - 0x2bf849, - 0x32600c, - 0x2b9b08, - 0x237944, - 0x300706, - 0x203f06, - 0x292048, - 0x2f9508, - 0x325f09, - 0x36fc07, - 0x31a3c9, - 0x255b06, - 0x22ddc4, - 0x20e804, - 0x203684, - 0x286048, - 0x3c8a4a, - 0x35aec6, - 0x367247, - 0x381647, - 0x24e045, - 0x2df944, - 0x290b46, - 0x2b5786, - 0x249b83, - 0x347647, - 0x39c148, - 0x36f60a, - 0x3721c8, - 0x364f48, - 0x2029c5, - 0x2a03c5, - 0x26b045, - 0x24e406, - 0x36c946, - 0x371145, - 0x32a709, - 0x2df74c, - 0x26b107, - 0x29b1c8, - 0x271745, - 0x6044c4, - 0x2b4744, - 0x32c884, - 0x225306, - 0x2a1e4e, - 0x365387, - 0x368285, - 0x35064c, - 0x300b07, - 0x31e547, - 0x3554c9, - 0x359189, - 0x28acc5, - 0x347808, - 0x319789, - 0x328705, - 0x2c3bc8, - 0x26ff06, - 0x24db46, - 0x34af04, - 0x28f648, - 0x226043, - 0x27c9c4, - 0x2b3c05, - 0x396e47, - 0x344e85, - 0x20ac89, - 0x2a83cd, - 0x2b2c06, - 0x214b04, - 0x316ec8, - 0x2843ca, - 0x26b407, - 0x275d85, - 0x202f83, - 0x2a0d0e, - 0x21c10c, - 0x376047, - 0x2a2007, - 0x205183, - 0x311405, - 0x32c885, - 0x29bd08, - 0x299609, - 0x237846, - 0x27ba04, - 0x2f3086, - 0x236dcb, - 0x2dc00c, - 0x369687, - 0x2d5ec5, - 0x3beec8, - 0x2e3b85, - 0x2bd107, - 0x353787, - 0x245885, - 0x218303, - 0x3241c4, - 0x353c45, - 0x2a9845, - 0x2a9846, - 0x2af688, - 0x31e5c7, - 0x35f606, - 0x208a86, - 0x23c406, - 0x283749, - 0x27c447, - 0x31bc86, - 0x2dc186, - 0x27a306, - 0x2ab345, - 0x214346, - 0x366405, - 0x2e5cc8, - 0x293b8b, - 0x290546, - 0x381684, - 0x300049, - 0x239304, - 0x26fe88, - 0x201947, - 0x288604, - 0x2bfdc8, - 0x2c4f44, - 0x2ab384, - 0x28b505, - 0x316dc6, - 0x323dc7, - 0x204f83, - 0x2a0785, - 0x337284, - 0x31b506, - 0x36f548, - 0x203a85, - 0x293849, - 0x243085, - 0x21d908, - 0x3194c7, - 0x32aa08, - 0x2bee07, - 0x25f1c9, - 0x27ffc6, - 0x33c246, - 0x200a44, - 0x2d2085, - 0x3084cc, - 0x27de47, - 0x27eb07, - 0x233dc8, - 0x2b2c06, - 0x272b44, - 0x3adac4, - 0x286d89, - 0x2cb286, - 0x269dc7, - 0x23eac4, - 
0x24d006, - 0x35eb45, - 0x2d3807, - 0x2d5b86, - 0x264a89, - 0x2d0547, - 0x297e47, - 0x2a3b86, - 0x24cf45, - 0x284088, - 0x26fc08, - 0x231d06, - 0x203ac5, - 0x327806, - 0x211ac3, - 0x29bb89, - 0x29ef4e, - 0x2beb48, - 0x39ee08, - 0x231b0b, - 0x293a86, - 0x34c8c4, - 0x287804, - 0x29f04a, - 0x217287, - 0x31bd45, - 0x220bc9, - 0x2c2a05, - 0x23c087, - 0x24ecc4, - 0x2a4c47, - 0x315548, - 0x2e1846, - 0x39eb49, - 0x2c078a, - 0x217206, - 0x29a0c6, - 0x2afa45, - 0x398585, - 0x33d2c7, - 0x24c648, - 0x35ea88, - 0x2fe8c6, - 0x227c45, - 0x233f0e, - 0x2c0e44, - 0x231c85, - 0x27b189, - 0x2ea048, - 0x28ed06, - 0x29da4c, - 0x29e850, - 0x2a1a8f, - 0x2a3288, - 0x338807, - 0x3cb1c5, - 0x261985, - 0x323c49, - 0x292909, - 0x23e086, - 0x2a2e47, - 0x2d1f85, - 0x22fa89, - 0x349086, - 0x31120d, - 0x285d09, - 0x28b744, - 0x2be8c8, - 0x234fc9, - 0x35b086, - 0x27d885, - 0x33c246, - 0x3bfb89, - 0x27c048, - 0x21b605, - 0x20aec4, - 0x29dc0b, - 0x35af45, - 0x24f446, - 0x287f46, - 0x2062c6, - 0x296ecb, - 0x293949, - 0x2089c5, - 0x3a4547, - 0x2e2e46, - 0x256c06, - 0x32c608, - 0x357ac9, - 0x342d0c, - 0x2ed988, - 0x30fe86, - 0x336c03, - 0x233446, - 0x25f085, - 0x281f88, - 0x227646, - 0x2d3a48, - 0x24b245, - 0x29b305, - 0x25a988, - 0x38f807, - 0x35f247, - 0x3b9d47, - 0x3167c8, - 0x32c308, - 0x2b5206, - 0x2b5f07, - 0x356847, - 0x296bca, - 0x202e43, - 0x326186, - 0x233e85, - 0x24d384, - 0x27f489, - 0x25f144, - 0x201704, - 0x29ce44, - 0x2a200b, - 0x32af07, - 0x234145, - 0x298588, - 0x27b806, - 0x27b808, - 0x2817c6, - 0x28f585, - 0x28f845, - 0x291686, - 0x291e08, - 0x292488, - 0x27f646, - 0x2983cf, - 0x29b650, - 0x20c645, - 0x200a03, - 0x22de85, - 0x310048, - 0x292809, - 0x328848, - 0x39e9c8, - 0x23cc88, - 0x32afc7, - 0x27b4c9, - 0x2d3c48, - 0x291d04, - 0x29ccc8, - 0x2d3349, - 0x2b6987, - 0x29cc44, - 0x22cd48, - 0x2a438a, - 0x2e6086, - 0x2a2cc6, - 0x282f89, - 0x29ea87, - 0x2d0c08, - 0x2290c8, - 0x2c9f08, - 0x39f585, - 0x211585, - 0x271045, - 0x32c845, - 0x395cc7, - 0x218305, - 0x2c3c85, - 0x201186, - 0x328787, - 0x2e4787, - 0x2a4686, - 0x2d7085, - 0x24f446, - 0x25f2c5, - 0x2d1e08, - 0x375ec4, - 0x2c8a06, - 0x32dd84, - 0x31ea08, - 0x3cf80a, - 0x27fd4c, - 0x242605, - 0x368146, - 0x342ec6, - 0x295f86, - 0x30ff04, - 0x35ee05, - 0x281147, - 0x29eb09, - 0x2cfb47, - 0x6044c4, - 0x6044c4, - 0x32ad85, - 0x2d4d84, - 0x29d40a, - 0x27b686, - 0x2809c4, - 0x209985, - 0x38dc85, - 0x2b5684, - 0x287b87, - 0x243007, - 0x2cf588, - 0x205148, - 0x21b609, - 0x270f88, - 0x29d5cb, - 0x2c3a44, - 0x256d05, - 0x284285, - 0x3b9cc9, - 0x357ac9, - 0x2fff48, - 0x2ed808, - 0x2ab984, - 0x203f45, - 0x202a43, - 0x32c1c5, - 0x29a886, - 0x29944c, - 0x23e9c6, - 0x27d786, - 0x28ef85, - 0x306c88, - 0x3ccb46, - 0x380146, - 0x2a2cc6, - 0x2e368c, - 0x31bb84, - 0x23c54a, - 0x28eec8, - 0x299287, - 0x337186, - 0x237907, - 0x2f2c85, - 0x37a946, - 0x363a06, - 0x375047, - 0x2826c4, - 0x2acb45, - 0x27b184, - 0x2b9e47, - 0x27b3c8, - 0x27cf0a, - 0x285587, - 0x20a687, - 0x338787, - 0x2e3cc9, - 0x29944a, - 0x22dd83, - 0x2825c5, - 0x204f03, - 0x3b9989, - 0x390708, - 0x35b2c7, - 0x328949, - 0x26fd06, - 0x3b4408, - 0x345a05, - 0x213f8a, - 0x211909, - 0x250309, - 0x3cca07, - 0x26a009, - 0x22b388, - 0x375206, - 0x368308, - 0x3d0147, - 0x3b9707, - 0x245cc7, - 0x2e7308, - 0x300586, - 0x2a4145, - 0x281147, - 0x299bc8, - 0x32dd04, - 0x2fa444, - 0x292f07, - 0x2b0b47, - 0x31960a, - 0x375186, - 0x25a34a, - 0x2c2e47, - 0x2c0c07, - 0x2acc04, - 0x2a7c04, - 0x2d3706, - 0x312284, - 0x31228c, - 0x3d1dc5, - 0x357909, - 0x2b2a84, - 0x2b5745, - 0x284348, - 0x280b85, - 0x38d206, - 0x22f984, - 0x2c394a, - 
0x2dcf86, - 0x29e5ca, - 0x21e4c7, - 0x289f45, - 0x22bb85, - 0x24e08a, - 0x291f85, - 0x2a2bc6, - 0x246d04, - 0x2b2806, - 0x33d385, - 0x227706, - 0x2ef68c, - 0x2e390a, - 0x271404, - 0x231b06, - 0x29ea87, - 0x2d5b04, - 0x2394c8, - 0x2f1686, - 0x3814c9, - 0x2d7409, - 0x2b5b09, - 0x2cfe46, - 0x3d0246, - 0x368447, - 0x32a648, - 0x3d0049, - 0x32af07, - 0x298706, - 0x2b12c7, - 0x277bc5, - 0x2c0e44, - 0x368007, - 0x356a05, - 0x28b445, - 0x391547, - 0x245748, - 0x3bee46, - 0x29aa0d, - 0x29bf0f, - 0x2a0b4d, - 0x20ae44, - 0x2369c6, - 0x2d9808, - 0x200a05, - 0x296d88, - 0x2635ca, - 0x28b744, - 0x237006, - 0x2d6307, - 0x3bb1c7, - 0x2d8e49, - 0x3682c5, - 0x2b5684, - 0x2b738a, - 0x2c0249, - 0x26a107, - 0x29acc6, - 0x35b086, - 0x203e86, - 0x37eec6, - 0x2d870f, - 0x2d96c9, - 0x213c86, - 0x387006, - 0x329d09, - 0x2b6007, - 0x201fc3, - 0x23c646, - 0x217f03, - 0x355d08, - 0x2b1107, - 0x2a3489, - 0x2b4948, - 0x35f388, - 0x200c86, - 0x23e909, - 0x3538c5, - 0x233384, - 0x347fc7, - 0x39dc05, - 0x20ae44, - 0x234208, - 0x217544, - 0x2b5d47, - 0x367b46, - 0x2822c5, - 0x2d94c8, - 0x35af4b, - 0x2dfc87, - 0x24e306, - 0x2ca984, - 0x34c846, - 0x26d645, - 0x356a05, - 0x283e09, - 0x287789, - 0x2cfc04, - 0x3b9785, - 0x231b45, - 0x213e06, - 0x347908, - 0x2c23c6, - 0x39bf8b, - 0x382eca, - 0x2bef05, - 0x28f8c6, - 0x310505, - 0x2010c5, - 0x2ab407, - 0x326408, - 0x271004, - 0x269946, - 0x292506, - 0x22b547, - 0x317a44, - 0x281e06, - 0x2ebf85, - 0x2ebf89, - 0x3d0444, - 0x2dfac9, - 0x27f646, - 0x2c4748, - 0x231b45, - 0x381745, - 0x227706, - 0x342c09, - 0x359189, - 0x27d806, - 0x2ea148, - 0x2a8508, - 0x3104c4, - 0x2b8044, - 0x2b8048, - 0x2bc3c8, - 0x31a4c9, - 0x29a806, - 0x2a2cc6, - 0x33624d, - 0x353446, - 0x32f849, - 0x392685, - 0x218f06, - 0x2ca088, - 0x331245, - 0x356884, - 0x26d645, - 0x286c08, - 0x29d1c9, - 0x27b244, - 0x202806, - 0x280a4a, - 0x375f48, - 0x319789, - 0x38754a, - 0x3288c6, - 0x29c0c8, - 0x2bcec5, - 0x28c688, - 0x2f2d05, - 0x26fbc9, - 0x383949, - 0x201a82, - 0x29f545, - 0x272686, - 0x27f587, - 0x3886c5, - 0x315446, - 0x30c788, - 0x2b2c06, - 0x2d2a09, - 0x27ec06, - 0x32c488, - 0x359705, - 0x3c6f46, - 0x390588, - 0x286048, - 0x37fd88, - 0x2e8988, - 0x214344, - 0x22d103, - 0x2d2c44, - 0x285786, - 0x277c04, - 0x39ed47, - 0x380049, - 0x2c9645, - 0x2290c6, - 0x23c646, - 0x2af4cb, - 0x2b4d86, - 0x2bc6c6, - 0x2c8b08, - 0x333c46, - 0x2baf83, - 0x212103, - 0x2c0e44, - 0x232185, - 0x2cd1c7, - 0x27b3c8, - 0x27b3cf, - 0x28104b, - 0x347708, - 0x202886, - 0x347a0e, - 0x227703, - 0x2cd144, - 0x2b4d05, - 0x2b5506, - 0x290c4b, - 0x294346, - 0x31c009, - 0x2822c5, - 0x255688, - 0x2196c8, - 0x35904c, - 0x2a2046, - 0x32c206, - 0x2eab85, - 0x28b9c8, - 0x27fd45, - 0x35dac8, - 0x29ddca, - 0x2a0f89, - 0x6044c4, - 0x2000c2, - 0x3f207c02, - 0x200382, - 0x224604, - 0x202002, - 0x215584, - 0x201402, - 0x182c3, - 0x2003c2, - 0x207bc2, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x203a83, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x222103, - 0x2482c3, - 0x210803, - 0x28a904, - 0x20d183, - 0x2385c4, - 0x2355c3, - 0x2db184, - 0x214d83, - 0x388987, - 0x272203, - 0x2182c3, - 0x354608, - 0x2482c3, - 0x2a150b, - 0x2f3e83, - 0x3ab686, - 0x2061c2, - 0x2eea0b, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x2482c3, - 0x205443, - 0x221503, - 0x2000c2, - 0x1513c8, - 0x213105, - 0x356a88, - 0x2ecac8, - 0x207c02, - 0x203705, - 0x307ec7, - 0x206ac2, - 0x248787, - 0x200382, - 0x260647, - 0x2bb709, - 0x2bca88, - 0x2c9d89, - 0x212a42, - 0x26ce07, - 0x231984, - 0x307f87, - 
0x382dc7, - 0x261282, - 0x272203, - 0x206282, - 0x201402, - 0x2003c2, - 0x20d682, - 0x200902, - 0x207bc2, - 0x2abe05, - 0x325045, - 0x7c02, - 0x355c3, - 0x20d183, - 0x2355c3, - 0x20a743, - 0x214d83, - 0x207483, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0xe6003, - 0x2482c3, - 0x10dc3, - 0x101, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x219a43, - 0x222103, - 0xe6003, - 0x2482c3, - 0x21b2c3, - 0x42474f46, - 0x9f983, - 0xcabc5, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0xe6003, - 0x2482c3, - 0x1882, - 0x1513c8, - 0x182c3, - 0xe6003, - 0x4c604, - 0xe3f85, - 0x2000c2, - 0x3ad444, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x247fc3, - 0x230285, - 0x219a43, - 0x224f03, - 0x222103, - 0x252ec3, - 0x2482c3, - 0x215e83, - 0x2623c3, - 0x2020c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x207c02, - 0x2482c3, - 0x1513c8, - 0x214d83, - 0xe6003, - 0x1513c8, - 0xe6003, - 0x2b9343, - 0x20d183, - 0x232a84, - 0x2355c3, - 0x214d83, - 0x200d42, - 0x272203, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x200d42, - 0x20d203, - 0x222103, - 0x2482c3, - 0x2eca43, - 0x215e83, - 0x2000c2, - 0x207c02, - 0x214d83, - 0x222103, - 0x2482c3, - 0x3ab685, - 0x14dd46, - 0x28a904, - 0x2061c2, - 0x1513c8, - 0x2000c2, - 0xf4d45, - 0x1f548, - 0x193303, - 0x207c02, - 0x46893dc6, - 0x23084, - 0xb1b8b, - 0x3eec6, - 0x7c8c7, - 0x2355c3, - 0x50048, - 0x214d83, - 0x111a45, - 0x4284, - 0x26b1c3, - 0x559c7, - 0xde444, - 0x222103, - 0x7d906, - 0xe5e44, - 0xe6003, - 0x2482c3, - 0x2f5b44, - 0x12b507, - 0x14d949, - 0xb1948, - 0x1418c4, - 0x44a06, - 0x10e88, - 0x130a05, - 0x12249, - 0xf4d45, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x2182c3, - 0x2482c3, - 0x2f3e83, - 0x2061c2, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x219883, - 0x2bf0c4, - 0x222103, - 0x182c3, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x2db184, - 0x214d83, - 0x222103, - 0x2482c3, - 0x3ab686, - 0x2355c3, - 0x214d83, - 0x2e803, - 0xe6003, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0xf4d45, - 0x7c8c7, - 0x1513c8, - 0x214d83, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x4960d183, - 0x2355c3, - 0x222103, - 0x2482c3, - 0x1513c8, - 0x2000c2, - 0x207c02, - 0x20d183, - 0x214d83, - 0x222103, - 0x2003c2, - 0x2482c3, - 0x3390c7, - 0x312e4b, - 0x201103, - 0x23dd48, - 0x32a3c7, - 0x216246, - 0x211ec5, - 0x203849, - 0x27c548, - 0x37a409, - 0x3a6710, - 0x37a40b, - 0x2f0d49, - 0x201d83, - 0x201389, - 0x233786, - 0x23378c, - 0x2131c8, - 0x3cc848, - 0x3b5909, - 0x3bb8ce, - 0x2bb4cb, - 0x2768cc, - 0x208403, - 0x28d68c, - 0x208409, - 0x241e87, - 0x23550c, - 0x3c630a, - 0x248084, - 0x2509cd, - 0x28d548, - 0x21080d, - 0x294a46, - 0x28a90b, - 0x34dac9, - 0x387287, - 0x38fb86, - 0x36fe89, - 0x36f14a, - 0x31d7c8, - 0x2f3a84, - 0x33b987, - 0x24a147, - 0x23ecc4, - 0x2e5284, - 0x396349, - 0x3735c9, - 0x21dec8, - 0x210585, - 0x212985, - 0x20dcc6, - 0x250889, - 0x26384d, - 0x2f1888, - 0x20dbc7, - 0x211f48, - 0x3ced06, - 0x240bc4, - 0x2ab0c5, - 0x3cff46, - 0x3d1ac4, - 0x208307, - 0x209c8a, - 0x2144c4, - 0x217146, - 0x217ec9, - 0x217ecf, - 0x2193cd, - 0x21a286, - 0x21f150, - 0x21f546, - 0x21fac7, - 0x220387, - 0x22038f, - 0x220e09, - 0x2281c6, - 0x2288c7, - 0x2288c8, - 0x2294c9, - 0x282388, - 0x2e8dc7, - 0x213103, - 0x22ee46, - 0x3c3308, - 0x3bbb8a, - 0x3c4709, - 0x226303, - 0x307dc6, - 0x26978a, - 0x290987, - 
0x241cca, - 0x30a68e, - 0x220f46, - 0x29f747, - 0x3576c6, - 0x2084c6, - 0x21138b, - 0x212b0a, - 0x2acf4d, - 0x3d0307, - 0x26c3c8, - 0x26c3c9, - 0x26c3cf, - 0x321c4c, - 0x26f7c9, - 0x37f78e, - 0x388a8a, - 0x2bd746, - 0x3019c6, - 0x31f68c, - 0x3216cc, - 0x333808, - 0x37c007, - 0x239845, - 0x294144, - 0x373a4e, - 0x26a404, - 0x35fb47, - 0x3abb0a, - 0x3ca654, - 0x22e88f, - 0x220548, - 0x22ed08, - 0x33cc8d, - 0x33cc8e, - 0x22efc9, - 0x230908, - 0x23090f, - 0x23520c, - 0x23520f, - 0x236707, - 0x238e0a, - 0x24b80b, - 0x23c988, - 0x240547, - 0x26410d, - 0x3308c6, - 0x250b86, - 0x243e89, - 0x278048, - 0x249108, - 0x24910e, - 0x312f47, - 0x24bac5, - 0x24c3c5, - 0x208e44, - 0x216506, - 0x21ddc8, - 0x372dc3, - 0x2f47ce, - 0x2644c8, - 0x37ab0b, - 0x309687, - 0x2fe705, - 0x247946, - 0x2aec07, - 0x2fa9c8, - 0x33d0c9, - 0x2331c5, - 0x28a408, - 0x3580c6, - 0x3a7e8a, - 0x373949, - 0x2355c9, - 0x2355cb, - 0x204b88, - 0x23eb89, - 0x210646, - 0x38664a, - 0x20cc4a, - 0x23900c, - 0x22d487, - 0x2c9b8a, - 0x32b9cb, - 0x32b9d9, - 0x314188, - 0x3ab705, - 0x2642c6, - 0x26e5c9, - 0x389246, - 0x2e344a, - 0x27c746, - 0x2cbd04, - 0x2cbd0d, - 0x373207, - 0x357ec9, - 0x24f045, - 0x24f5c8, - 0x24fe09, - 0x250244, - 0x251687, - 0x251688, - 0x251ac7, - 0x26b9c8, - 0x255fc7, - 0x208c45, - 0x25d5cc, - 0x25de09, - 0x2d004a, - 0x3a9ec9, - 0x201489, - 0x386dcc, - 0x261e4b, - 0x263008, - 0x2648c8, - 0x268384, - 0x2882c8, - 0x2897c9, - 0x3c63c7, - 0x218106, - 0x29d007, - 0x3192c9, - 0x32430b, - 0x295e07, - 0x3cb587, - 0x21e607, - 0x210784, - 0x210785, - 0x2dae85, - 0x34fc4b, - 0x3b3104, - 0x2be6c8, - 0x2f790a, - 0x358187, - 0x35c687, - 0x2900d2, - 0x29fb46, - 0x232406, - 0x32598e, - 0x2a17c6, - 0x2951c8, - 0x2961cf, - 0x210bc8, - 0x39a248, - 0x2c424a, - 0x2c4251, - 0x2a490e, - 0x24084a, - 0x24084c, - 0x230b07, - 0x230b10, - 0x3c8748, - 0x2a4b05, - 0x2af28a, - 0x3d1b0c, - 0x298a4d, - 0x304446, - 0x304447, - 0x30444c, - 0x38afcc, - 0x22288c, - 0x2ac5cb, - 0x38a484, - 0x283104, - 0x38bb09, - 0x3adb47, - 0x3943c9, - 0x20ca89, - 0x3bc407, - 0x3c6186, - 0x3c6189, - 0x2b0d03, - 0x2b2d0a, - 0x202f47, - 0x34498b, - 0x2acdca, - 0x231a04, - 0x35ef46, - 0x285809, - 0x312104, - 0x3d1e8a, - 0x24e605, - 0x2c1245, - 0x2c124d, - 0x2c158e, - 0x2d2d85, - 0x337906, - 0x3ab287, - 0x25d84a, - 0x378786, - 0x2eb084, - 0x2ff3c7, - 0x2720cb, - 0x3cedc7, - 0x27cb44, - 0x286406, - 0x28640d, - 0x2dd40c, - 0x221fc6, - 0x2f1a8a, - 0x352b06, - 0x243508, - 0x231f07, - 0x247d8a, - 0x38a006, - 0x281e83, - 0x2bd846, - 0x3c3188, - 0x38bc8a, - 0x288887, - 0x288888, - 0x28c804, - 0x338c07, - 0x206c88, - 0x29b348, - 0x2b5308, - 0x3b9e8a, - 0x2e2845, - 0x20d207, - 0x240693, - 0x25b946, - 0x219c48, - 0x223dc9, - 0x248648, - 0x200d0b, - 0x35f708, - 0x272204, - 0x25aa86, - 0x31c586, - 0x316c09, - 0x2c7547, - 0x25d6c8, - 0x29b4c6, - 0x391444, - 0x38f585, - 0x2d0388, - 0x331d8a, - 0x2cb988, - 0x2d1146, - 0x29c2ca, - 0x2a99c8, - 0x2d5908, - 0x2d64c8, - 0x2d6d46, - 0x2d9a06, - 0x3a998c, - 0x2d9fd0, - 0x2a3985, - 0x2109c8, - 0x32f490, - 0x2109d0, - 0x3a658e, - 0x3a960e, - 0x3a9614, - 0x3aed4f, - 0x3af106, - 0x3269d1, - 0x38fd13, - 0x390188, - 0x324785, - 0x23e288, - 0x2fde85, - 0x2e59cc, - 0x22a7c9, - 0x293f89, - 0x22ac47, - 0x3a0309, - 0x31a787, - 0x300886, - 0x2aaec7, - 0x205b05, - 0x210e03, - 0x372f89, - 0x25ff49, - 0x22e803, - 0x3885c4, - 0x2f530d, - 0x34b10f, - 0x391485, - 0x34a606, - 0x22d6c7, - 0x212f47, - 0x3c00c6, - 0x3c00cb, - 0x2a5905, - 0x25fd06, - 0x301847, - 0x2566c9, - 0x3834c6, - 0x371c85, - 0x366b8b, - 0x211806, - 0x358d85, - 0x25eec8, - 0x2a7948, - 0x350f0c, - 
0x350f10, - 0x2b1f49, - 0x2b3487, - 0x30ef4b, - 0x2e7dc6, - 0x2e8c8a, - 0x2e99cb, - 0x2ea7ca, - 0x2eaa46, - 0x2ec905, - 0x32a2c6, - 0x23af48, - 0x22ad0a, - 0x33c91c, - 0x2f3f4c, - 0x2f4248, - 0x3ab685, - 0x389cc7, - 0x370f86, - 0x284745, - 0x21c546, - 0x3c0288, - 0x2c04c7, - 0x3bb7c8, - 0x25ba0a, - 0x22d7cc, - 0x20dec9, - 0x229247, - 0x289d04, - 0x24c486, - 0x399dca, - 0x20cb85, - 0x226a4c, - 0x228a88, - 0x31fa08, - 0x34378c, - 0x2303cc, - 0x231549, - 0x231787, - 0x37df0c, - 0x22afc4, - 0x25184a, - 0x30320c, - 0x27424b, - 0x2578cb, - 0x25cb86, - 0x260e07, - 0x230d47, - 0x230d4f, - 0x304e11, - 0x2e0e92, - 0x26294d, - 0x26294e, - 0x262c8e, - 0x3aef08, - 0x3aef12, - 0x26d148, - 0x224407, - 0x2543ca, - 0x2a8208, - 0x2a1785, - 0x395b0a, - 0x21f8c7, - 0x2f4b04, - 0x220ac3, - 0x238b05, - 0x2c44c7, - 0x302587, - 0x298c4e, - 0x34fecd, - 0x352009, - 0x242a85, - 0x3a6ac3, - 0x25b0c6, - 0x260c85, - 0x37ad48, - 0x2ba989, - 0x264305, - 0x26430f, - 0x2b1c87, - 0x211d45, - 0x30a04a, - 0x348286, - 0x2686c9, - 0x2fd9cc, - 0x2ff589, - 0x2110c6, - 0x2f770c, - 0x336d06, - 0x302188, - 0x302746, - 0x314306, - 0x2b4f04, - 0x316b83, - 0x2b670a, - 0x21e911, - 0x26f98a, - 0x2708c5, - 0x273747, - 0x25bd87, - 0x206d84, - 0x206d8b, - 0x2bc908, - 0x2be9c6, - 0x233e45, - 0x3a8184, - 0x251109, - 0x2008c4, - 0x248f47, - 0x2ff785, - 0x2ff787, - 0x325bc5, - 0x39f0c3, - 0x2242c8, - 0x35ebca, - 0x204f83, - 0x21314a, - 0x3bff06, - 0x26408f, - 0x3baa89, - 0x2f4750, - 0x2f9108, - 0x2d1849, - 0x299947, - 0x28638f, - 0x328d04, - 0x2db204, - 0x21f3c6, - 0x23ba86, - 0x3a284a, - 0x27dbc6, - 0x33f107, - 0x30b248, - 0x30b447, - 0x30c547, - 0x30d50a, - 0x30f38b, - 0x370545, - 0x2e0ac8, - 0x22fbc3, - 0x3b470c, - 0x341f0f, - 0x23964d, + 0x2d5447, + 0x209303, + 0x23808b, + 0x27d4ca, + 0x256a87, + 0x23ed88, + 0x30a608, + 0x2351c3, + 0x256587, + 0x210a43, + 0x202b08, + 0x205449, + 0x21e084, + 0x20f8c3, + 0x2ec2c8, + 0x2287c3, + 0x2d308a, + 0x2d8146, + 0x3aab07, + 0x215c83, + 0x20f1c6, + 0x318fc8, + 0x24b583, + 0x2ea886, + 0x2edccd, + 0x2ef748, + 0x2f758b, + 0x35cb46, + 0x3741c7, + 0x21ee85, + 0x376cca, + 0x22a805, + 0x24e70a, + 0x22c682, + 0x20ab43, + 0x245684, + 0x200006, + 0x3b0fc3, + 0x2aefc3, + 0x243003, + 0x2b7b03, + 0x376f03, + 0x201702, + 0x2d8845, + 0x2a6ec9, + 0x241003, + 0x203ac3, + 0x2146c3, + 0x200201, + 0x2cea07, + 0x2e39c5, + 0x394603, + 0x201a03, + 0x26dec4, + 0x256a03, + 0x218f88, + 0x362883, + 0x305b0d, + 0x27b888, + 0x20de86, + 0x32ea83, + 0x3a1083, + 0x3ad003, + 0xba09303, + 0x234a08, + 0x238084, + 0x241f83, + 0x200106, + 0x245b08, + 0x20a603, + 0x376d03, + 0x232303, + 0x2351c3, + 0x227643, + 0x25d0c3, + 0x229bc3, + 0x32ea03, + 0x221683, + 0x223dc3, + 0x38fac5, + 0x251884, + 0x252487, + 0x23a302, + 0x257b83, + 0x259a06, + 0x25c183, + 0x25d3c3, + 0x279603, + 0x36ad83, + 0x30f3c3, + 0x296407, + 0xbe2b883, + 0x246283, + 0x206e43, + 0x202b03, + 0x20f703, + 0x2f5843, + 0x364605, + 0x371403, + 0x24c689, + 0x2027c3, + 0x308dc3, + 0xc24cd03, + 0x2a4003, + 0x223788, + 0x2a6e06, + 0x3b74c6, + 0x29bfc6, + 0x38b9c7, + 0x214683, + 0x209383, + 0x2287c3, + 0x28a086, + 0x227682, + 0x2a0cc3, + 0x33a085, + 0x215c83, 0x25e247, - 0x352149, - 0x245fc7, - 0x25e708, - 0x3ca84c, - 0x2b88c8, - 0x271cc8, - 0x32d80e, - 0x33fa94, - 0x33ffa4, - 0x35cd8a, - 0x37afcb, - 0x31a844, - 0x31a849, - 0x237088, - 0x24cb85, - 0x3728ca, - 0x264707, - 0x32a1c4, - 0x203a83, - 0x20d183, - 0x2385c4, - 0x2355c3, - 0x214d83, - 0x224604, - 0x219a43, - 0x272203, - 0x2d9fc6, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x21a803, + 0x1602543, + 0x229183, + 0x236003, + 0x224443, + 
0x22e043, + 0x24b583, + 0x21ce06, + 0x364986, + 0x37d103, + 0x225683, + 0x214e83, + 0x25bfc3, + 0x313483, + 0x2fb1c3, + 0x2fd943, + 0x221d85, + 0x22d183, + 0x28c0c6, + 0x335f48, + 0x224183, + 0x3ccb49, + 0x39d888, + 0x220708, + 0x229a45, + 0x23b60a, + 0x23beca, + 0x23cb8b, + 0x23e948, + 0x3ba003, + 0x2fd983, + 0x34d183, + 0x348bc8, + 0x3b0b83, + 0x2f7204, + 0x260403, + 0x2007c3, + 0x22bac3, + 0x25fe43, + 0x200f83, + 0x22c682, + 0x22a443, + 0x239b03, + 0x315c83, + 0x316c44, + 0x245684, + 0x218e43, + 0xaf0c8, 0x2000c2, - 0x203a83, - 0x207c02, - 0x20d183, - 0x2385c4, - 0x2355c3, - 0x214d83, - 0x219a43, - 0x2d9fc6, - 0x222103, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x222103, - 0xe6003, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x2bf0c4, - 0x222103, - 0x2482c3, + 0x204542, + 0x201702, + 0x2013c2, + 0x200202, + 0x200c02, + 0x236842, + 0x2012c2, + 0x200382, + 0x201e02, + 0x20a502, + 0x204842, + 0x26ca42, + 0x204782, + 0x2c45c2, + 0x202882, + 0x20e102, + 0x203d02, + 0x2d2842, + 0x2063c2, + 0x200682, + 0x2157c2, + 0x208ac2, + 0x201202, + 0x203182, + 0x204882, + 0x201102, + 0xc2, + 0x4542, + 0x1702, + 0x13c2, + 0x202, + 0xc02, + 0x36842, + 0x12c2, + 0x382, + 0x1e02, + 0xa502, + 0x4842, + 0x6ca42, + 0x4782, + 0xc45c2, + 0x2882, + 0xe102, + 0x3d02, + 0xd2842, + 0x63c2, + 0x682, + 0x157c2, + 0x8ac2, + 0x1202, + 0x3182, + 0x4882, + 0x1102, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x7302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209302, + 0x24b583, + 0xd609303, + 0x22b883, + 0x2287c3, + 0xe6243, + 0x2203c2, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0xe6243, + 0x24b583, + 0xc882, + 0x2001c2, + 0x15c5885, + 0x20dc82, + 0xaf0c8, + 0x9302, + 0x237002, + 0x201742, + 0x23f9c2, + 0x20f742, + 0x23ce82, + 0x178145, + 0x203142, + 0x203c02, + 0x20b302, + 0x200ec2, + 0x202882, + 0x3a2a02, + 0x20eb02, + 0x290e82, + 0xe41c7, + 0xbab4d, + 0xdf5c9, + 0xaa44b, + 0xe5248, + 0x793c9, + 0x109846, + 0x22b883, + 0xaf0c8, + 0x10de04, + 0x10cdc3, + 0x1680c5, + 0xaf0c8, + 0xdd187, + 0x59086, + 0x168109, + 0xfc8e, + 0x7687, 0x2000c2, - 0x308bc3, - 0x207c02, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x200f82, - 0x24ce42, - 0x207c02, - 0x20d183, - 0x201c42, - 0x2005c2, - 0x224604, - 0x215584, - 0x358582, - 0x2bf0c4, + 0x28b004, + 0x209302, + 0x209303, + 0x2046c2, + 0x2351c3, + 0x200382, + 0x2da884, + 0x20f8c3, + 0x24f602, + 0x215c83, 0x2003c2, - 0x2482c3, - 0x21a803, - 0x25cb86, - 0x22e042, + 0x24b583, + 0x3acb86, + 0x32fa8f, + 0x769c43, + 0xaf0c8, + 0x209302, + 0x210a43, + 0x22b883, + 0x2287c3, + 0x2543, + 0xfc88, + 0x1576a8b, + 0x14187ca, + 0x1471c47, + 0x888cb, + 0xe4085, + 0xf6d85, + 0xe41c7, + 0x209302, + 0x209303, + 0x22b883, + 0x215c83, + 0x2000c2, + 0x203882, + 0x205fc2, + 0x10e09303, + 0x23f802, + 0x2351c3, + 0x21f242, + 0x220f02, + 0x22b883, + 0x2049c2, + 0x2716c2, + 0x2a8a42, + 0x203402, + 0x28fe82, + 0x200802, + 0x201042, + 0x272302, + 0x27c402, + 0x26bc02, + 0x2ae782, + 0x2c4442, + 0x21c402, + 0x2b1802, + 0x2287c3, + 0x203542, + 0x215c83, + 0x22b9c2, + 0x2d1842, + 0x24b583, + 0x241082, + 0x201202, + 0x214d82, + 0x201bc2, + 0x20ed82, + 0x2e6b02, + 0x20fa02, + 0x25a482, + 0x229642, + 0x32598a, + 0x36200a, + 0x39ca8a, + 0x3d2bc2, + 0x22a042, + 0x3645c2, + 0x11366cc9, + 0x11742c0a, + 0x1430587, + 0x11a00982, + 0x140d443, + 0x1302, + 0x142c0a, + 0x19648e, + 0x243644, + 0x12209303, + 0x2351c3, + 0x24f544, + 0x22b883, + 0x21e084, + 0x20f8c3, + 0x2287c3, + 0xe6444, + 0x168ac3, + 0x215c83, + 
0xe205, + 0x202543, + 0x24b583, + 0x14ed444, + 0x22d183, + 0x20ab43, + 0xaf0c8, + 0x169b86, + 0x15b6dc4, + 0x177645, + 0x744a, + 0x12b2c2, + 0x1a9546, + 0x7c91, + 0x12b66cc9, + 0x1776c8, + 0x28a88, + 0x1cfa47, + 0x3902, + 0xf6d8b, + 0x14b04b, + 0x18caca, + 0x590a, + 0x6dec7, + 0xaf0c8, + 0x11ee48, + 0xb607, + 0x1941538b, + 0x177c7, + 0x68c2, + 0x3e487, + 0x189c8a, + 0x5b10f, + 0xff14f, + 0x142c02, + 0x9302, + 0x86088, + 0xf23ca, + 0xdcc8a, + 0xd2cca, + 0x7b688, + 0x1cb08, + 0x5db08, + 0xdd148, + 0x10c608, + 0x69c2, + 0x1c590f, + 0x9ff8b, + 0x73dc8, + 0x37307, + 0x1324ca, + 0x15d74b, + 0x7c709, + 0x1323c7, + 0x1ca08, + 0x3c08c, + 0x11ae87, + 0x17baca, + 0x65e48, + 0x10004e, + 0x6738e, + 0x6dd0b, + 0x6e70b, + 0xe1a4b, + 0xecdc9, + 0xfe94b, + 0x10370d, + 0x18af4b, + 0x3cf8d, + 0x3d30d, + 0x401ca, + 0x454cb, + 0x45e0b, + 0x4a005, + 0x19824650, + 0x2230f, + 0x11c00f, + 0x154a4d, + 0xb83d0, + 0x7242, + 0x19e25b08, + 0x28748, + 0x12038e, + 0x1a362845, + 0x4eb0b, + 0x13b790, + 0x55148, + 0x1cc0a, + 0x6e8c9, + 0x64a07, + 0x64d47, + 0x64f07, + 0x65287, + 0x66187, + 0x66787, + 0x681c7, + 0x68487, + 0x68e47, + 0x69147, + 0x69807, + 0x699c7, + 0x69b87, + 0x69d47, + 0x6a047, + 0x6a787, + 0x6b047, + 0x6b807, + 0x6bdc7, + 0x6c087, + 0x6c247, + 0x6c547, + 0x6c907, + 0x6cb07, + 0x6f3c7, + 0x6f587, + 0x6f747, + 0x70447, + 0x70947, + 0x70fc7, + 0x72187, + 0x72447, + 0x72947, + 0x72b07, + 0x72f07, + 0x73407, + 0x74047, + 0x74447, + 0x74607, + 0x747c7, + 0x76387, + 0x76fc7, + 0x77507, + 0x77ac7, + 0x77c87, + 0x78007, + 0x78587, + 0xb182, + 0x5dc0a, + 0xe6587, + 0x87fc5, + 0xbc891, + 0xd586, + 0x11dbca, + 0x85f0a, + 0x59086, + 0x11f8b, + 0x642, + 0x31a51, + 0xb4ac9, + 0x95789, + 0x72302, + 0x7318a, + 0xa63c9, + 0xa6b0f, + 0xa710e, + 0xa8148, + 0x55282, + 0x1b7309, + 0x19c04e, + 0x10694c, + 0xe784f, + 0x1b170e, + 0x1e6cc, + 0x23bc9, + 0x26311, + 0x268c8, + 0x28c52, + 0x12334d, + 0x12398d, + 0x3c48b, + 0x42c95, + 0x47089, + 0x4da8a, + 0x5ca09, + 0x6b410, + 0x70d0b, + 0x8188f, + 0x8634b, + 0x16e38c, + 0x1c0290, + 0x9e40a, + 0xa0b8d, + 0xa1c0e, + 0xaa10a, + 0xaac0c, + 0xada54, + 0xb4751, + 0xfaf0b, + 0x152b0f, + 0x121a0d, + 0x1246ce, + 0xb2a4c, + 0xb398c, + 0xb444b, + 0xbbb4e, + 0xbc150, + 0xc034b, + 0xc09cd, + 0xc150f, + 0xc234c, + 0x11fece, + 0x13ead1, + 0xcc38c, + 0xd7287, + 0xdf9cd, + 0xfa98c, + 0xeb710, + 0xf394d, + 0xfd647, + 0x102410, + 0x134308, + 0x13dc4b, + 0x19194f, + 0xccd08, + 0x11ddcd, + 0x1a0890, + 0xff049, + 0x1a6af746, + 0xb0643, + 0xb5205, + 0x6d82, + 0x56ec9, + 0x7680a, + 0x1aa3e684, + 0x116c86, + 0x1a00a, + 0x1ad72e89, + 0x26083, + 0x14ee8a, + 0xdab11, + 0xdaf49, + 0xdcc07, + 0xdd987, + 0xe6648, + 0x7e0b, + 0x12d689, + 0xe6dd0, + 0xe728c, + 0xe7d08, + 0xe80c5, + 0xc6d08, + 0x1b8c8a, + 0x26147, + 0x74fc7, + 0x1e82, + 0x13c48a, + 0x11c349, + 0x72805, + 0x5e0ca, + 0x8c80f, + 0x194ecb, + 0x1646cc, + 0x29b12, + 0xa3145, + 0xe94c8, + 0x19db8a, + 0x1b2f4a45, + 0x1642cc, + 0x1387c3, + 0x1a2a02, + 0xfdc8a, + 0x14fe00c, + 0x11b208, + 0x3d148, + 0x195147, + 0x6702, + 0x22c2, + 0x532d0, + 0x7aa07, + 0x3108f, + 0xea746, + 0xa74e, + 0x1557cb, + 0x4b248, + 0x7cac9, + 0x10d992, + 0x11428d, + 0x1147c8, + 0xaa309, + 0xd4c8d, + 0x108489, + 0x19a6cb, + 0x8548, + 0x86d88, + 0x8abc8, + 0x13ae09, + 0x13b00a, + 0x8ee0c, + 0xf800a, + 0x1134c7, + 0x4790d, + 0x100b4b, + 0x12bccc, + 0x39bc8, + 0x48909, + 0x654d0, + 0x2a82, + 0x7f2cd, + 0x2fc2, + 0x13d82, + 0x11340a, + 0x11daca, + 0x11f1cb, + 0x45fcc, + 0x11e74a, + 0x11ebce, + 0x143f8d, + 0x1b5d2a85, + 0x12ed08, + 0xc882, + 0x12f1044e, + 0x137697ce, + 0x13e013ca, + 
0x1477730e, + 0x14f0ca8e, + 0x157cd18c, + 0x1430587, + 0x1430589, + 0x140d443, + 0x15e5788c, + 0x167955c9, + 0x16fc4cc9, + 0x17607249, + 0x1302, + 0x110391, + 0x169711, + 0x130d, + 0x177251, + 0x10c9d1, + 0x1cd0cf, + 0x577cf, + 0x19550c, + 0x1c4c0c, + 0x718c, + 0x1266cd, + 0x75e55, + 0xc510c, + 0x12e38c, + 0x131a10, + 0x14e40c, + 0x15e5cc, + 0x17be99, + 0x185ad9, + 0x19e359, + 0x1c63d4, + 0x1d0854, + 0xaa94, + 0xb794, + 0xc214, + 0x17ec51c9, + 0x1840ad49, + 0x18f2e449, + 0x13226ac9, + 0x1302, + 0x13a26ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x14226ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x14a26ac9, + 0x1302, + 0x15226ac9, + 0x1302, + 0x15a26ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x16226ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x16a26ac9, + 0x1302, + 0x17226ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x17a26ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x18226ac9, + 0x1302, + 0x18a26ac9, + 0x1302, + 0x19226ac9, + 0x1302, + 0xaa8a, + 0x1302, + 0x7c85, + 0x18cac4, + 0x11044e, + 0x1697ce, + 0x1a3ce, + 0x13ca, + 0x17730e, + 0x10ca8e, + 0x1cd18c, + 0x5788c, + 0x1955c9, + 0x1c4cc9, + 0x7249, + 0xc51c9, + 0xad49, + 0x12e449, + 0x7604d, + 0xba49, + 0xc4c9, + 0x14be04, + 0x12eec4, + 0x1401c4, + 0x144444, + 0x88b84, + 0x39944, + 0x3adc4, + 0x57e04, + 0x1cfa44, + 0x15a5e83, + 0x16583, + 0x7242, + 0x143f83, + 0xa042, + 0xa048, + 0x12d707, + 0x69c2, + 0x2000c2, + 0x209302, + 0x2046c2, + 0x200d42, + 0x200382, + 0x2003c2, + 0x2022c2, + 0x209303, + 0x2351c3, + 0x22b883, + 0x20f703, + 0x215c83, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x215c83, + 0x24b583, + 0xdb83, + 0x22b883, + 0x1e084, + 0x2000c2, + 0x373a83, + 0x1da09303, + 0x23e1c7, + 0x22b883, + 0x216c03, + 0x226004, + 0x215c83, + 0x24b583, + 0x2678ca, + 0x3acb85, + 0x214e83, + 0x216a42, + 0xaf0c8, + 0xaf0c8, + 0x9302, + 0x134c82, + 0x1e372c0b, + 0x1e62e944, + 0x3e5c5, + 0x7f85, + 0x122846, + 0x1ea07f85, + 0x54743, + 0xe0383, + 0x10de04, + 0x10cdc3, + 0x1680c5, + 0xf6d85, + 0xaf0c8, + 0x177c7, + 0x9303, + 0x1f23aec7, + 0x175e06, + 0x1f50c8c5, + 0x175ec7, + 0x1d40a, + 0x1bcc8, + 0x1d307, + 0x7e008, + 0xd8447, + 0xfbf4f, + 0x1842c7, + 0x57c06, + 0x13b790, + 0x13928f, + 0x1ff09, + 0x116d04, + 0x1f975f8e, + 0x2044c, + 0x15d94a, + 0x7c887, + 0xe5a0a, + 0x174789, + 0x1a370c, + 0xbf3ca, + 0x5940a, + 0x168109, + 0x116c86, + 0x7c94a, + 0x114eca, + 0x9b74a, + 0x151689, + 0xda448, + 0xda6c6, + 0xe048d, + 0xb5685, + 0x1ff816cc, + 0x7687, + 0x105149, + 0xed787, + 0xe4d54, + 0x107ccb, + 0x73c0a, + 0x10d80a, + 0xa414d, + 0x151f3c9, + 0x11404c, + 0x1145cb, + 0x3cf83, + 0x3cf83, + 0x3cf86, + 0x3cf83, + 0x122848, + 0xb7849, + 0x173a83, + 0xaf0c8, + 0x9302, + 0x4f544, + 0x5a003, + 0x1c1245, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x203ac3, + 0x209303, + 0x2351c3, + 0x210a43, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x295943, + 0x20ab43, + 0x203ac3, + 0x28b004, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x233603, + 0x209303, + 0x2351c3, + 0x20f783, + 0x210a43, + 0x22b883, + 0x21e084, + 0x329f83, + 0x209383, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x214e83, + 0x209783, + 0x21e09303, + 0x2351c3, + 0x24b083, + 0x22b883, + 0x220983, + 0x209383, + 0x24b583, + 0x203d03, + 0x3ca004, + 0xaf0c8, + 0x22609303, + 0x2351c3, + 0x2a8203, + 0x22b883, + 0x2287c3, + 0x226004, + 0x215c83, + 0x24b583, + 0x20f343, + 0xaf0c8, + 0x22e09303, + 0x2351c3, + 0x210a43, + 0x202543, + 0x24b583, + 0xaf0c8, + 0x1430587, + 0x373a83, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x226004, + 0x215c83, + 0x24b583, + 0xf6d85, + 0xe41c7, + 0xe4f8b, + 0xdb344, + 0xb5685, + 
0x158e708, + 0xa884d, + 0x24243ac5, + 0x91f04, + 0xd983, + 0xfef45, + 0x2561c5, + 0xaf0c8, + 0x19602, + 0x456c3, + 0xf9dc6, + 0x32c3c8, + 0x3a5d07, + 0x28b004, + 0x394c06, + 0x3c4ac6, + 0xaf0c8, + 0x325283, + 0x31ba09, + 0x238d95, + 0x38d9f, + 0x209303, + 0x2c4cd2, + 0x16ee86, + 0x182285, + 0x1cc0a, + 0x6e8c9, + 0x2c4a8f, + 0x2da884, + 0x2ce145, + 0x308b90, + 0x38d247, + 0x202543, + 0x229188, + 0x15f886, + 0x2a538a, + 0x21df44, + 0x2f4483, + 0x3acb86, + 0x216a42, + 0x2ee5cb, + 0x2543, + 0x1a2344, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0x2fb603, + 0x209302, + 0xf0fc3, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x216c03, + 0x241703, + 0x24b583, + 0x209302, + 0x209303, + 0x2351c3, + 0x215c83, + 0x2543, + 0x24b583, + 0x2000c2, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x7f85, + 0x28b004, + 0x209303, + 0x2351c3, + 0x229d44, + 0x215c83, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0xe6243, + 0x24b583, + 0x209303, + 0x2351c3, + 0x210a43, + 0x202b03, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x375844, + 0x21e084, + 0x215c83, + 0x24b583, + 0x20ab43, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0xe6243, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2ba043, + 0x69e83, + 0x16c03, + 0x215c83, + 0x24b583, + 0x32598a, + 0x341349, + 0x3581cb, + 0x35884a, + 0x36200a, + 0x37aecb, + 0x39044a, + 0x396b4a, + 0x39ca8a, + 0x39cd0b, + 0x3b89c9, + 0x3bf5ca, + 0x3bfa0b, + 0x3cb54b, + 0x3d130a, + 0x209303, + 0x2351c3, + 0x210a43, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0x1c5b8b, + 0x5eb48, + 0xd4dc4, + 0x7f46, + 0x40489, + 0xaf0c8, + 0x209303, + 0x264a04, + 0x213b02, + 0x226004, + 0x368605, + 0x203ac3, + 0x28b004, + 0x209303, + 0x238084, + 0x2351c3, + 0x24f544, + 0x2da884, + 0x21e084, + 0x209383, + 0x215c83, + 0x24b583, + 0x27f485, + 0x233603, + 0x214e83, + 0x205dc3, + 0x271504, + 0x369b04, + 0x2b7b05, + 0xaf0c8, + 0x30be04, + 0x3946c6, + 0x368244, + 0x209302, + 0x2493c7, + 0x250f47, + 0x24ce84, + 0x25ab45, + 0x2f3b45, + 0x230105, + 0x21e084, + 0x38ba88, + 0x237846, + 0x320e88, + 0x27c445, + 0x2e1305, + 0x2655c4, + 0x24b583, + 0x2f6044, + 0x379c86, + 0x3acc83, + 0x271504, + 0x24e805, + 0x256e44, + 0x247744, + 0x216a42, + 0x22d246, + 0x3aeb86, + 0x311945, + 0x2000c2, + 0x373a83, + 0x2b209302, + 0x225c84, + 0x200382, + 0x2287c3, + 0x207e02, + 0x215c83, + 0x2003c2, + 0x2fc846, + 0x201203, + 0x20ab43, + 0xa8a84, + 0xaf0c8, + 0xaf0c8, + 0x22b883, + 0xe6243, + 0x2000c2, + 0x2be09302, + 0x22b883, + 0x269b03, + 0x329f83, + 0x22e944, + 0x215c83, + 0x24b583, + 0xaf0c8, + 0x2000c2, + 0x2c609302, + 0x209303, + 0x215c83, + 0x2543, + 0x24b583, + 0x682, + 0x209702, + 0x22c682, + 0x216c03, + 0x2ec143, + 0x2000c2, + 0xf6d85, + 0xaf0c8, + 0xe41c7, + 0x209302, + 0x2351c3, + 0x24f544, + 0x202c03, + 0x22b883, + 0x202b03, + 0x2287c3, + 0x215c83, + 0x213203, + 0x24b583, + 0x20f2c3, + 0x9a893, + 0xc4614, + 0xf6d85, + 0xe41c7, + 0x105a46, + 0x76a0b, + 0x3cf86, + 0x57f07, + 0x5ab46, + 0x649, + 0xe1f0a, + 0x89e4d, + 0xba84c, + 0x11584a, + 0x181cc8, + 0x178145, + 0x1d448, + 0xea746, + 0x71146, + 0x40386, + 0x207242, + 0x16ae04, + 0x8e7c6, + 0x82e8e, + 0x15d34c, + 0xf6d85, + 0x18cc87, + 0x1e9d1, + 0x1cf8ca, + 0x209303, + 0x7df85, + 0x4b6c8, + 0x22984, + 0x2d821986, + 0xbc886, + 0xde186, + 0x9014a, + 0x194643, + 0x2de44684, + 0x605, + 0x103683, + 0x2e236647, + 0xe205, + 0x1204c, + 0xf8ec8, + 
0x9e04b, + 0x2e64c34c, + 0x140c783, + 0xb6148, + 0x9fe09, + 0x11f4c8, + 0x1419f46, + 0x2eb90dc9, + 0x1a0c47, + 0xe408a, + 0xd7c8, + 0x122848, + 0x1cfa44, + 0x1cac45, + 0x9e187, + 0x2ee9e183, + 0x2f365e86, + 0x2f6f68c4, + 0x2fafde47, + 0x122844, + 0x122844, + 0x122844, + 0x122844, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x2000c2, + 0x209302, + 0x22b883, + 0x206982, + 0x215c83, + 0x24b583, + 0x201203, + 0x382a8f, + 0x382e4e, + 0xaf0c8, + 0x209303, + 0x45947, + 0x2351c3, + 0x22b883, + 0x20f8c3, + 0x215c83, + 0x24b583, + 0x1604, + 0x10cf04, + 0x10f744, + 0x219ac3, + 0x30e9c7, + 0x200a82, + 0x2c63c9, + 0x204542, + 0x2535cb, + 0x29ea0a, + 0x2abdc9, + 0x200542, + 0x3ccc86, + 0x232bd5, + 0x253715, + 0x2343d3, + 0x253c93, + 0x227442, + 0x229845, + 0x30bb0c, + 0x27724b, + 0x3c2185, + 0x2013c2, + 0x329342, + 0x392686, + 0x203902, + 0x260646, + 0x21ce8d, + 0x20df8c, + 0x2246c4, + 0x200882, + 0x219002, + 0x229008, + 0x200202, + 0x220a86, + 0x333e8f, + 0x220a90, + 0x2f1a44, + 0x232d95, + 0x234553, + 0x20d383, + 0x32980a, + 0x20f087, + 0x34d489, + 0x2e5787, + 0x30e842, + 0x200282, + 0x3b3a86, + 0x201782, + 0xaf0c8, + 0x20d1c2, + 0x20d9c2, + 0x223007, + 0x33fdc7, + 0x33fdd1, + 0x216ec5, + 0x33a2ce, + 0x216ecf, + 0x2068c2, + 0x20f287, + 0x219b08, + 0x20abc2, + 0x2bf442, + 0x33d7c6, + 0x33d7cf, + 0x3743d0, + 0x22c302, + 0x200dc2, + 0x335dc8, + 0x209483, + 0x25a1c8, + 0x20948d, + 0x235843, + 0x31c788, + 0x23584f, + 0x235c0e, + 0x30d4ca, + 0x22f0d1, + 0x22f550, + 0x2dbb0d, + 0x2dbe4c, + 0x2752c7, + 0x329987, + 0x394cc9, + 0x2247c2, + 0x200c02, + 0x3403cc, + 0x3408cb, + 0x201242, + 0x2b4606, + 0x20f382, + 0x200482, + 0x342c02, + 0x209302, + 0x22fb44, + 0x23aa47, + 0x22c842, + 0x240ac7, + 0x242847, + 0x21fb02, + 0x22d202, + 0x245805, + 0x217382, + 0x384c0e, + 0x2a440d, + 0x2351c3, + 0x28784e, + 0x3bc50d, + 0x29af83, + 0x202482, + 0x285dc4, + 0x236882, + 0x20a682, + 0x3a0005, + 0x3a19c7, + 0x24a342, + 0x200d42, + 0x24f147, + 0x251cc8, + 0x23a302, + 0x2a31c6, + 0x35028c, + 0x35078b, + 0x208282, + 0x26120f, + 0x2615d0, + 0x2619cf, + 0x261d95, + 0x2622d4, + 0x2627ce, + 0x262b4e, + 0x262ecf, + 0x26328e, + 0x263614, + 0x263b13, + 0x263fcd, + 0x278749, + 0x28b943, + 0x202c02, + 0x218205, + 0x204ec6, + 0x200382, + 0x37ee87, + 0x22b883, + 0x200642, + 0x2339c8, + 0x22f311, + 0x22f750, + 0x203502, + 0x282947, + 0x200b02, + 0x209047, + 0x206d82, + 0x2118c9, + 0x392647, + 0x2a61c8, + 0x2217c6, + 0x2ec043, + 0x3672c5, + 0x235442, + 0x2004c2, + 0x3b3e85, + 0x35d0c5, + 0x204582, + 0x219343, + 0x3763c7, + 0x216d07, + 0x202d42, + 0x257344, + 0x21e283, + 0x321009, + 0x2fbdc8, + 0x205142, + 0x20bcc2, + 0x391507, + 0x22f005, + 0x2b8c48, + 0x348047, + 0x212e83, + 0x28e646, + 0x2db98d, + 0x2dbd0c, + 0x302c06, + 0x201742, + 0x29df02, + 0x209382, + 0x2356cf, + 0x235ace, + 0x2f3bc7, + 0x2025c2, + 0x3574c5, + 0x3574c6, + 0x225282, + 0x203542, + 0x28d146, + 0x208f83, + 0x208f86, + 0x2c8e05, + 0x2c8e0d, + 0x2c93d5, + 0x2c9c8c, + 0x2ca9cd, + 0x2cad92, + 0x204842, + 0x26ca42, + 0x200a42, + 0x257686, + 0x306806, + 0x201e82, + 0x204f46, + 0x20b302, + 0x20b305, + 0x212782, + 0x2a4509, + 0x22748c, + 0x2277cb, + 0x2003c2, + 0x252888, + 0x20ef42, + 0x204782, + 0x272c46, + 0x226a45, + 0x373087, + 0x2ecfc5, + 0x290385, + 0x207f42, + 0x2044c2, + 0x202882, + 0x2e7b47, + 0x2fc90d, + 0x2fcc8c, + 0x35a8c7, + 0x2256c2, + 0x20e102, + 0x237b48, + 0x257048, + 0x2e7ec8, + 0x31dd84, + 0x2bbdc7, + 0x23e703, + 0x257d02, + 0x21ad42, + 0x2f2789, + 0x300447, + 0x203d02, + 0x273045, + 0x244042, + 0x230642, + 0x2bdf03, + 0x2bdf06, + 
0x2fb1c2, + 0x2fc6c2, + 0x200402, + 0x3c1046, + 0x2d9447, + 0x201902, + 0x200902, + 0x25a00f, + 0x28768d, + 0x39c44e, + 0x3bc38c, + 0x203282, + 0x201182, + 0x221605, + 0x323e86, + 0x215e02, + 0x2063c2, + 0x200682, + 0x287a04, + 0x2ec244, + 0x255946, + 0x2022c2, + 0x27a587, + 0x243703, + 0x243708, + 0x243d88, + 0x24d907, + 0x254446, + 0x20ac02, + 0x239603, + 0x333607, + 0x295206, + 0x2f5105, + 0x31e108, + 0x200b42, + 0x3cca47, + 0x204882, + 0x2e03c2, + 0x208502, + 0x217049, + 0x243a42, + 0x201b42, + 0x2540c3, + 0x30bf47, + 0x203c42, + 0x22760c, + 0x22790b, + 0x302c86, + 0x3035c5, + 0x217442, + 0x201102, + 0x2bb0c6, + 0x27ac43, + 0x329b87, + 0x212002, + 0x2008c2, + 0x232a55, + 0x2538d5, + 0x234293, + 0x253e13, + 0x38f5c7, + 0x3b9b51, + 0x3ba290, + 0x266312, + 0x277691, + 0x280348, + 0x280350, + 0x28fa0f, + 0x29e7d3, + 0x2abb92, + 0x2bcc90, + 0x33decf, + 0x3ba892, + 0x3bba51, + 0x2af293, + 0x3b8252, + 0x2af88f, + 0x2c5c4e, + 0x2c8992, + 0x2d3951, + 0x2d59cf, + 0x2d65ce, + 0x3bd011, + 0x3bd7d0, + 0x2d78d2, + 0x2dd551, + 0x3bddd0, + 0x3be3cf, + 0x2de551, + 0x2e0c90, + 0x2e8806, + 0x2f59c7, + 0x209b07, + 0x204042, + 0x2837c5, + 0x3828c7, + 0x22c682, + 0x208042, + 0x22a445, + 0x21a9c3, + 0x3b7206, + 0x2fcacd, + 0x2fce0c, + 0x204fc2, + 0x30b98b, + 0x27710a, + 0x22970a, + 0x2b6f09, + 0x2f008b, + 0x34818d, + 0x30900c, + 0x271a8a, + 0x27818c, + 0x295c0b, + 0x3c1fcc, + 0x3c24ce, + 0x3c2bcb, + 0x3c308c, + 0x2ae6c3, + 0x308386, + 0x30a182, + 0x2fe442, + 0x210e83, + 0x208602, + 0x225b43, + 0x35a086, + 0x261f47, + 0x334786, + 0x2f2588, + 0x376248, + 0x319706, + 0x205cc2, + 0x31130d, + 0x31164c, + 0x2da947, + 0x315687, + 0x237282, + 0x215082, + 0x255ac2, + 0x252082, + 0x333d97, + 0x33a1d6, + 0x33d6d7, + 0x3402d4, + 0x3407d3, + 0x350194, + 0x350693, + 0x3b6a50, + 0x3b9a59, + 0x3ba198, + 0x3ba79a, + 0x3bb959, + 0x3bcf19, + 0x3bd6d8, + 0x3bdcd8, + 0x3be2d7, + 0x3c1ed4, + 0x3c23d6, + 0x3c2ad3, + 0x3c2f94, + 0x209302, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x226004, + 0x215c83, + 0x24b583, + 0x201203, + 0x2000c2, + 0x20a5c2, + 0x31a919c5, + 0x31e89205, + 0x323c0d06, + 0xaf0c8, + 0x326afe05, + 0x209302, + 0x2046c2, + 0x32b0c305, + 0x32e81785, + 0x33282b07, + 0x33685009, + 0x33a66bc4, + 0x200382, + 0x200642, + 0x33e5c845, + 0x34297389, + 0x34732248, + 0x34aad8c5, + 0x34f3a787, + 0x3522ac88, + 0x356e9385, + 0x35a6cf06, + 0x35f91009, + 0x362d0f48, + 0x366c0808, + 0x36a979ca, + 0x36e78f84, + 0x37202e45, + 0x376bd7c8, + 0x37b326c5, + 0x213302, + 0x37e485c3, + 0x382a3546, + 0x387237c8, + 0x38b1a0c6, + 0x38f633c8, + 0x39366506, + 0x3964ef04, + 0x203202, + 0x39b357c7, + 0x39ea9084, + 0x3a27c147, + 0x3a736107, + 0x2003c2, + 0x3aa9c405, + 0x3ae49044, + 0x3b2fa647, + 0x3b63fdc7, + 0x3ba85c06, + 0x3be81f05, + 0x3c297487, + 0x3c6eadc8, + 0x3ca18587, + 0x3ceb5889, + 0x3d2c9fc5, + 0x3d721887, + 0x3da91006, + 0x3dec6a08, + 0x22a94d, + 0x27ea89, + 0x2a65cb, + 0x2a830b, + 0x3a3f0b, + 0x315d0b, + 0x32408b, + 0x32434b, + 0x324c49, + 0x325c0b, + 0x325ecb, + 0x326a0b, + 0x3276ca, + 0x327c0a, + 0x32820c, + 0x32a68b, + 0x32b0ca, + 0x33e74a, + 0x34564e, + 0x34654e, + 0x3468ca, + 0x34898a, + 0x34964b, + 0x34990b, + 0x34a58b, + 0x36be8b, + 0x36c48a, + 0x36d14b, + 0x36d40a, + 0x36d68a, + 0x36d90a, + 0x3916cb, + 0x397a0b, + 0x399cce, + 0x39a04b, + 0x3a1b8b, + 0x3a2d8b, + 0x3a718a, + 0x3a7409, + 0x3a764a, + 0x3a904a, + 0x3b944b, + 0x3bfccb, + 0x3c068a, + 0x3c190b, + 0x3c7b8b, + 0x3d0d4b, + 0x3e283e88, + 0x3e6895c9, + 0x3ea9fc89, + 0x3eee2748, + 0x351c05, + 0x201dc3, + 0x26d604, + 0x30fa85, + 0x266906, + 
0x26cc85, + 0x288c84, + 0x37ed88, + 0x31d905, + 0x293304, + 0x3d2007, + 0x29f20a, + 0x34bb4a, + 0x2f3cc7, + 0x213d07, + 0x307547, + 0x27e647, + 0x301d05, + 0x211c46, + 0x2bb9c7, + 0x245704, + 0x2e9946, + 0x2e9846, + 0x3c4745, + 0x34dd44, + 0x298a06, + 0x29d287, + 0x2eca46, + 0x3353c7, + 0x26d6c3, + 0x3c7e46, + 0x232845, + 0x282c07, + 0x26a94a, + 0x233ac4, + 0x21fc88, + 0x2b30c9, + 0x2e7547, + 0x32af46, + 0x38bc88, + 0x31a809, + 0x34d644, + 0x39da04, + 0x2a1185, + 0x2bb6c8, + 0x2c6f87, + 0x3215c9, + 0x267fc8, + 0x2e8906, + 0x2ed386, + 0x2993c8, + 0x370006, + 0x289205, + 0x285cc6, + 0x27cd08, + 0x2355c6, + 0x258a4b, + 0x2e0246, + 0x29b04d, + 0x206245, + 0x2a8f46, + 0x22ad45, + 0x35c809, + 0x3525c7, + 0x3ca408, + 0x2ac746, + 0x299c49, + 0x34aac6, + 0x26a8c5, + 0x2a1086, + 0x2c4006, + 0x2cbe49, + 0x376786, + 0x29ef07, + 0x241745, + 0x216383, + 0x258bc5, + 0x29b307, + 0x256446, + 0x206149, + 0x3c0d06, + 0x26b246, + 0x212c09, + 0x2856c9, + 0x2a1a87, + 0x30e088, + 0x2bc6c9, + 0x283448, + 0x396d86, + 0x2da205, + 0x2cd90a, + 0x26b2c6, + 0x23e046, + 0x2d20c5, + 0x2d0908, + 0x22eb87, + 0x23184a, + 0x24fa86, + 0x27eec5, + 0x375686, + 0x38a707, + 0x32ae07, + 0x2cc805, + 0x26aa85, + 0x3bca86, + 0x2b0706, + 0x2d2906, + 0x2bdc84, + 0x284589, + 0x28a446, + 0x2fb38a, + 0x22ccc8, + 0x34cd88, + 0x34bb4a, + 0x21b345, + 0x29d1c5, + 0x2ec4c8, + 0x2dca08, + 0x230347, + 0x31fd86, + 0x338308, + 0x2ab147, + 0x282d48, + 0x2b4306, + 0x286948, + 0x2969c6, + 0x27c5c7, + 0x39d786, + 0x298a06, + 0x30438a, + 0x22fbc6, + 0x2da209, + 0x319806, + 0x2f134a, + 0x24ef09, + 0x2fd2c6, + 0x2b5bc4, + 0x2182cd, + 0x289847, + 0x2b89c6, + 0x2c06c5, + 0x34ab45, + 0x393286, + 0x2fa489, + 0x2d0487, + 0x27da46, + 0x2e3106, + 0x288d09, + 0x289144, + 0x36f604, + 0x30b388, + 0x35a446, + 0x272648, + 0x31e488, + 0x2a95c7, + 0x3b59c9, + 0x2d2b07, + 0x2afcca, + 0x2f324f, + 0x345e4a, + 0x221405, + 0x27cf45, + 0x21b085, + 0x2f1987, + 0x236c43, + 0x30e288, + 0x365a46, + 0x365b49, + 0x2e8346, + 0x2cf187, + 0x299a09, + 0x3ca308, + 0x2d2187, + 0x3208c3, + 0x351c85, + 0x38a245, + 0x2bdacb, + 0x332784, + 0x23eb84, + 0x279c86, + 0x320a87, + 0x39f58a, + 0x248647, + 0x209847, + 0x281785, + 0x3cc205, + 0x26fec9, + 0x298a06, + 0x2484cd, + 0x3769c5, + 0x2b1e83, + 0x202703, + 0x3aedc5, + 0x357005, + 0x38bc88, + 0x27e307, + 0x36f386, + 0x29f906, + 0x22b105, + 0x235487, + 0x201f47, + 0x237707, + 0x202eca, + 0x3c7f08, + 0x2bdc84, + 0x27f847, + 0x280747, + 0x349b86, + 0x296047, + 0x2e3748, + 0x2e20c8, + 0x24d086, + 0x213f48, + 0x2d6e44, + 0x2bb9c6, + 0x249ac6, + 0x372506, + 0x2ce446, + 0x223a44, + 0x27e706, + 0x2bf646, + 0x298dc6, + 0x23a346, + 0x2025c6, + 0x2e3586, + 0x36f288, + 0x3baf08, + 0x2d5108, + 0x26ce88, + 0x2ec446, + 0x20f505, + 0x367e06, + 0x2ad945, + 0x3a5a47, + 0x268085, + 0x210183, + 0x201a85, + 0x22e044, + 0x202705, + 0x23f183, + 0x346b87, + 0x36bb88, + 0x335486, + 0x2dc68d, + 0x27cf06, + 0x298385, + 0x217043, + 0x2bd189, + 0x2892c6, + 0x294546, + 0x273104, + 0x345dc7, + 0x31ad06, + 0x2d0745, + 0x247343, + 0x208404, + 0x280906, + 0x249bc4, + 0x2d1d08, + 0x203309, + 0x281549, + 0x2a0f8a, + 0x2a258d, + 0x233e47, + 0x23dec6, + 0x21b6c4, + 0x285009, + 0x288308, + 0x289446, + 0x23b386, + 0x296047, + 0x2ddf06, + 0x26f246, + 0x364c06, + 0x33618a, + 0x22ac88, + 0x323245, + 0x25e3c9, + 0x2c770a, + 0x2ffb48, + 0x29cc48, + 0x2944c8, + 0x29f54c, + 0x3245c5, + 0x29fb88, + 0x3bb206, + 0x38f106, + 0x3ce307, + 0x248545, + 0x285e45, + 0x281409, + 0x210907, + 0x365b05, + 0x2a7c47, + 0x202703, + 0x2c7bc5, + 0x224e08, + 0x2ca747, + 0x29cb09, + 0x2f07c5, + 0x345544, + 
0x2a2248, + 0x335907, + 0x2d2348, + 0x3d2888, + 0x2aa005, + 0x365946, + 0x214606, + 0x352909, + 0x2c9ac7, + 0x2adf86, + 0x2257c7, + 0x202c43, + 0x266bc4, + 0x2d6f45, + 0x35d6c4, + 0x24c604, + 0x284cc7, + 0x269287, + 0x270344, + 0x29c950, + 0x367987, + 0x3cc205, + 0x25108c, + 0x211004, + 0x2b6508, + 0x27c4c9, + 0x386786, + 0x31f608, + 0x217ac4, + 0x279f88, + 0x231e46, + 0x304208, + 0x29b5c6, + 0x28a18b, + 0x32cb45, + 0x2d6dc8, + 0x203744, + 0x20374a, + 0x29cb09, + 0x39d686, + 0x3137c8, + 0x286245, + 0x2da004, + 0x2b6406, + 0x2375c8, + 0x283e88, + 0x338b86, + 0x2558c4, + 0x2cd886, + 0x2d2b87, + 0x27c047, + 0x29604f, + 0x203d87, + 0x2fd387, + 0x357385, + 0x36b345, + 0x2a1749, + 0x2ea486, + 0x282045, + 0x2859c7, + 0x2c5848, + 0x2dfc85, + 0x39d786, + 0x22cb08, + 0x31a0ca, + 0x24ab08, + 0x28cec7, + 0x2f3686, + 0x25e386, + 0x2003c3, + 0x20ef43, + 0x2c78c9, + 0x2bc549, + 0x2b5786, + 0x2f07c5, + 0x2d9d08, + 0x3137c8, + 0x370188, + 0x364c8b, + 0x2dc8c7, + 0x318e09, + 0x2962c8, + 0x380204, + 0x3ca748, + 0x28f4c9, + 0x2ae285, + 0x2f1887, + 0x266c45, + 0x283d88, + 0x29184b, + 0x2971d0, + 0x2a8b85, + 0x21258c, + 0x36f545, + 0x256943, + 0x317e46, + 0x2becc4, + 0x249146, + 0x29d287, + 0x22cb84, + 0x2440c8, + 0x30e14d, + 0x31fbc5, + 0x233e84, + 0x221144, + 0x291f49, + 0x2b10c8, + 0x32d007, + 0x231ec8, + 0x284648, + 0x27dd45, + 0x228347, + 0x27dcc7, + 0x31b7c7, + 0x26aa89, + 0x256709, + 0x25ed86, + 0x2dc046, + 0x285a86, + 0x353cc5, + 0x39cf84, + 0x3c54c6, + 0x3c99c6, + 0x27dd88, + 0x38a3cb, + 0x27ab87, + 0x21b6c4, + 0x31ac46, + 0x2e3a87, + 0x366805, + 0x38d7c5, + 0x22eb44, + 0x256686, + 0x3c5548, + 0x285009, + 0x24c086, + 0x288108, + 0x2d0806, + 0x356608, + 0x2cf94c, + 0x27dc06, + 0x29804d, + 0x2984cb, + 0x29efc5, + 0x202087, + 0x376886, + 0x32acc8, + 0x25ee09, + 0x24d348, + 0x3cc205, + 0x24b807, + 0x283548, + 0x2d9689, + 0x2cd546, + 0x260c0a, + 0x32aa48, + 0x24d18b, + 0x2d45cc, + 0x27a088, + 0x27fe06, + 0x227d48, + 0x319d47, + 0x203ec9, + 0x33a8cd, + 0x298906, + 0x2d9e88, + 0x3badc9, + 0x2bdd88, + 0x286a48, + 0x2c008c, + 0x2c1087, + 0x2c1e07, + 0x26a8c5, + 0x2b3447, + 0x2c5708, + 0x2b6486, + 0x24bf0c, + 0x2f8348, + 0x2cfdc8, + 0x26d146, + 0x389fc7, + 0x25ef84, + 0x26ce88, + 0x373b8c, + 0x287b4c, + 0x221485, + 0x3c47c7, + 0x255846, + 0x389f46, + 0x35c9c8, + 0x379f84, + 0x2eca4b, + 0x27a6cb, + 0x2f3686, + 0x30dfc7, + 0x3673c5, + 0x272585, + 0x2ecb86, + 0x286205, + 0x332745, + 0x2cbc87, + 0x2823c9, + 0x2b08c4, + 0x25d405, + 0x2e8b85, + 0x2d1a88, + 0x2e5e85, + 0x2b75c9, + 0x330847, + 0x33084b, + 0x2fd006, + 0x36efc9, + 0x34dc88, + 0x29e305, + 0x31b8c8, + 0x256748, + 0x25ce47, + 0x24bd07, + 0x284d49, + 0x304147, + 0x2ace09, + 0x2b214c, + 0x2b5788, + 0x2d0d89, + 0x2d1207, + 0x284709, + 0x256d07, + 0x2d46c8, + 0x3b5b85, + 0x2bb946, + 0x2c0708, + 0x31f788, + 0x2c75c9, + 0x332787, + 0x252cc5, + 0x247d09, + 0x36a986, + 0x291004, + 0x30c086, + 0x323648, + 0x3a6047, + 0x38a5c8, + 0x214009, + 0x30abc7, + 0x29f3c6, + 0x202144, + 0x201b09, + 0x2281c8, + 0x26d007, + 0x37e146, + 0x38a306, + 0x23dfc4, + 0x3bb446, + 0x202683, + 0x32c6c9, + 0x32cb06, + 0x211ec5, + 0x29f906, + 0x2cc205, + 0x2839c8, + 0x319c07, + 0x39b7c6, + 0x30c346, + 0x34cd88, + 0x2a18c7, + 0x298945, + 0x29c748, + 0x3c00c8, + 0x32aa48, + 0x36f405, + 0x2bb9c6, + 0x281309, + 0x352784, + 0x2cc08b, + 0x26ef4b, + 0x323149, + 0x202703, + 0x259745, + 0x22d986, + 0x382388, + 0x332f44, + 0x335486, + 0x203009, + 0x2e2dc5, + 0x2cbbc6, + 0x335906, + 0x211e04, + 0x2141ca, + 0x211e08, + 0x31f786, + 0x2b9a85, + 0x255b07, + 0x357247, + 0x365944, + 0x26f187, + 0x268044, + 
0x268046, + 0x217a83, + 0x26aa85, + 0x391c05, + 0x363648, + 0x27fa05, + 0x27d949, + 0x26ccc7, + 0x26cccb, + 0x2a334c, + 0x2a394a, + 0x33a787, + 0x200a03, + 0x27b988, + 0x36f5c5, + 0x2dfd05, + 0x351d44, + 0x2d45c6, + 0x27c4c6, + 0x3bb487, + 0x24728b, + 0x223a44, + 0x2d89c4, + 0x2c5fc4, + 0x2cb986, + 0x22cb84, + 0x2bb7c8, + 0x351b45, + 0x270805, + 0x3700c7, + 0x202189, + 0x357005, + 0x39328a, + 0x241649, + 0x2b01ca, + 0x3362c9, + 0x379844, + 0x2e31c5, + 0x2de008, + 0x2fa70b, + 0x2a1185, + 0x2f52c6, + 0x242904, + 0x27de86, + 0x30aa49, + 0x2e3b87, + 0x3c0ec8, + 0x2a2906, + 0x2d2b07, + 0x283e88, + 0x393806, + 0x202a44, + 0x383187, + 0x36e285, + 0x3847c7, + 0x2179c4, + 0x376806, + 0x2eaf48, + 0x298688, + 0x2ef247, + 0x26ec88, + 0x296a85, + 0x202544, + 0x34ba48, + 0x26ed84, + 0x21b005, + 0x301f04, + 0x2ab247, + 0x28a507, + 0x284848, + 0x2d24c6, + 0x27f985, + 0x27d748, + 0x24ad08, + 0x2a0ec9, + 0x26f246, + 0x2318c8, + 0x2035ca, + 0x366888, + 0x2e9385, + 0x21f746, + 0x241508, + 0x24b8ca, + 0x226cc7, + 0x288745, + 0x291208, + 0x2d9244, + 0x2d0986, + 0x2c2188, + 0x2025c6, + 0x368c48, + 0x290c47, + 0x3d1f06, + 0x2b5bc4, + 0x2a74c7, + 0x2b1484, + 0x30aa07, + 0x2cd20d, + 0x2303c5, + 0x2fa28b, + 0x287dc6, + 0x252988, + 0x244084, + 0x2f1006, + 0x280906, + 0x228087, + 0x297d0d, + 0x246f07, + 0x2b1dc8, + 0x2851c5, + 0x35df48, + 0x2c6f06, + 0x296b08, + 0x23c346, + 0x375107, + 0x258c89, + 0x3898c7, + 0x289708, + 0x2764c5, + 0x22b188, + 0x389e85, + 0x3005c5, + 0x336545, + 0x221703, + 0x285d44, + 0x25e3c5, + 0x391009, + 0x37e046, + 0x2e3848, + 0x24bac5, + 0x2b3307, + 0x2a92ca, + 0x2cbb09, + 0x2c3f0a, + 0x2d5188, + 0x2a7a8c, + 0x285a4d, + 0x309543, + 0x368b48, + 0x2083c5, + 0x319e86, + 0x3ca186, + 0x355705, + 0x2258c9, + 0x200985, + 0x27d748, + 0x25a5c6, + 0x358fc6, + 0x2a2109, + 0x3ab547, + 0x291b06, + 0x2a9248, + 0x372408, + 0x2e2947, + 0x2bf7ce, + 0x2c7145, + 0x2d9585, + 0x2024c8, + 0x3018c7, 0x203582, - 0x224a82, - 0x4be10bc3, - 0x4c230b03, - 0x5c6c6, - 0x5c6c6, - 0x28a904, - 0x2182c3, - 0x1dc4a, - 0x11388c, - 0x1540c, - 0xca9cd, - 0xf4d45, - 0x8e2cc, - 0x39e47, - 0x15a46, - 0x1a408, - 0x1d707, - 0x21848, - 0x18248a, - 0x102a47, - 0x4ce8e505, - 0xdbcc9, - 0x3744b, - 0x1c444b, - 0x7c948, - 0x1a049, - 0x1cc2ca, - 0x13db0e, - 0x115a4d, - 0x144844b, - 0xdd2ca, - 0x23084, - 0x6b706, - 0x4a808, - 0x83b88, - 0x37707, - 0x23985, - 0x94d87, - 0x7e409, - 0x112187, - 0x6a488, - 0x107689, - 0x50e84, - 0x51c45, - 0x13a2ce, - 0x15974d, - 0x7c748, - 0x4d36ea46, - 0x4dd6ea48, - 0xb32c8, - 0x1392d0, - 0x5808c, - 0x69487, - 0x6a2c7, - 0x6d887, - 0x72387, - 0xdc82, - 0x67c7, - 0x12ccc, - 0x171205, - 0x76007, - 0xa7806, - 0xa8709, - 0xaa248, - 0x586c2, + 0x2bfc04, + 0x24904a, + 0x26d0c8, + 0x256886, + 0x299b48, + 0x214606, + 0x372148, + 0x2adf88, + 0x300584, + 0x2b36c5, + 0x768244, + 0x768244, + 0x768244, + 0x202303, + 0x38a186, + 0x27dc06, + 0x29ec8c, + 0x202503, + 0x2179c6, + 0x22af04, + 0x289248, + 0x202e45, + 0x249146, + 0x2bd8c8, + 0x2d6306, + 0x39b746, + 0x39d488, + 0x2d6fc7, + 0x303f09, + 0x354d4a, + 0x202e84, + 0x268085, + 0x318b45, + 0x2ca206, + 0x233e86, + 0x29d9c6, + 0x301586, + 0x304044, + 0x30404b, + 0x267b04, + 0x255b85, + 0x2ad285, + 0x2a9686, + 0x206a88, + 0x285907, + 0x32ca84, + 0x22b5c3, + 0x2d8d45, + 0x267e87, + 0x28580b, + 0x363547, + 0x2bd7c8, + 0x2b3807, + 0x26bf06, + 0x27ed48, + 0x29dbcb, + 0x30f9c6, + 0x213489, + 0x29dd45, + 0x3208c3, + 0x2cbbc6, + 0x290b48, + 0x202a83, + 0x267f83, + 0x283e86, + 0x214606, + 0x37a88a, + 0x27fe45, + 0x28074b, + 0x29f84b, + 0x206983, + 0x2196c3, + 0x2afc44, + 0x2143c7, + 
0x27a084, + 0x289244, + 0x3bb084, + 0x366b88, + 0x2b99c8, + 0x20eec9, + 0x2ca048, + 0x3367c7, + 0x23a346, + 0x2e348f, + 0x2c7286, + 0x2d48c4, + 0x2b980a, + 0x267d87, + 0x2b1586, + 0x291049, + 0x20ee45, + 0x363785, + 0x20ef86, + 0x22b2c3, + 0x2d9289, + 0x22ae06, + 0x213dc9, + 0x39f586, + 0x26aa85, + 0x221885, + 0x203d83, + 0x214508, + 0x32d1c7, + 0x365a44, + 0x2890c8, + 0x38ee84, + 0x308186, + 0x317e46, + 0x23fb06, + 0x2d6c89, + 0x2dfc85, + 0x298a06, + 0x38f2c9, + 0x2c5406, + 0x2e3586, + 0x3a4e86, + 0x27e4c5, + 0x301f06, + 0x375104, + 0x3b5b85, + 0x2c0704, + 0x2b2906, + 0x376984, + 0x204043, + 0x2883c5, + 0x2364c8, + 0x2e1887, + 0x332fc9, + 0x288648, + 0x299191, + 0x33598a, + 0x2f35c7, + 0x2e2406, + 0x22af04, + 0x2c0808, + 0x270088, + 0x29934a, + 0x2b738d, + 0x2a1086, + 0x39d586, + 0x2a7586, + 0x2cc687, + 0x2b1e85, + 0x3ccd47, + 0x289185, + 0x330984, + 0x2a7fc6, + 0x2d9bc7, + 0x2d8f8d, + 0x241447, + 0x37ec88, + 0x27da49, + 0x21f646, + 0x2cd4c5, + 0x23f1c4, + 0x323746, + 0x365846, + 0x26d246, + 0x29a3c8, + 0x227403, + 0x228083, + 0x375ac5, + 0x27fac6, + 0x2adf45, + 0x2a2b08, + 0x29d44a, + 0x319504, + 0x289248, + 0x2944c8, + 0x2a94c7, + 0x24bb89, + 0x2bd4c8, + 0x285087, + 0x3bb306, + 0x2025ca, + 0x3237c8, + 0x352409, + 0x2b1188, + 0x35af09, + 0x2e22c7, + 0x381ac5, + 0x364e86, + 0x2b6308, + 0x284008, + 0x294648, + 0x2f3788, + 0x255b85, + 0x212c44, + 0x234b08, + 0x242684, + 0x3360c4, + 0x26aa85, + 0x293347, + 0x201f49, + 0x227e87, + 0x203605, + 0x279e86, + 0x363046, + 0x2038c4, + 0x2a2446, + 0x27b604, + 0x2a5746, + 0x201d06, + 0x213ac6, + 0x3cc205, + 0x2a29c7, + 0x200a03, + 0x224a09, + 0x34cb88, + 0x284f04, + 0x284f0d, + 0x298788, + 0x314a48, + 0x352386, + 0x258d89, + 0x2cbb09, + 0x30a745, + 0x29d54a, + 0x270aca, + 0x28a6cc, + 0x28a846, + 0x27b386, + 0x2c7b06, + 0x3a00c9, + 0x31a0c6, + 0x2a1906, + 0x200a46, + 0x26ce88, + 0x24ab06, + 0x2d424b, + 0x2934c5, + 0x270805, + 0x27c145, + 0x30b106, + 0x202583, + 0x23fa86, + 0x2413c7, + 0x2c06c5, + 0x25c1c5, + 0x34ab45, + 0x3c8686, + 0x30a804, + 0x332146, + 0x2fac49, + 0x30af8c, + 0x3306c8, + 0x237544, + 0x301c06, + 0x287ec6, + 0x290b48, + 0x3137c8, + 0x30ae89, + 0x255b07, + 0x35a189, + 0x251a06, + 0x22c404, + 0x20bd04, + 0x283c84, + 0x283e88, + 0x201d8a, + 0x356f86, + 0x36b207, + 0x384a47, + 0x36f0c5, + 0x321544, + 0x28f486, + 0x2b1ec6, + 0x2456c3, + 0x34c9c7, + 0x3d2788, + 0x30a88a, + 0x30e408, + 0x3633c8, + 0x3769c5, + 0x29f0c5, + 0x27ac85, + 0x36f486, + 0x3a0446, + 0x30d385, + 0x32c909, + 0x32134c, + 0x27ad47, + 0x2993c8, + 0x25fb85, + 0x768244, + 0x2ed944, + 0x2ca884, + 0x21e586, + 0x2a074e, + 0x363807, + 0x2cc885, + 0x35270c, + 0x302007, + 0x2d9b47, + 0x355f49, + 0x21fd49, + 0x288745, + 0x34cb88, + 0x281309, + 0x32a905, + 0x2c0608, + 0x22af86, + 0x34bcc6, + 0x24ef04, + 0x28d648, + 0x21f803, + 0x2289c4, + 0x2d8dc5, + 0x399047, + 0x38b1c5, + 0x203489, + 0x2b4ecd, + 0x2e4346, + 0x22b604, + 0x31fd08, + 0x28220a, + 0x27b047, + 0x22d4c5, + 0x228a03, + 0x29fa0e, + 0x21460c, + 0x2ffc47, + 0x2a0907, + 0x217a03, + 0x31a105, + 0x2ca885, + 0x299f08, + 0x297809, + 0x237446, + 0x27a084, + 0x2f3506, + 0x2369cb, + 0x2db70c, + 0x39de47, + 0x2d4505, + 0x3bffc8, + 0x2e2705, + 0x2b9807, + 0x3357c7, + 0x241205, + 0x202583, + 0x366ec4, + 0x3a5f05, + 0x2b07c5, + 0x2b07c6, + 0x2c0e88, + 0x2d9bc7, + 0x3ca486, + 0x204146, + 0x336486, + 0x273989, + 0x228447, + 0x26d506, + 0x2db886, + 0x278e86, + 0x2a9045, + 0x20c806, + 0x364205, + 0x2e5f08, + 0x292c4b, + 0x28e546, + 0x384a84, + 0x3029c9, + 0x26ccc4, + 0x22af08, + 0x30c187, + 0x286944, + 0x2bbf88, + 0x2c1c04, + 0x2a9084, + 
0x289005, + 0x31fc06, + 0x366ac7, + 0x206743, + 0x29f485, + 0x339204, + 0x2d95c6, + 0x30a7c8, + 0x373a85, + 0x292909, + 0x247f05, + 0x2179c8, + 0x281047, + 0x32cc08, + 0x2bb507, + 0x2fd449, + 0x27e586, + 0x33e986, + 0x200a44, + 0x2d8905, + 0x310b8c, + 0x27c147, + 0x27ce07, + 0x233ac8, + 0x2e4346, + 0x272784, + 0x3af304, + 0x284bc9, + 0x2c7c06, + 0x26ff47, + 0x2ce3c4, + 0x248cc6, + 0x3c9cc5, + 0x2d2007, + 0x2d41c6, + 0x260ac9, + 0x2ce947, + 0x296047, + 0x2a1f86, + 0x248c05, + 0x281ec8, + 0x22ac88, + 0x23a546, + 0x373ac5, + 0x377c86, + 0x202ac3, + 0x299d89, + 0x29d74e, + 0x2bb248, + 0x38ef88, + 0x23a34b, + 0x292b46, + 0x366504, + 0x285644, + 0x29d84a, + 0x212487, + 0x26d5c5, + 0x213489, + 0x2bf705, + 0x336107, + 0x24aa84, + 0x2aaa87, + 0x31e388, + 0x2e7606, + 0x38ecc9, + 0x2bd5ca, + 0x212406, + 0x2982c6, + 0x2ad205, + 0x39a605, + 0x35bf07, + 0x2482c8, + 0x3c9c08, + 0x300586, + 0x221905, + 0x233c0e, + 0x2bdc84, + 0x23a4c5, + 0x279809, + 0x2ea288, + 0x28ce06, + 0x29c24c, + 0x29d050, + 0x2a038f, + 0x2a1648, + 0x33a787, + 0x3cc205, + 0x25e3c5, + 0x366949, + 0x291409, + 0x2cd986, + 0x2a1207, + 0x2d8805, + 0x230349, + 0x349c06, + 0x319f0d, + 0x283b49, + 0x289244, + 0x2bafc8, + 0x234bc9, + 0x357146, + 0x27bb85, + 0x33e986, + 0x3c0d89, + 0x3656c8, + 0x20f505, + 0x2036c4, + 0x29c40b, + 0x357005, + 0x29c546, + 0x285d86, + 0x36a046, + 0x29234b, + 0x292a09, + 0x204085, + 0x3a5947, + 0x335906, + 0x252b06, + 0x2ca608, + 0x21b149, + 0x37ea4c, + 0x267c88, + 0x318b86, + 0x338b83, + 0x23b786, + 0x292185, + 0x280a88, + 0x221306, + 0x2d2248, + 0x2486c5, + 0x299505, + 0x35e388, + 0x3722c7, + 0x3ca0c7, + 0x3bb487, + 0x31f608, + 0x2ca308, + 0x2b1946, + 0x2b2747, + 0x266a87, + 0x29204a, + 0x23ee83, + 0x30b106, + 0x201ec5, + 0x249044, + 0x27da49, + 0x2fd3c4, + 0x2e1904, + 0x29b644, + 0x2a090b, + 0x32d107, + 0x233e45, + 0x296788, + 0x279e86, + 0x279e88, + 0x27fd86, + 0x28d585, + 0x28d845, + 0x28ffc6, + 0x290908, + 0x290f88, + 0x27dc06, + 0x2965cf, + 0x299850, + 0x206245, + 0x200a03, + 0x22c4c5, + 0x318d48, + 0x291309, + 0x32aa48, + 0x38eb48, + 0x23da88, + 0x32d1c7, + 0x279b49, + 0x2d2448, + 0x290804, + 0x29b4c8, + 0x2d1b49, + 0x2b2dc7, + 0x29b444, + 0x227f48, + 0x2a278a, + 0x2e62c6, + 0x2a1086, + 0x26f109, + 0x29d287, + 0x2cf008, + 0x223148, + 0x2c6888, + 0x38f705, + 0x216385, + 0x270805, + 0x2ca845, + 0x397ec7, + 0x202585, + 0x2c06c5, + 0x2754c6, + 0x32a987, + 0x2fa647, + 0x2a2a86, + 0x2d56c5, + 0x29c546, + 0x27ba45, + 0x2d8688, + 0x2ffac4, + 0x2c5486, + 0x32ec04, + 0x2da008, + 0x3047ca, + 0x27e30c, + 0x247485, + 0x2cc746, + 0x37ec06, + 0x29ae46, + 0x318c04, + 0x3c9f85, + 0x27f707, + 0x29d309, + 0x2cbf47, + 0x768244, + 0x768244, + 0x32cf85, + 0x2d3344, + 0x29bc0a, + 0x279d06, + 0x27ef84, + 0x3c4745, + 0x393d05, + 0x2b1dc4, + 0x2859c7, + 0x247e87, + 0x2cb988, + 0x368ec8, + 0x20f509, + 0x270748, + 0x29bdcb, + 0x2b02c4, + 0x252c05, + 0x2820c5, + 0x3bb409, + 0x21b149, + 0x3028c8, + 0x267b08, + 0x2a9684, + 0x287f05, + 0x201dc3, + 0x2ca1c5, + 0x298a86, + 0x29764c, + 0x22ad06, + 0x27ba86, + 0x28d085, + 0x3c8708, + 0x3ce446, + 0x2e2586, + 0x2a1086, + 0x2b0b0c, + 0x26d404, + 0x3365ca, + 0x28cfc8, + 0x297487, + 0x339106, + 0x237507, + 0x2f3105, + 0x37e146, + 0x361d86, + 0x378cc7, + 0x2bd2c4, + 0x2ab345, + 0x279804, + 0x330a07, + 0x279a48, + 0x27b20a, + 0x2833c7, + 0x2a8c47, + 0x33a707, + 0x2e2849, + 0x29764a, + 0x22c3c3, + 0x2e1845, + 0x213b03, + 0x3bb0c9, + 0x375388, + 0x357387, + 0x32ab49, + 0x22ad86, + 0x3b5c48, + 0x346b05, + 0x24ae0a, + 0x216709, + 0x24cf49, + 0x3ce307, + 0x270189, + 0x2139c8, + 0x378e86, + 0x2cc908, + 
0x3d03c7, + 0x304147, + 0x241647, + 0x2eadc8, + 0x301a86, + 0x2a2545, + 0x27f707, + 0x297dc8, + 0x32eb84, + 0x2fb244, + 0x291a07, + 0x2ae307, + 0x28118a, + 0x378e06, + 0x35dd4a, + 0x2bfb47, + 0x2bda47, + 0x2ab404, + 0x2acec4, + 0x2d1f06, + 0x31af84, + 0x31af8c, + 0x34de45, + 0x21af89, + 0x2b0044, + 0x2b1e85, + 0x282188, + 0x27f145, + 0x393286, + 0x230244, + 0x2ebaca, + 0x2c99c6, + 0x29cdca, + 0x218587, + 0x28a285, + 0x22b2c5, + 0x36f10a, + 0x290a85, + 0x2a0f86, + 0x242684, + 0x2afdc6, + 0x35bfc5, + 0x2213c6, + 0x2ef24c, + 0x2b0d8a, + 0x270bc4, + 0x23a346, + 0x29d287, + 0x2d4144, + 0x26ce88, + 0x2f51c6, + 0x3848c9, + 0x2df7c9, + 0x2b5889, + 0x2cc246, + 0x3d04c6, + 0x2cca47, + 0x32c848, + 0x3d02c9, + 0x32d107, + 0x296906, + 0x2d2b87, + 0x2a7445, + 0x2bdc84, + 0x2cc607, + 0x266c45, + 0x288f45, + 0x3732c7, + 0x2410c8, + 0x3bff46, + 0x298c0d, + 0x29a10f, + 0x29f84d, + 0x203044, + 0x2365c6, + 0x2d7588, + 0x200a05, + 0x292208, + 0x25cd0a, + 0x289244, + 0x236c06, + 0x2d4947, + 0x223a47, + 0x2d7089, + 0x2cc8c5, + 0x2b1dc4, + 0x2b360a, + 0x2bd089, + 0x270287, + 0x298ec6, + 0x357146, + 0x287e46, + 0x383246, + 0x2d694f, + 0x2d7449, + 0x24ab06, + 0x38b8c6, + 0x32bf09, + 0x2b2847, + 0x209343, + 0x296b86, + 0x20ef43, + 0x3555c8, + 0x2d29c7, + 0x2a1849, + 0x317cc8, + 0x3ca208, + 0x3328c6, + 0x2ce209, + 0x3a6805, + 0x23b6c4, + 0x381b87, + 0x3a0145, + 0x203044, + 0x233f08, + 0x212744, + 0x2b2587, + 0x36bb06, + 0x3bcb45, + 0x2b1188, + 0x35700b, + 0x321887, + 0x36f386, + 0x2c7304, + 0x366486, + 0x26aa85, + 0x266c45, + 0x281c49, + 0x2855c9, + 0x2cc004, + 0x3041c5, + 0x23a385, + 0x24ac86, + 0x34cc88, + 0x2bf0c6, + 0x3d25cb, + 0x38660a, + 0x2bb605, + 0x28d8c6, + 0x319205, + 0x275405, + 0x2a9107, + 0x30b388, + 0x2707c4, + 0x2496c6, + 0x291006, + 0x213b87, + 0x320884, + 0x280906, + 0x2f1a85, + 0x2f1a89, + 0x210a44, + 0x3216c9, + 0x27dc06, + 0x2c1148, + 0x23a385, + 0x384b45, + 0x2213c6, + 0x37e949, + 0x21fd49, + 0x27bb06, + 0x2ea388, + 0x2b5008, + 0x3191c4, + 0x2b4104, + 0x2b4108, + 0x2b8ac8, + 0x35a289, + 0x298a06, + 0x2a1086, + 0x3381cd, + 0x335486, + 0x2cf809, + 0x367f05, + 0x20ef86, + 0x2c6a08, + 0x332085, + 0x266ac4, + 0x26aa85, + 0x284a48, + 0x29b9c9, + 0x2798c4, + 0x376806, + 0x27f00a, + 0x2ffb48, + 0x281309, + 0x38be0a, + 0x32aac6, + 0x29a2c8, + 0x2b95c5, + 0x28d248, + 0x2f3185, + 0x22ac49, + 0x387089, + 0x237482, + 0x29dd45, + 0x2722c6, + 0x27db47, + 0x38cf85, + 0x31e286, + 0x315488, + 0x2e4346, + 0x2ddec9, + 0x27cf06, + 0x2ca488, + 0x2202c5, + 0x3ad446, + 0x375208, + 0x283e88, + 0x2e21c8, + 0x2e8988, + 0x20c804, + 0x266083, + 0x2de104, + 0x2835c6, + 0x2a7484, + 0x38eec7, + 0x2e2489, + 0x2c5fc5, + 0x223146, + 0x296b86, + 0x2c0ccb, + 0x2b14c6, + 0x2b8dc6, + 0x2c5588, + 0x2ed386, + 0x2b7683, + 0x209fc3, + 0x2bdc84, + 0x2317c5, + 0x2d0647, + 0x279a48, + 0x279a4f, + 0x27f60b, + 0x34ca88, + 0x376886, + 0x34cd8e, + 0x2213c3, + 0x2d05c4, + 0x2b1445, + 0x2b1c46, + 0x28f58b, + 0x293406, + 0x22cb89, + 0x3bcb45, + 0x254548, + 0x205288, + 0x21fc0c, + 0x2a0946, + 0x2ca206, + 0x2f07c5, + 0x2894c8, + 0x27e305, + 0x380208, + 0x29c5ca, + 0x29fc89, + 0x768244, + 0x2000c2, + 0x3f609302, + 0x200382, + 0x21e084, + 0x209382, + 0x229d44, + 0x203202, + 0x2543, + 0x2003c2, + 0x201202, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x373a83, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x215c83, + 0x24b583, + 0x20cc03, + 0x28b004, + 0x209303, + 0x238084, + 0x2351c3, + 0x2da884, + 0x22b883, + 0x38d247, + 0x2287c3, + 0x202543, + 0x229188, + 0x24b583, + 0x2a538b, + 0x2f4483, + 0x3acb86, + 
0x216a42, + 0x2ee5cb, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x24b583, + 0x220003, + 0x21bec3, + 0x2000c2, + 0xaf0c8, + 0x2220c5, + 0x266cc8, + 0x2fe488, + 0x209302, + 0x375785, + 0x329d47, + 0x2012c2, + 0x2442c7, + 0x200382, + 0x25f3c7, + 0x2b7e09, + 0x2b9188, + 0x2c6709, + 0x20dc02, + 0x269e87, + 0x23a1c4, + 0x329e07, + 0x386507, + 0x25dcc2, + 0x2287c3, + 0x204842, + 0x203202, + 0x2003c2, + 0x202882, + 0x200902, + 0x201202, + 0x2a9b05, + 0x33d805, + 0x9302, + 0x351c3, + 0x209303, + 0x2351c3, + 0x206843, + 0x22b883, + 0x202b03, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0xe6243, + 0x24b583, + 0xd703, + 0x101, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x20f8c3, + 0x215c83, + 0xe6243, + 0x24b583, + 0x215943, + 0x42871c46, + 0x9e183, + 0xc7545, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0xe6243, + 0x24b583, + 0x6102, + 0xaf0c8, + 0x2543, + 0xe6243, + 0x48284, + 0xe2b05, + 0x2000c2, + 0x3aec84, + 0x209303, + 0x2351c3, + 0x22b883, + 0x243583, + 0x230105, + 0x20f8c3, + 0x216c03, + 0x215c83, + 0x24fb43, + 0x24b583, + 0x201203, + 0x200b43, + 0x20ab43, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209302, + 0x24b583, + 0xaf0c8, + 0x22b883, + 0xe6243, + 0xaf0c8, + 0xe6243, + 0x2b5b43, + 0x209303, + 0x2320c4, + 0x2351c3, + 0x22b883, + 0x206982, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x206982, + 0x209383, + 0x215c83, + 0x24b583, + 0x2ec143, + 0x201203, + 0x2000c2, + 0x209302, + 0x22b883, + 0x215c83, + 0x24b583, + 0x3acb85, + 0x151746, + 0x28b004, + 0x216a42, + 0xaf0c8, + 0x2000c2, + 0xf6d85, + 0x19908, + 0x1943, + 0x209302, + 0x46c92e86, + 0x1cb04, + 0xe4f8b, + 0x3afc6, + 0x288c7, + 0x2351c3, + 0x4cc88, + 0x22b883, + 0x11a745, + 0x168004, + 0x22afc3, + 0x518c7, + 0xdde04, + 0x215c83, + 0x7bc06, + 0xe6084, + 0xe6243, + 0x24b583, + 0x2f6044, + 0x12d707, + 0x151349, + 0xe4d48, + 0xf7484, + 0x40386, + 0xd7c8, + 0x13fd05, + 0xa109, + 0xf6d85, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x202543, + 0x24b583, + 0x2f4483, + 0x216a42, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x20f703, + 0x226004, + 0x215c83, + 0x2543, + 0x24b583, + 0x209303, + 0x2351c3, + 0x2da884, + 0x22b883, + 0x215c83, + 0x24b583, + 0x3acb86, + 0x2351c3, + 0x22b883, + 0x41f03, + 0xe6243, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0xf6d85, + 0x288c7, + 0xaf0c8, + 0x22b883, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x49a09303, + 0x2351c3, + 0x215c83, + 0x24b583, + 0xaf0c8, + 0x2000c2, + 0x209302, + 0x209303, + 0x22b883, + 0x215c83, + 0x2003c2, + 0x24b583, + 0x33b587, + 0x31bb4b, + 0x204243, + 0x2cd648, + 0x32c5c7, + 0x210f06, + 0x209d85, + 0x3758c9, + 0x228548, + 0x37dc09, + 0x3a7cd0, + 0x37dc0b, + 0x2f2249, + 0x202943, + 0x2756c9, + 0x233486, + 0x23348c, + 0x222188, + 0x3ce148, + 0x3b7049, + 0x35308e, + 0x2b7bcb, + 0x3650cc, + 0x203ac3, + 0x28f18c, + 0x203ac9, + 0x23ec87, + 0x23510c, + 0x3c790a, + 0x243644, + 0x24d60d, + 0x28f048, + 0x20cc0d, + 0x295106, + 0x28b00b, + 0x3514c9, + 0x38bb47, + 0x3674c6, + 0x3cc7c9, + 0x30a3ca, + 0x328608, + 0x2f4084, + 0x341187, + 0x245c87, + 0x2ce5c4, + 0x2e54c4, + 0x398549, + 0x30f809, + 0x217f88, + 0x20d385, + 0x20db45, + 0x20b1c6, + 0x24d4c9, + 0x25cf8d, + 0x2f53c8, + 0x20b0c7, + 0x209e08, + 0x303cc6, + 0x23cc44, + 0x2a3f45, + 0x3d01c6, + 
0x3d1c04, + 0x2039c7, + 0x20568a, + 0x20f444, + 0x212346, + 0x213109, + 0x21310f, + 0x2136cd, + 0x214906, + 0x219510, + 0x219906, + 0x219e87, + 0x21a747, + 0x21a74f, + 0x21b7c9, + 0x224546, + 0x224c47, + 0x224c48, + 0x225009, + 0x3bcc08, + 0x2e9007, + 0x2220c3, + 0x22e706, + 0x377088, + 0x35334a, + 0x3c5e49, + 0x21b583, + 0x329c46, + 0x24950a, + 0x290507, + 0x23eaca, + 0x312d4e, + 0x21b906, + 0x29df47, + 0x21f9c6, + 0x203b86, + 0x21618b, + 0x221aca, + 0x2ab74d, + 0x3d0587, + 0x269448, + 0x269449, + 0x26944f, + 0x33314c, + 0x280d09, + 0x2e148e, + 0x38d34a, + 0x2b9e46, + 0x322786, + 0x332bcc, + 0x334d0c, + 0x345988, + 0x3897c7, + 0x2d9a45, + 0x293204, + 0x30fc8e, + 0x265dc4, + 0x3ca9c7, + 0x3cba4a, + 0x22db14, + 0x22e14f, + 0x21a908, + 0x22e5c8, + 0x33f6cd, + 0x33f6ce, + 0x22e889, + 0x2307c8, + 0x2307cf, + 0x234e0c, + 0x234e0f, + 0x236307, + 0x2388ca, + 0x24680b, + 0x239a48, + 0x23b907, + 0x26014d, + 0x331686, + 0x24d7c6, + 0x23f909, + 0x2a78c8, + 0x244c48, + 0x244c4e, + 0x31bc47, + 0x246ac5, + 0x248045, + 0x204504, + 0x2111c6, + 0x217e88, + 0x30f003, + 0x2f4dce, + 0x260508, + 0x37e30b, + 0x311d47, + 0x3003c5, + 0x28f306, + 0x2ac547, + 0x2fb7c8, + 0x33fb09, + 0x331905, + 0x288408, + 0x227006, + 0x3a944a, + 0x30fb89, + 0x2351c9, + 0x2351cb, + 0x368908, + 0x2ce489, + 0x20d446, + 0x3912ca, + 0x20684a, + 0x238acc, + 0x365d07, + 0x2c650a, + 0x32dbcb, + 0x32dbd9, + 0x31ce88, + 0x3acc05, + 0x260306, + 0x26bb89, + 0x38db06, + 0x28c28a, + 0x228746, + 0x223dc4, + 0x2c868d, + 0x30f447, + 0x223dc9, + 0x24b085, + 0x24c208, + 0x24ca49, + 0x24ce84, + 0x24e007, + 0x24e008, + 0x24e307, + 0x2685c8, + 0x251ec7, + 0x204305, + 0x259b8c, + 0x25a3c9, + 0x2d144a, + 0x3ab3c9, + 0x2757c9, + 0x38b68c, + 0x25e88b, + 0x25f6c8, + 0x260908, + 0x2643c4, + 0x286608, + 0x2874c9, + 0x3c79c7, + 0x213346, + 0x29b807, + 0x28e889, + 0x36700b, + 0x29acc7, + 0x3cc5c7, + 0x2186c7, + 0x20cb84, + 0x20cb85, + 0x2da585, + 0x3510cb, + 0x3b4944, + 0x2badc8, + 0x2f86ca, + 0x2270c7, + 0x3c2907, + 0x28e0d2, + 0x2a5646, + 0x231a46, + 0x371cce, + 0x2a6106, + 0x294348, + 0x294acf, + 0x20cfc8, + 0x39c2c8, + 0x2c17ca, + 0x2c17d1, + 0x2a2d0e, + 0x20104a, + 0x20104c, + 0x2309c7, + 0x2309d0, + 0x3c9a48, + 0x2a2f05, + 0x2acbca, + 0x3d1c4c, + 0x296c4d, + 0x3066c6, + 0x3066c7, + 0x3066cc, + 0x39068c, + 0x21920c, + 0x2aadcb, + 0x38fb44, + 0x26f284, + 0x3991c9, + 0x3af387, + 0x3a26c9, + 0x206689, + 0x3bd507, + 0x3c7786, + 0x3c7789, + 0x2ae4c3, + 0x2e444a, + 0x376f47, + 0x38accb, + 0x2ab5ca, + 0x23a244, + 0x3b6286, + 0x283649, + 0x31ae04, + 0x34df0a, + 0x36f685, + 0x2be085, + 0x2be08d, + 0x2be3ce, + 0x2de245, + 0x339886, + 0x3ac787, + 0x259e0a, + 0x37bd46, + 0x2eb504, + 0x3053c7, + 0x2659cb, + 0x303d87, + 0x228b44, + 0x284246, + 0x28424d, + 0x2dcdcc, + 0x215b46, + 0x2f55ca, + 0x335186, + 0x23f2c8, + 0x237ec7, + 0x24334a, + 0x362cc6, + 0x280983, + 0x2b9f46, + 0x3c4288, + 0x39934a, + 0x286bc7, + 0x286bc8, + 0x2d25c4, + 0x28d3c7, + 0x36aa08, + 0x299548, + 0x2b1a48, + 0x3bb5ca, + 0x2e1305, + 0x209387, + 0x23ba53, + 0x258246, + 0x20fac8, + 0x21d849, + 0x244188, + 0x33294b, + 0x3ca588, + 0x265b04, + 0x35e486, + 0x323f06, + 0x31fa49, + 0x2c4207, + 0x259c88, + 0x2996c6, + 0x3731c4, + 0x256345, + 0x2ce788, + 0x25714a, + 0x2c8308, + 0x2cf546, + 0x29a4ca, + 0x2b0948, + 0x2d3f48, + 0x2d4b08, + 0x2d5386, + 0x2d7786, + 0x3aae8c, + 0x2d7d50, + 0x2a0cc5, + 0x20cdc8, + 0x330310, + 0x20cdd0, + 0x3a7b4e, + 0x3aab0e, + 0x3aab14, + 0x3b058f, + 0x3b0946, + 0x200f11, + 0x374993, + 0x374e08, + 0x30b905, + 0x2cdb88, + 0x2ff9c5, + 0x2e5c0c, + 0x22a089, + 0x293049, + 0x22a507, + 
0x3a9849, + 0x35a547, + 0x301d86, + 0x2a3d47, + 0x21a485, + 0x20d743, + 0x30f1c9, + 0x25c649, + 0x241f03, + 0x38ce84, + 0x275b0d, + 0x35c0cf, + 0x373205, + 0x34b186, + 0x224087, + 0x221f07, + 0x3c12c6, + 0x3c12cb, + 0x2a3b05, + 0x25c406, + 0x303247, + 0x2525c9, + 0x386c06, + 0x30dec5, + 0x20478b, + 0x216606, + 0x35ac45, + 0x24ed88, + 0x2b4cc8, + 0x2aec0c, + 0x2aec10, + 0x2ae8c9, + 0x308687, + 0x2be94b, + 0x2fa146, + 0x2e8eca, + 0x2e9c0b, + 0x2eaa0a, + 0x2eac86, + 0x2ec005, + 0x32c4c6, + 0x27a2c8, + 0x22a5ca, + 0x33f35c, + 0x2f454c, + 0x2f4848, + 0x3acb85, + 0x38e587, + 0x30d1c6, + 0x282585, + 0x216046, + 0x3c1488, + 0x2bd307, + 0x352f88, + 0x25830a, + 0x22418c, + 0x20b3c9, + 0x2232c7, + 0x287a04, + 0x248106, + 0x39be4a, + 0x206785, + 0x22070c, + 0x222b08, + 0x328888, + 0x389acc, + 0x23140c, + 0x239d89, + 0x239fc7, + 0x24a50c, + 0x22a884, + 0x25150a, + 0x30604c, + 0x27418b, + 0x25b94b, + 0x25c006, + 0x264547, + 0x230c07, + 0x230c0f, + 0x307091, + 0x2deed2, + 0x26878d, + 0x26878e, + 0x268ace, + 0x3b0748, + 0x3b0752, + 0x271dc8, + 0x21de87, + 0x25034a, + 0x2a5f08, + 0x2a60c5, + 0x397d0a, + 0x219c87, + 0x2f6b44, + 0x21ae83, + 0x2385c5, + 0x2c1a47, + 0x306307, + 0x296e4e, + 0x351f8d, + 0x359549, + 0x247905, + 0x3a8083, + 0x207686, + 0x25d705, + 0x37e548, + 0x2b7089, + 0x260345, + 0x26034f, + 0x2e5087, + 0x209c05, + 0x31270a, + 0x381e46, + 0x26a389, + 0x2ff50c, + 0x3011c9, + 0x208446, + 0x2f84cc, + 0x338c86, + 0x304f48, + 0x305586, + 0x31d006, + 0x2b1644, + 0x31f9c3, + 0x2b3e8a, + 0x313a11, + 0x25560a, + 0x25ecc5, + 0x26fc07, + 0x258687, + 0x2f0d44, + 0x36ab0b, + 0x2b9008, + 0x2bb0c6, + 0x233b45, + 0x2654c4, + 0x24e709, + 0x2008c4, + 0x244a87, + 0x3013c5, + 0x3013c7, + 0x371f05, + 0x38f243, + 0x21dd48, + 0x3c9d4a, + 0x206743, + 0x22210a, + 0x3c1106, + 0x2600cf, + 0x3bc1c9, + 0x2f4d50, + 0x2f9ec8, + 0x2cfec9, + 0x297b47, + 0x2841cf, + 0x32af04, + 0x2da904, + 0x219786, + 0x35aa86, + 0x3a394a, + 0x27bec6, + 0x358687, + 0x313e48, + 0x314047, + 0x315247, + 0x31620a, + 0x31808b, + 0x3cce85, + 0x2deb08, + 0x230483, + 0x3b5f4c, + 0x344a8f, + 0x2d984d, + 0x25a807, + 0x359689, + 0x241947, + 0x25acc8, + 0x22dd0c, + 0x2b5308, + 0x271408, + 0x32e68e, + 0x341b14, + 0x342024, + 0x358d8a, + 0x37f04b, + 0x35a604, + 0x35a609, + 0x236c88, + 0x248845, + 0x30eb0a, + 0x260747, + 0x32c3c4, + 0x373a83, + 0x209303, + 0x238084, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x20f8c3, + 0x2287c3, + 0x2d7d46, + 0x226004, + 0x215c83, + 0x24b583, + 0x214e83, + 0x2000c2, + 0x373a83, + 0x209302, + 0x209303, + 0x238084, + 0x2351c3, + 0x22b883, + 0x20f8c3, + 0x2d7d46, + 0x215c83, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x210a43, + 0x215c83, + 0xe6243, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x226004, + 0x215c83, + 0x24b583, + 0x2000c2, + 0x243003, + 0x209302, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x200dc2, + 0x200bc2, + 0x209302, + 0x209303, + 0x20e7c2, + 0x2005c2, + 0x21e084, + 0x229d44, + 0x26da42, + 0x226004, + 0x2003c2, + 0x24b583, + 0x214e83, + 0x25c006, + 0x22c682, + 0x202fc2, + 0x21ea82, + 0x4c20cfc3, + 0x4c601043, + 0x58fc6, + 0x58fc6, + 0x28b004, + 0x202543, + 0x17d0a, + 0x11c58c, + 0x14418c, + 0xc734d, + 0xf6d85, + 0x8bc0c, + 0x6dec7, + 0x10446, + 0x14a88, + 0x177c7, + 0x1c208, + 0x18588a, + 0x105887, + 0x4d28be45, + 0xdb3c9, + 0x3704b, + 0x1c5b8b, + 0x28948, + 0xfec9, + 0x8ca8a, + 0x1951ce, + 0x11e88d, + 0x1443f8b, + 0xdcc8a, + 0x1cb04, + 0x68306, + 0x4b6c8, + 0x73dc8, + 0x37307, + 0x1d405, + 0x93f07, + 0x7c709, + 0x11ae87, + 0x65e48, + 0x109dc9, + 0x4e484, + 
0x4ff05, + 0x13c78e, + 0x2030d, + 0x28748, + 0x4d771486, + 0x4e171488, + 0xe4a08, + 0x13b790, + 0x54c4c, + 0x650c7, + 0x65c87, + 0x6acc7, + 0x71fc7, + 0xb182, + 0x16a547, + 0x21c8c, + 0x10d445, + 0x2d747, + 0xa5246, + 0xa63c9, + 0xa8148, + 0x55282, 0x5c2, - 0x3cf0b, - 0xe5ec7, - 0x157cc9, - 0x5f3c9, - 0x168708, - 0xb38c2, - 0x1a4789, - 0xd300a, - 0x6206, - 0xcee09, - 0xdd247, - 0xdd989, - 0xe0348, - 0xe1347, - 0xe27c9, - 0xe6805, - 0xe6b90, - 0x1c9186, - 0x127cc5, - 0x165807, - 0x1ce90d, - 0x47245, - 0x29b46, - 0xeef07, - 0xf5b58, - 0x112508, - 0x600a, - 0x4e42, - 0x5768a, - 0x6f10d, - 0x1002, - 0xea506, - 0x90f08, - 0x4f208, - 0x70609, - 0x10bbc8, - 0x7a84e, - 0x5b0c7, - 0x10670d, - 0xfc445, - 0x6548, - 0x1aaec8, - 0x107106, - 0xab42, - 0xda7c6, - 0x44a06, - 0x1242, + 0x3dd0b, + 0xe6107, + 0x23bc9, + 0x47089, + 0xccd08, + 0xb0442, + 0x1a5b89, + 0xd180a, + 0x169f86, + 0xcb209, + 0xdcc07, + 0xdd349, + 0xde388, + 0xdf387, + 0xe1289, + 0xe6a45, + 0xe6dd0, + 0x12e1c6, + 0x178145, + 0x8ec87, + 0x1038cd, + 0x42a85, + 0x256c6, + 0xeeac7, + 0xf6058, + 0x11b208, + 0x169d8a, + 0x6702, + 0x5b70a, + 0x76c8d, + 0x3182, + 0xea746, + 0x8f848, + 0x4b248, + 0x6f949, + 0x1147c8, + 0x8000e, + 0x7687, + 0x10924d, + 0xfddc5, + 0x16a2c8, + 0x1ac3c8, + 0x109846, + 0x2a82, + 0xd8546, + 0x40386, + 0xc882, 0x401, - 0x627c7, - 0x60203, - 0x4d6f63c4, - 0x4da97703, + 0x5f087, + 0x13ab83, + 0x4daf68c4, + 0x4de95903, 0xc1, - 0x17746, + 0x12946, 0xc1, 0x201, - 0x17746, - 0x60203, - 0x1476d05, - 0x248084, - 0x20d183, - 0x2526c4, - 0x224604, - 0x222103, - 0x223c85, - 0x21b2c3, - 0x206103, - 0x3c0045, - 0x2020c3, - 0x4ee0d183, - 0x2355c3, - 0x214d83, + 0x12946, + 0x13ab83, + 0x14f2005, + 0x243644, + 0x209303, + 0x24f544, + 0x21e084, + 0x215c83, + 0x21d705, + 0x215943, + 0x22d183, + 0x3c1245, + 0x20ab43, + 0x4f209303, + 0x2351c3, + 0x22b883, 0x200181, - 0x272203, - 0x215584, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x215e83, - 0x1513c8, + 0x2287c3, + 0x229d44, + 0x226004, + 0x215c83, + 0x24b583, + 0x201203, + 0xaf0c8, 0x2000c2, - 0x203a83, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x3d0443, + 0x373a83, + 0x209302, + 0x209303, + 0x2351c3, + 0x210a43, 0x2005c2, - 0x224604, - 0x219a43, - 0x272203, - 0x222103, - 0x2182c3, - 0x2482c3, - 0x2020c3, - 0x1513c8, - 0x341c82, - 0x1cdf07, - 0x7c02, - 0x18f505, - 0x581cf, - 0x1589e48, - 0x10bf4e, - 0x4fe27402, - 0x329948, - 0x227886, - 0x2c5b46, - 0x227207, - 0x50202842, - 0x507ba908, - 0x225c8a, - 0x268f08, - 0x208e82, - 0x3447c9, - 0x370587, - 0x218086, - 0x224009, - 0x20a484, - 0x216146, - 0x2c5f44, - 0x205804, - 0x25d1c9, - 0x302f06, - 0x2b4745, - 0x256445, - 0x22ffc7, - 0x2c30c7, - 0x29fd84, - 0x227446, - 0x2f6f85, - 0x2ac8c5, - 0x310445, - 0x218a47, - 0x3094c5, - 0x328249, - 0x392285, - 0x2fab04, - 0x3786c7, - 0x3908ce, - 0x3265c9, - 0x325849, - 0x323946, - 0x245408, - 0x2eb60b, - 0x364ccc, - 0x346cc6, - 0x276787, - 0x2b2905, - 0x2e528a, - 0x21dfc9, - 0x322b09, - 0x393086, - 0x301605, - 0x24c745, - 0x330609, - 0x3105cb, - 0x27a486, - 0x34b706, - 0x20dbc4, - 0x28fd86, - 0x24bb48, - 0x3c3006, - 0x23a186, - 0x2091c8, - 0x20a987, - 0x20b1c9, - 0x20b9c5, - 0x1513c8, - 0x294d04, - 0x30cac4, - 0x212805, - 0x3b09c9, - 0x222d07, - 0x222d0b, - 0x22654a, - 0x22a705, - 0x50a038c2, - 0x2acc87, - 0x50e2aa08, - 0x216847, - 0x2dcc85, - 0x32d44a, - 0x7c02, - 0x25610b, - 0x27d18a, - 0x25fe46, - 0x210f03, - 0x36894d, - 0x39bd0c, - 0x2139cd, - 0x24ec85, - 0x376c85, - 0x372e07, - 0x218749, - 0x225b86, - 0x263b45, - 0x2d7ec8, - 0x28fc83, - 0x2ecdc8, - 0x28fc88, - 0x2c6c47, - 0x34c588, - 0x39af89, - 0x2de707, - 
0x3129c7, - 0x344648, - 0x2e8184, - 0x2e8187, - 0x294948, - 0x35d406, - 0x35fecf, - 0x239b07, - 0x3559c6, - 0x2318c5, - 0x224c03, - 0x24dcc7, - 0x385943, - 0x251d86, - 0x2540c6, - 0x254946, - 0x293645, - 0x26b9c3, - 0x3a4408, - 0x387d49, - 0x39c6cb, - 0x254ac8, - 0x255c85, - 0x257185, - 0x51231ac2, - 0x2aaf89, - 0x224687, - 0x25fd85, - 0x25d0c7, - 0x25f7c6, - 0x37ed85, - 0x260acb, - 0x263004, - 0x268ac5, - 0x268c07, - 0x279e06, - 0x27a245, - 0x2884c7, - 0x289287, - 0x2e4704, - 0x28e0ca, - 0x28eb08, - 0x2bcf49, - 0x23e5c5, - 0x3654c6, - 0x24bd0a, - 0x256346, - 0x26bf07, - 0x2bcc0d, - 0x2a5449, - 0x3a3705, - 0x373fc7, - 0x390cc8, - 0x390348, - 0x39ba07, - 0x20a206, - 0x22c047, - 0x252f03, - 0x302e84, - 0x37c905, - 0x3a8d07, - 0x3ace49, - 0x23a4c8, - 0x3cab05, - 0x248344, - 0x254c85, - 0x2631cd, - 0x205242, - 0x2c20c6, - 0x2ea446, - 0x30d28a, - 0x3936c6, - 0x399d05, - 0x205245, - 0x205247, - 0x3a7ccc, - 0x2b314a, - 0x28fa46, - 0x2d9905, - 0x28fbc6, - 0x28ff07, - 0x291bc6, - 0x29354c, - 0x224149, - 0x51617bc7, - 0x296585, - 0x296586, - 0x297188, - 0x250d85, - 0x2a6405, - 0x2a6b88, - 0x2a6d8a, - 0x51a7e102, - 0x51e0ebc2, - 0x2d21c5, - 0x277c03, - 0x2497c8, - 0x207983, - 0x2a7004, - 0x26880b, - 0x209748, - 0x303c48, - 0x5238f309, - 0x2abb09, - 0x2ac246, - 0x2ae888, - 0x2aea89, - 0x2af886, - 0x2afa05, - 0x24ea86, - 0x2afec9, - 0x392787, - 0x3c6e06, - 0x2d2787, - 0x392a47, - 0x21c844, - 0x52712809, - 0x284988, - 0x3ba808, - 0x391687, - 0x2cb446, - 0x218549, - 0x2c6207, - 0x258c0a, - 0x25a188, - 0x216f87, - 0x217d46, - 0x36560a, - 0x39dd48, - 0x2e9ec5, - 0x229a45, - 0x2fca87, - 0x301f49, - 0x379b0b, - 0x31fe88, - 0x392309, - 0x254ec7, - 0x2b974c, - 0x2ba00c, - 0x2ba30a, - 0x2ba58c, - 0x2c5ac8, - 0x2c5cc8, - 0x2c5ec4, - 0x2c63c9, - 0x2c6609, - 0x2c684a, - 0x2c6ac9, - 0x2c6e07, - 0x3b234c, - 0x237e46, - 0x2c98c8, - 0x256406, - 0x38db46, - 0x3a3607, - 0x3245c8, - 0x3275cb, - 0x216707, - 0x238bc9, - 0x380cc9, - 0x252847, - 0x25a904, - 0x273887, - 0x399546, - 0x216046, - 0x2f1c45, - 0x24fc08, - 0x293e84, - 0x293e86, - 0x2b300b, - 0x32d049, - 0x218946, - 0x218bc9, - 0x2128c6, - 0x331f88, - 0x225003, - 0x301785, - 0x23a2c9, - 0x26b385, - 0x303544, - 0x279306, - 0x241645, - 0x258406, - 0x30f707, - 0x32b8c6, - 0x22df4b, - 0x386547, - 0x256e86, - 0x231dc6, - 0x230086, - 0x29fd49, - 0x2ef90a, - 0x2becc5, - 0x2ed50d, - 0x2a6e86, - 0x2f7b06, - 0x2f4646, - 0x243485, - 0x2e6e87, - 0x2fe9c7, - 0x38ebce, - 0x272203, - 0x2cb409, - 0x388fc9, - 0x2e5687, - 0x26edc7, - 0x29f2c5, - 0x37aa45, - 0x52b9648f, - 0x2d1a87, - 0x2d1c48, - 0x2d2944, - 0x2d2ec6, - 0x52e4c442, - 0x2d6fc6, - 0x2d9fc6, - 0x32790e, - 0x2ecc0a, - 0x207606, - 0x3bb08a, - 0x214789, - 0x2444c5, - 0x342a88, - 0x350186, - 0x29d848, - 0x34ad88, - 0x3a564b, - 0x227305, - 0x309548, - 0x20930c, - 0x2dcb47, - 0x254306, - 0x352d48, - 0x2163c8, - 0x53240e02, - 0x20bbcb, - 0x21d3c9, + 0x21e084, + 0x20f8c3, + 0x2287c3, + 0x215c83, + 0x202543, + 0x24b583, + 0x20ab43, + 0xaf0c8, + 0x344802, + 0x122c87, + 0x9302, + 0x562c5, + 0x54d8f, + 0x158e708, + 0x114c4e, + 0x502210c2, + 0x32bb48, + 0x221546, + 0x2c2806, + 0x220ec7, + 0x50603882, + 0x50bbc048, + 0x21f44a, + 0x264b48, + 0x204542, + 0x38ab09, + 0x3ccec7, + 0x2132c6, 0x21da89, - 0x32cec7, - 0x211648, - 0x536236c8, - 0x201b8b, - 0x368589, - 0x28744d, - 0x24ee48, - 0x35b508, - 0x53a00ec2, - 0x33d484, - 0x53e30cc2, - 0x2ff206, - 0x54202dc2, - 0x31b2ca, - 0x208d86, - 0x3ce588, - 0x38a788, - 0x399946, - 0x2eed86, - 0x2f8e86, - 0x37acc5, - 0x23da04, - 0x5463aa04, - 0x351e06, - 0x27d9c7, - 0x54ae6547, - 0x35950b, - 
0x216a49, - 0x376cca, - 0x205384, - 0x2bdac8, - 0x3c6bcd, - 0x2f2649, - 0x2f2888, - 0x2f2b09, - 0x2f5b44, - 0x24b6c4, - 0x261d05, - 0x310c0b, - 0x2096c6, - 0x351c45, - 0x221109, - 0x227508, - 0x23ab84, - 0x2e5409, - 0x356d05, - 0x2c3108, - 0x313087, - 0x325c48, - 0x285a06, - 0x38e807, - 0x2de209, - 0x366d09, - 0x358e05, - 0x24d2c5, - 0x54e17002, - 0x2fa8c4, - 0x22da45, - 0x227106, - 0x306b45, - 0x29adc7, - 0x351f05, - 0x279e44, - 0x323a06, - 0x27dac7, - 0x22f406, - 0x319205, - 0x21b448, - 0x227a85, - 0x224e87, - 0x22a4c9, - 0x32d18a, - 0x282b07, - 0x282b0c, - 0x2b4706, - 0x248149, - 0x24e905, - 0x250cc8, - 0x204683, - 0x210605, - 0x399205, - 0x281607, - 0x55223142, - 0x2ee587, - 0x2f2186, - 0x33ac86, - 0x2f7346, - 0x216306, - 0x356f88, - 0x23e3c5, - 0x355a87, - 0x355a8d, - 0x220ac3, - 0x220ac5, - 0x309e07, - 0x2ee8c8, - 0x3099c5, - 0x21aa48, - 0x3942c6, - 0x2dbe87, - 0x2c9805, - 0x227386, - 0x3ad4c5, - 0x22594a, - 0x347e06, - 0x3cf5c7, - 0x2da9c5, - 0x2fdf87, - 0x2ff344, - 0x3034c6, - 0x3429c5, - 0x21364b, - 0x3993c9, - 0x39e50a, - 0x358e88, - 0x3c7448, - 0x30e5cc, - 0x314607, - 0x347508, - 0x34bf08, - 0x34d145, - 0x37f18a, - 0x3a6ac9, - 0x556035c2, - 0x3cb386, - 0x264304, - 0x334849, - 0x297c09, - 0x27afc7, - 0x2c4d87, - 0x20c909, - 0x243688, - 0x24368f, - 0x22ca46, - 0x2db98b, - 0x3b7785, - 0x3b7787, - 0x2fd1c9, - 0x268946, - 0x2e5387, - 0x2e1205, - 0x2330c4, - 0x26b246, - 0x222ec4, - 0x2e9807, - 0x32fc08, - 0x55b01508, - 0x301c85, - 0x301dc7, - 0x31a149, - 0x218f04, - 0x246cc8, - 0x55e58a48, - 0x206d84, - 0x2fae08, - 0x38fc44, - 0x39b489, - 0x219b85, - 0x562061c2, - 0x22ca85, - 0x2d4cc5, - 0x373e08, - 0x236547, - 0x566008c2, - 0x23ab45, - 0x2d5786, - 0x244d06, - 0x2fa888, - 0x2fc108, - 0x306b06, - 0x3ad9c6, - 0x3cdfc9, - 0x33abc6, - 0x294f0b, - 0x276c45, - 0x2a8146, - 0x2ec448, - 0x295746, - 0x22f186, - 0x21af0a, - 0x2abeca, - 0x2634c5, - 0x23e487, - 0x315246, - 0x56a09642, - 0x309f47, - 0x262305, - 0x24bc84, - 0x24bc85, - 0x2bd9c6, - 0x273107, - 0x21f3c5, - 0x24fc84, - 0x305788, - 0x22f245, - 0x2e2047, - 0x3af3c5, - 0x225885, - 0x2a60c4, - 0x31df49, - 0x2f6dc8, - 0x241506, - 0x2d31c6, - 0x206a86, - 0x56fbe708, - 0x3c72c7, - 0x30728d, - 0x3081cc, - 0x3087c9, - 0x308a09, - 0x57374ac2, - 0x3c5f43, - 0x20a2c3, - 0x399605, - 0x3a8e0a, - 0x33aa86, - 0x30ce45, - 0x30f8c4, - 0x30f8cb, - 0x32e90c, - 0x32fe0c, - 0x330115, - 0x330fcd, - 0x334a8f, - 0x334e52, - 0x3352cf, - 0x335692, - 0x335b13, - 0x335fcd, - 0x33658d, - 0x33690e, - 0x336e8e, - 0x3376cc, - 0x337a8c, - 0x337ecb, - 0x338dce, - 0x3396d2, - 0x33a84c, - 0x33ae10, - 0x344fd2, - 0x345c4c, - 0x34630d, - 0x34664c, - 0x3495d1, - 0x34b88d, - 0x34decd, - 0x34e4ca, - 0x34e74c, - 0x34fa0c, - 0x35194c, - 0x35238c, - 0x35a0d3, - 0x35a750, - 0x35ab50, - 0x35b70d, - 0x35bd0c, - 0x35cac9, - 0x35e14d, - 0x35e493, - 0x360851, - 0x360c93, - 0x36184f, - 0x361c0c, - 0x361f0f, - 0x3622cd, - 0x3628cf, - 0x362c90, - 0x36370e, - 0x366f4e, - 0x3674d0, - 0x369b0d, - 0x36a48e, - 0x36a80c, - 0x36b7d3, - 0x36d2ce, - 0x36d950, - 0x36dd51, - 0x36e18f, - 0x36e553, - 0x37464d, - 0x37498f, - 0x374d4e, - 0x375410, - 0x375809, - 0x376f50, - 0x37754f, - 0x377bcf, - 0x377f92, - 0x378cce, - 0x3796cd, - 0x379dcd, - 0x37a10d, - 0x37b30d, - 0x37b64d, - 0x37b990, - 0x37bd8b, - 0x37c6cc, - 0x37ca4c, - 0x37d04c, - 0x37d34e, - 0x38a990, - 0x38bf12, - 0x38c38b, - 0x38c88e, - 0x38cc0e, - 0x38d48e, - 0x38d90b, - 0x5778e116, - 0x3933cd, - 0x393854, - 0x39460d, - 0x395e95, - 0x39790d, - 0x39828f, - 0x39890f, - 0x39c98f, - 0x39cd4e, - 0x39d2cd, - 0x3a0851, - 0x3a2dcc, - 0x3a30cc, - 
0x3a33cb, - 0x3a384c, - 0x3a3c0f, - 0x3a3fd2, - 0x3a4d8d, - 0x3a630c, - 0x3a6d0c, - 0x3a700d, - 0x3a734f, - 0x3a770e, - 0x3a8acc, - 0x3a908d, - 0x3a93cb, - 0x3a9c8c, - 0x3aa58d, - 0x3aa8ce, - 0x3aac49, - 0x3abd93, - 0x3ac2cd, - 0x3ac60d, - 0x3acc0c, - 0x3ad08e, - 0x3add0f, - 0x3ae0cc, - 0x3ae3cd, - 0x3ae70f, - 0x3aeacc, - 0x3af50c, - 0x3af88c, - 0x3afb8c, - 0x3b024d, - 0x3b0592, - 0x3b0c0c, - 0x3b0f0c, - 0x3b1211, - 0x3b164f, - 0x3b1a0f, - 0x3b1dd3, - 0x3b278e, - 0x3b2b0f, - 0x3b2ecc, - 0x57bb320e, - 0x3b358f, - 0x3b3956, - 0x3b4cd2, - 0x3b6f8c, - 0x3b794f, - 0x3b7fcd, - 0x3bd78f, - 0x3bdb4c, - 0x3bde4d, - 0x3be18d, - 0x3bf70e, - 0x3c09cc, - 0x3c238c, - 0x3c2690, - 0x3c52d1, - 0x3c570b, - 0x3c5b4c, - 0x3c5e4e, - 0x3c7951, - 0x3c7d8e, - 0x3c810d, - 0x3ccccb, - 0x3cd5cf, - 0x3cfa94, - 0x25cfc2, - 0x25cfc2, - 0x209303, - 0x25cfc2, - 0x209303, - 0x25cfc2, - 0x202c02, - 0x24eac5, - 0x3c764c, - 0x25cfc2, - 0x25cfc2, - 0x202c02, - 0x25cfc2, - 0x297805, - 0x32d185, - 0x25cfc2, - 0x25cfc2, - 0x212802, - 0x297805, - 0x332189, - 0x36054c, - 0x25cfc2, - 0x25cfc2, - 0x25cfc2, - 0x25cfc2, - 0x24eac5, - 0x25cfc2, - 0x25cfc2, - 0x25cfc2, - 0x25cfc2, - 0x212802, - 0x332189, - 0x25cfc2, - 0x25cfc2, - 0x25cfc2, - 0x32d185, - 0x25cfc2, - 0x32d185, - 0x36054c, - 0x3c764c, - 0x203a83, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x222103, - 0x2482c3, - 0x1732c8, - 0x5aa84, - 0x182c3, - 0xc9008, - 0x2000c2, - 0x58a07c02, - 0x246603, - 0x25a0c4, - 0x202b03, - 0x21ee44, - 0x232406, - 0x215903, - 0x300c44, - 0x2d3dc5, - 0x272203, - 0x222103, - 0xe6003, - 0x2482c3, - 0x2ed18a, - 0x25cb86, - 0x38cf8c, - 0x1513c8, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x20d203, - 0x2d9fc6, - 0x222103, - 0x2482c3, - 0x21a803, - 0xa7d88, - 0xf4d45, - 0x1c4589, - 0x3742, - 0x59efddc5, - 0xf4d45, - 0x39e47, - 0x70748, - 0xe9ce, - 0x8bfd2, - 0x308b, - 0x102b46, - 0x5a28e505, - 0x5a68e50c, - 0x3e707, - 0xdf5c7, - 0xbe28a, - 0x41790, - 0x192bc5, - 0xb1b8b, - 0x83b88, - 0x37707, - 0x59d4b, - 0x7e409, - 0x131587, - 0x112187, - 0x7a6c7, - 0x37646, - 0x6a488, - 0x5ac39c86, - 0x4f147, - 0x15974d, - 0xbdc50, - 0x5b00b782, - 0x7c748, - 0x5e890, - 0x180a4c, - 0x5b789b4d, - 0x60fc8, - 0x6144b, - 0x6e407, - 0x5a649, - 0x5c786, - 0x97388, - 0x5642, - 0x7354a, - 0x82987, - 0x76007, - 0xa8709, - 0xaa248, - 0x111a45, - 0xf3b0e, - 0x261ce, - 0x15758f, - 0x157cc9, - 0x5f3c9, - 0x8334b, - 0x967cf, - 0xafbcc, - 0x11cd4b, - 0xcff88, - 0xf34c7, - 0x100f08, - 0x13b78b, - 0x13eecc, - 0x15c38c, - 0x163ecc, - 0x16ce0d, - 0x168708, - 0xc7782, - 0x1a4789, - 0x148108, - 0x1a1f0b, - 0xcb646, - 0xd510b, - 0x13920b, - 0xe094a, - 0xe1505, - 0xe6b90, - 0xe8646, - 0x12c986, - 0x127cc5, - 0x165807, - 0xf9388, - 0xeef07, - 0xef1c7, - 0xfd007, - 0xff04a, - 0x15124a, - 0xea506, - 0x944cd, - 0x4f208, - 0x10bbc8, - 0xa6009, - 0xb8c45, - 0xfeb8c, - 0x16d00b, - 0x179a44, - 0x106ec9, - 0x107106, - 0x156186, - 0xbb8c6, - 0x3582, - 0x44a06, - 0x5f4b, - 0x114487, - 0x1242, - 0xcdf45, - 0x1e904, - 0x101, - 0x62c43, - 0x5aa77dc6, - 0x97703, - 0x382, - 0x1f84, - 0x8e82, - 0x8a904, - 0x882, - 0x34c2, - 0xbc2, - 0x120542, - 0xf82, - 0x8e502, - 0xae02, - 0x2d42, - 0x39082, - 0x1d2c2, - 0x3282, - 0x11702, - 0x355c3, - 0x942, - 0x6ac2, - 0x19902, - 0x1482, - 0x642, - 0x33842, - 0x586c2, - 0x7842, - 0x15642, - 0x5c2, - 0x19a43, - 0xb02, - 0x2c82, - 0xb38c2, - 0xb2c2, - 0x4942, - 0xe7c2, - 0x16182, - 0x9f702, - 0x2002, - 0x126302, - 0x6f582, - 0xbf82, - 0x22103, - 0x602, - 0x40e02, - 0x1a42, - 0x1202, - 0x158d85, - 0x9dc2, - 0x4c42, - 0x43dc3, - 0x682, - 0x4e42, - 0x1002, - 
0x2c42, - 0x16302, - 0x8c2, - 0xab42, - 0x3582, - 0xc105, - 0x5ba02c02, - 0x5bfba083, - 0x11783, - 0x5c202c02, - 0x11783, - 0x85307, - 0x2141c3, - 0x2000c2, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x2005c3, - 0x20d203, - 0x222103, - 0x2182c3, - 0x2482c3, - 0x297743, - 0x15dc3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x272203, - 0x222103, - 0x2182c3, - 0xe6003, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x200181, - 0x272203, - 0x222103, - 0x252ec3, - 0x2482c3, - 0x174544, - 0x203a83, - 0x20d183, - 0x2355c3, - 0x211e83, - 0x3d0443, - 0x281503, - 0x241c03, - 0x2aad43, - 0x2bd583, - 0x214d83, - 0x224604, - 0x222103, - 0x2482c3, - 0x2020c3, - 0x202544, - 0x2633c3, - 0x8403, - 0x3c3103, - 0x2034c8, - 0x307c84, - 0x20020a, - 0x23ce06, - 0x114104, - 0x3783c7, - 0x22068a, - 0x22c909, - 0x3a8807, - 0x3b3e8a, - 0x203a83, - 0x2d224b, - 0x2bd4c9, - 0x206b85, - 0x33a687, - 0x7c02, - 0x20d183, - 0x222347, - 0x353005, - 0x2c6049, - 0x2355c3, - 0x2bb306, - 0x2c5303, - 0xf2203, - 0x10e9c6, - 0x172646, - 0x15887, - 0x228c46, - 0x31bf45, - 0x20ba87, - 0x30c387, - 0x5ea14d83, - 0x345e87, - 0x2bb9c3, - 0x249545, - 0x224604, - 0x270e08, - 0x3793cc, - 0x33eb45, - 0x2a55c6, - 0x222207, - 0x229307, - 0x256fc7, - 0x25f5c8, - 0x30d98f, - 0x3924c5, - 0x246707, - 0x28f1c7, - 0x28f30a, - 0x2d7d09, - 0x30a285, - 0x30eb0a, - 0x1919c6, - 0x2c5385, - 0x37b204, - 0x38a6c6, - 0x2e31c7, - 0x2c7687, - 0x38ef08, - 0x225005, - 0x352f06, + 0x2094c4, + 0x210e06, + 0x2c2c04, + 0x282344, + 0x259789, + 0x305d46, + 0x2ed945, + 0x252345, + 0x22fe47, + 0x2bfdc7, + 0x2a5884, + 0x221106, + 0x2f7c45, + 0x2ab0c5, + 0x319145, + 0x20eac7, + 0x311b85, + 0x32a449, + 0x367b05, + 0x2fb904, + 0x37bc87, + 0x37264e, + 0x30b549, + 0x371b89, + 0x366646, + 0x240d88, + 0x2f110b, + 0x36314c, + 0x353d46, + 0x364f87, + 0x2afec5, + 0x2e54ca, + 0x218089, + 0x348d49, + 0x2016c6, + 0x303005, + 0x2483c5, + 0x3313c9, + 0x3192cb, + 0x279006, + 0x34c086, + 0x20b0c4, + 0x28dd86, + 0x246b48, + 0x3c4106, + 0x26e206, + 0x204b48, + 0x205fc7, + 0x206c89, + 0x207845, + 0xaf0c8, + 0x293e84, + 0x3157c4, + 0x20d9c5, + 0x3b2209, + 0x21c787, + 0x21c78b, + 0x22354a, + 0x229fc5, + 0x50e08942, + 0x2ab487, + 0x5122a2c8, + 0x211507, + 0x2dc385, + 0x23968a, + 0x9302, + 0x25200b, + 0x27b48a, + 0x25c546, + 0x20d843, + 0x2ccf4d, + 0x39d20c, + 0x3d230d, + 0x24aa45, + 0x391cc5, + 0x30f047, + 0x20e7c9, + 0x21f346, + 0x25d285, + 0x2d6108, + 0x28dc83, + 0x2fe788, + 0x28dc88, + 0x2c3907, + 0x34d6c8, + 0x39d009, + 0x2e3307, + 0x31b6c7, + 0x38a988, + 0x2fd784, + 0x2fd787, + 0x295008, + 0x359406, + 0x39dfcf, + 0x226e87, + 0x355286, 0x23a105, - 0x2382c5, - 0x28de44, - 0x399847, - 0x356dca, - 0x242788, - 0x375286, - 0xd203, - 0x2e2845, - 0x35fd06, - 0x3b2586, - 0x327bc6, - 0x272203, - 0x3a5007, - 0x28f145, - 0x222103, - 0x2e0c0d, - 0x2182c3, - 0x38f008, - 0x388644, - 0x27a105, - 0x2a7046, - 0x21d146, - 0x2a8047, - 0x2aad87, - 0x365cc5, - 0x2482c3, - 0x2f35c7, - 0x25a7c9, - 0x32cbc9, - 0x2123ca, - 0x20c0c2, - 0x249504, - 0x2e8b84, - 0x327487, - 0x2ee448, - 0x2f0149, - 0x220989, - 0x2f1287, - 0x2ebb86, - 0xf3886, - 0x2f5b44, - 0x2f614a, - 0x2f86c8, - 0x2f8d49, - 0x30e186, - 0x2b5805, + 0x21ec03, + 0x249e87, + 0x389083, + 0x24e986, + 0x250046, + 0x2508c6, + 0x292705, + 0x2685c3, + 0x3a5808, + 0x38c609, + 0x39ec0b, + 0x250a48, + 0x251b85, + 0x2530c5, + 0x5163a302, + 0x2a3e09, + 0x21e107, + 0x25c485, + 0x259687, + 0x25bc06, + 0x383105, + 0x25d54b, + 0x25f6c4, + 0x264705, + 0x264847, + 0x278986, + 0x278dc5, + 0x286807, + 0x286f87, + 0x2fa5c4, + 
0x28ba0a, + 0x28c508, + 0x2b9649, + 0x2cdec5, + 0x363946, + 0x246d0a, + 0x252246, + 0x268f87, + 0x2b930d, + 0x2a3649, + 0x3a4b05, + 0x310207, + 0x372a48, + 0x374fc8, + 0x366207, + 0x205c06, + 0x227247, + 0x24fb83, + 0x305cc4, + 0x380645, + 0x3aa207, + 0x3ae689, + 0x26e548, + 0x22dfc5, + 0x246284, + 0x250c05, + 0x25f88d, + 0x203402, + 0x2bedc6, + 0x2ea686, + 0x315f8a, + 0x395b06, + 0x39bd85, + 0x368fc5, + 0x368fc7, + 0x3a928c, + 0x2e488a, + 0x28da46, + 0x2d7685, + 0x28dbc6, + 0x28df07, + 0x2906c6, + 0x29260c, + 0x21dbc9, + 0x51a12dc7, + 0x294e85, + 0x294e86, + 0x295388, + 0x24fe05, + 0x2a40c5, + 0x2a4848, + 0x2a4a4a, + 0x51e7c402, + 0x52208b82, + 0x2d8a45, + 0x2a7483, + 0x245308, + 0x2050c3, + 0x2a4cc4, + 0x26a4cb, + 0x2050c8, + 0x317b08, + 0x526560c9, + 0x2a9809, + 0x2a9f46, + 0x2ac1c8, + 0x2ac3c9, + 0x2ad046, + 0x2ad1c5, + 0x24a846, + 0x2ad689, + 0x30c487, + 0x3ad306, + 0x2e8487, + 0x30c747, + 0x326404, + 0x52b1b509, + 0x2827c8, + 0x3bbf48, + 0x373407, + 0x2c7dc6, + 0x2027c9, + 0x2c2ec7, + 0x35c60a, + 0x35db88, + 0x212f47, + 0x215d86, + 0x28ea8a, + 0x3a0288, + 0x2ea105, + 0x2255c5, + 0x343607, + 0x304d09, + 0x37d30b, + 0x326c88, + 0x367b89, + 0x250e47, + 0x2b5f4c, + 0x2b670c, + 0x2b6a0a, + 0x2b6c8c, + 0x2c2788, + 0x2c2988, + 0x2c2b84, + 0x2c3089, + 0x2c32c9, + 0x2c350a, + 0x2c3789, + 0x2c3ac7, + 0x3b3b8c, + 0x237a46, + 0x2c6248, + 0x252306, + 0x393bc6, + 0x3a4a07, + 0x30b748, + 0x377a4b, + 0x2113c7, + 0x238689, + 0x3840c9, + 0x24f6c7, + 0x2c2e44, + 0x26fd47, + 0x39b5c6, + 0x210d06, + 0x2f5785, + 0x24c848, + 0x292f44, + 0x292f46, + 0x2e474b, + 0x34d889, + 0x20e9c6, + 0x20ec49, + 0x20da86, + 0x257348, + 0x21e283, + 0x303185, + 0x26e349, + 0x27afc5, + 0x307fc4, + 0x277e86, + 0x23d6c5, + 0x254fc6, + 0x318407, + 0x32dac6, + 0x22c58b, + 0x3911c7, + 0x252d86, + 0x23a606, + 0x22ff06, + 0x2a5849, + 0x2ef4ca, + 0x2bb3c5, + 0x2ed08d, + 0x2a4b46, + 0x2f88c6, + 0x2f4c46, + 0x23f245, + 0x2e70c7, + 0x300687, + 0x208bce, + 0x2287c3, + 0x2c7d89, + 0x38d889, + 0x2e58c7, + 0x26c387, + 0x29dac5, + 0x37e245, + 0x52f9868f, + 0x2d0107, + 0x2d02c8, + 0x2d1144, + 0x2d16c6, + 0x532480c2, + 0x2d5606, + 0x2d7d46, + 0x377d8e, + 0x2fe5ca, + 0x3bc7c6, + 0x22390a, + 0x3d2109, + 0x243885, + 0x37e7c8, + 0x352246, + 0x29c048, + 0x257508, + 0x373f4b, + 0x220fc5, + 0x311c08, + 0x204c8c, + 0x2dc247, + 0x250286, + 0x334fc8, + 0x211088, + 0x5363ce82, + 0x207a4b, + 0x217489, + 0x217b49, + 0x3947c7, + 0x216448, + 0x53a1d148, + 0x375b8b, + 0x2ccb89, + 0x28528d, + 0x26ed88, + 0x3575c8, + 0x53e03c02, + 0x3c4a04, + 0x542203c2, + 0x300ec6, + 0x54605102, + 0x2fd0ca, + 0x204446, + 0x2ecc08, + 0x38fe48, + 0x39b9c6, + 0x2ee946, + 0x2f9c46, + 0x37e4c5, + 0x23aa84, + 0x54a6ea84, + 0x351d86, + 0x27bcc7, + 0x54ee6787, + 0x2200cb, + 0x211709, + 0x391d0a, + 0x369104, + 0x2ba1c8, + 0x3ad0cd, + 0x2f2ac9, + 0x2f2d08, + 0x2f2f89, + 0x2f6044, + 0x2466c4, + 0x25e745, + 0x31990b, + 0x205046, + 0x351bc5, + 0x21bac9, + 0x2211c8, + 0x26ec04, + 0x2e5649, + 0x266f45, + 0x2bfe08, + 0x31bd87, + 0x371f88, + 0x283846, + 0x208807, + 0x2ddbc9, + 0x204909, + 0x35acc5, + 0x248f85, + 0x55212fc2, + 0x2fb6c4, + 0x224405, + 0x220dc6, + 0x3c85c5, + 0x298fc7, + 0x351e85, + 0x2789c4, + 0x366706, + 0x27bdc7, + 0x232906, + 0x325545, + 0x210748, + 0x221745, + 0x216b87, + 0x225209, + 0x34d9ca, + 0x2ec787, + 0x2ec78c, + 0x2ed906, + 0x24b409, + 0x24e1c5, + 0x24fd48, + 0x20a743, + 0x20d405, + 0x39b285, + 0x27fbc7, + 0x5561cbc2, + 0x2ee147, + 0x2f5cc6, + 0x33d146, + 0x2fc2c6, + 0x210fc6, + 0x2671c8, + 0x2cdcc5, + 0x355347, + 0x35534d, + 0x21ae83, + 0x21ae85, + 0x3124c7, + 
0x2ee488, + 0x312085, + 0x2150c8, + 0x3a25c6, + 0x2db587, + 0x2c6185, + 0x221046, + 0x3aed05, + 0x21f10a, + 0x3819c6, + 0x304587, + 0x2e2e85, + 0x301007, + 0x305344, + 0x307f46, + 0x37e705, + 0x22260b, + 0x39b449, + 0x24310a, + 0x35ad48, + 0x317248, + 0x31d30c, + 0x328d47, + 0x34c888, + 0x34e948, + 0x34ed85, + 0x3a3b8a, + 0x3a8089, + 0x55a010c2, + 0x3cc3c6, + 0x260344, + 0x348409, + 0x295e09, + 0x279647, + 0x2c25c7, + 0x206509, + 0x23f448, + 0x23f44f, + 0x227c46, + 0x2db08b, + 0x3b8ec5, + 0x3b8ec7, + 0x2fed09, + 0x26a606, + 0x2e55c7, + 0x2df245, + 0x232704, + 0x27ae86, + 0x21c944, + 0x2e9a47, + 0x2cfbc8, + 0x55f02f08, + 0x304a45, + 0x304b87, + 0x359f09, + 0x20ef84, 0x242648, - 0x2cba8a, - 0x295a83, - 0x2026c6, - 0x2f1387, - 0x22f985, - 0x388505, - 0x3ab783, - 0x271dc4, - 0x229a05, - 0x289387, - 0x2f6f05, - 0x2fc946, - 0x106a05, - 0x2076c3, - 0x2076c9, - 0x279ecc, - 0x2e004c, - 0x2d4f08, - 0x2bf987, - 0x3028c8, - 0x30424a, - 0x304c4b, - 0x2bd608, - 0x21d248, - 0x237d46, - 0x206945, - 0x20498a, - 0x3ba0c5, - 0x2061c2, - 0x2c96c7, - 0x254606, - 0x3764c5, - 0x37a689, - 0x27bdc5, - 0x3862c5, - 0x2ec149, - 0x35fc46, - 0x3b4588, - 0x249603, - 0x228d86, - 0x279246, - 0x313c85, - 0x313c89, - 0x2f0889, - 0x280687, - 0x116204, - 0x316207, - 0x220889, - 0x2380c5, - 0x3db08, - 0x33d745, - 0x36f045, - 0x259549, - 0x205a42, - 0x282544, - 0x200f42, + 0x562189c8, + 0x2f0d44, + 0x2fbc08, + 0x367584, + 0x34b749, + 0x20fa05, + 0x56616a42, + 0x227c85, + 0x2d3285, + 0x310048, + 0x236147, + 0x56a008c2, + 0x26ebc5, + 0x2d3dc6, + 0x240686, + 0x2fb688, + 0x2fda88, + 0x3c8586, + 0x3af206, + 0x322d49, + 0x33d086, + 0x29408b, + 0x2f1f45, + 0x2a5e46, + 0x2948c8, + 0x22ea46, + 0x331786, + 0x21558a, + 0x2a9bca, + 0x25cc05, + 0x2cdd87, + 0x31e086, + 0x56e04fc2, + 0x312607, + 0x256c45, + 0x246c84, + 0x246c85, + 0x2ba0c6, + 0x272d47, + 0x219785, + 0x24c8c4, + 0x334648, + 0x331845, + 0x2e0b07, + 0x3b0c05, + 0x21f045, + 0x2266c4, + 0x2266c9, + 0x2f7a88, + 0x23d586, + 0x2d19c6, + 0x36a806, + 0x573bf808, + 0x3c8307, + 0x3099cd, + 0x31088c, + 0x310e89, + 0x3110c9, + 0x57778742, + 0x3c7543, + 0x205cc3, + 0x39b685, + 0x3aa30a, + 0x33cf46, + 0x315b45, + 0x3185c4, + 0x3185cb, + 0x32f78c, + 0x330bcc, + 0x330ed5, + 0x331e0d, + 0x336a0f, + 0x336dd2, + 0x33724f, + 0x337612, + 0x337a93, + 0x337f4d, + 0x33850d, + 0x33888e, + 0x338e0e, + 0x33964c, + 0x339a0c, + 0x339e4b, + 0x33b28e, + 0x33bb92, + 0x33cd0c, + 0x33d2d0, + 0x3460d2, + 0x346d4c, + 0x34740d, + 0x34774c, + 0x34a151, + 0x34c20d, + 0x34f34d, + 0x34f94a, + 0x34fbcc, + 0x350e8c, + 0x3518cc, + 0x3535cc, + 0x356193, + 0x356810, + 0x356c10, + 0x3577cd, + 0x357dcc, + 0x358ac9, + 0x35b5cd, + 0x35b913, + 0x35ebd1, + 0x35f013, + 0x35fbcf, + 0x35ff8c, + 0x36028f, + 0x36064d, + 0x360c4f, + 0x361010, + 0x361a8e, + 0x36af0e, + 0x36b490, + 0x36c14d, + 0x36cace, + 0x36ce4c, + 0x36de13, + 0x36fd0e, + 0x370390, + 0x370791, + 0x370bcf, + 0x370f93, + 0x3782cd, + 0x37860f, + 0x3789ce, + 0x379090, + 0x379489, + 0x37a510, + 0x37ab0f, + 0x37b18f, + 0x37b552, + 0x37c4ce, + 0x37cecd, + 0x37d5cd, + 0x37d90d, + 0x37f38d, + 0x37f6cd, + 0x37fa10, + 0x37fe0b, + 0x38040c, + 0x38078c, + 0x380d8c, + 0x38108e, + 0x390050, + 0x391f92, + 0x39240b, + 0x39290e, + 0x392c8e, + 0x39350e, + 0x39398b, + 0x57b940d6, + 0x39580d, + 0x395c94, + 0x39680d, + 0x398095, + 0x39998d, + 0x39a30f, + 0x39a98f, + 0x39eecf, + 0x39f28e, + 0x39f80d, + 0x3a15d1, + 0x3a41cc, + 0x3a44cc, + 0x3a47cb, + 0x3a4c4c, + 0x3a500f, + 0x3a53d2, + 0x3a620d, + 0x3a78cc, + 0x3a82cc, + 0x3a85cd, + 0x3a890f, + 0x3a8cce, + 0x3a9fcc, + 0x3aa58d, + 0x3aa8cb, + 
0x3ab18c, + 0x3aba8d, + 0x3abdce, + 0x3ac149, + 0x3ad5d3, + 0x3adb0d, + 0x3ade4d, + 0x3ae44c, + 0x3ae8ce, + 0x3af54f, + 0x3af90c, + 0x3afc0d, + 0x3aff4f, + 0x3b030c, + 0x3b0d4c, + 0x3b10cc, + 0x3b13cc, + 0x3b1a8d, + 0x3b1dd2, + 0x3b244c, + 0x3b274c, + 0x3b2a51, + 0x3b2e8f, + 0x3b324f, + 0x3b3613, + 0x3b3fce, + 0x3b434f, + 0x3b470c, + 0x57fb4a4e, + 0x3b4dcf, + 0x3b5196, + 0x3b6412, + 0x3b86cc, + 0x3b908f, + 0x3b970d, + 0x3be88f, + 0x3bec4c, + 0x3bef4d, + 0x3bf28d, + 0x3c090e, + 0x3c1bcc, + 0x3c348c, + 0x3c3790, + 0x3c68d1, + 0x3c6d0b, + 0x3c714c, + 0x3c744e, + 0x3c8c51, + 0x3c908e, + 0x3c940d, + 0x3ce5cb, + 0x3ceecf, + 0x3cfd14, + 0x2049c2, + 0x2049c2, + 0x204c83, + 0x2049c2, + 0x204c83, + 0x2049c2, + 0x203702, + 0x24a885, + 0x3c894c, + 0x2049c2, + 0x2049c2, + 0x203702, + 0x2049c2, + 0x295a05, + 0x34d9c5, + 0x2049c2, + 0x2049c2, + 0x20d9c2, + 0x295a05, + 0x3337c9, + 0x35e8cc, + 0x2049c2, + 0x2049c2, + 0x2049c2, + 0x2049c2, + 0x24a885, + 0x2049c2, + 0x2049c2, + 0x2049c2, + 0x2049c2, + 0x20d9c2, + 0x3337c9, + 0x2049c2, + 0x2049c2, + 0x2049c2, + 0x34d9c5, + 0x2049c2, + 0x34d9c5, + 0x35e8cc, + 0x3c894c, + 0x373a83, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x215c83, + 0x24b583, + 0x10f508, + 0x80144, + 0x2543, + 0xc5a88, + 0x2000c2, + 0x58e09302, + 0x241f83, + 0x2471c4, + 0x202c03, + 0x255a44, + 0x231a46, + 0x210303, + 0x302144, + 0x25c905, + 0x2287c3, + 0x215c83, + 0xe6243, + 0x24b583, + 0x2678ca, + 0x25c006, + 0x39300c, + 0xaf0c8, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x209383, + 0x2d7d46, + 0x215c83, + 0x24b583, + 0x214e83, + 0xa5a88, + 0xf6d85, + 0x1c5cc9, + 0x10c02, + 0x5a2ff905, + 0xf6d85, + 0x6dec7, + 0x6fa88, + 0xbece, + 0x89ad2, + 0x7d2cb, + 0x105986, + 0x5a68be45, + 0x5aa8be4c, + 0xce007, + 0xe41c7, + 0xba98a, + 0x3c7d0, + 0x10c8c5, + 0xe4f8b, + 0x73dc8, + 0x37307, + 0x15d74b, + 0x7c709, + 0x1323c7, + 0x11ae87, + 0x79247, + 0x37246, + 0x65e48, + 0x5b03cf86, + 0x4b187, + 0x2030d, + 0xba350, + 0x5b407242, + 0x28748, + 0x5ae50, + 0x183e4c, + 0x5bb8e40d, + 0x5da08, + 0x5de8b, + 0x6b9c7, + 0x15e049, + 0x59086, + 0x95588, + 0x72302, + 0x7318a, + 0xe1c07, + 0x2d747, + 0xa63c9, + 0xa8148, + 0x11a745, + 0xf410e, + 0x1b44e, + 0x1f88f, + 0x23bc9, + 0x47089, + 0x7358b, + 0x91c4f, + 0xad38c, + 0x193e0b, + 0xd1388, + 0x1016c7, + 0x1076c8, + 0x140f8b, + 0x15844c, + 0x16224c, + 0x16fa0c, + 0x17a04d, + 0xccd08, + 0xc4442, + 0x1a5b89, + 0x181cc8, + 0x1a300b, + 0xc7fc6, + 0xd36cb, + 0x13b6cb, + 0xde98a, + 0xdf545, + 0xe6dd0, + 0xe8646, + 0x74e86, + 0x178145, + 0x8ec87, + 0x113648, + 0xeeac7, + 0xeed87, + 0x1cfb47, + 0x100d0a, + 0xaef4a, + 0xea746, + 0x9358d, + 0x4b248, + 0x1147c8, + 0xaa309, + 0xb5685, + 0x10084c, + 0x17a24b, + 0x17d244, + 0x109609, + 0x109846, + 0x155a46, + 0xb7fc6, + 0x2fc2, + 0x40386, + 0x169ccb, + 0x11d187, + 0xc882, + 0xc9f05, + 0x189c4, + 0x101, + 0x8ac3, + 0x5aea7646, + 0x95903, + 0x382, + 0x2d744, + 0x4542, + 0x8b004, + 0x882, + 0x7b82, + 0x3fc2, + 0x10e842, + 0xdc2, + 0x8be42, + 0x1242, + 0x142c02, + 0x38b42, + 0x17382, + 0x69c2, + 0x16502, + 0x351c3, + 0x942, + 0x12c2, + 0xd42, + 0x8282, + 0x642, + 0x33542, + 0x55282, + 0x7602, + 0x7502, + 0x5c2, + 0xf8c3, + 0xb02, + 0x2382, + 0xb0442, + 0x6d82, + 0x5142, + 0xbcc2, + 0x10e42, + 0x9df02, + 0x9382, + 0x10b282, + 0x6ca42, + 0x7e02, + 0x15c83, + 0x602, + 0x3ce82, + 0x1e82, + 0x1b382, + 0x15ac45, + 0x57c2, + 0x44042, + 0x3f843, + 0x682, + 0x6702, + 0x3182, + 0xac02, + 0xeb02, + 0x8c2, + 0x2a82, + 0x2fc2, + 0x7f85, + 0x5be03702, + 0x5c3bb7c3, + 0x16583, + 0x5c603702, + 0x16583, + 0x83147, + 0x20f403, + 0x2000c2, 
+ 0x209303, + 0x2351c3, + 0x210a43, + 0x2005c3, + 0x209383, + 0x215c83, + 0x202543, + 0x24b583, + 0x295943, + 0xd983, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x210a43, + 0x2287c3, + 0x215c83, + 0x202543, + 0xe6243, + 0x24b583, + 0x209303, + 0x2351c3, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x200181, + 0x2287c3, + 0x215c83, + 0x24fb43, + 0x24b583, + 0x110784, + 0x373a83, + 0x209303, + 0x2351c3, + 0x209d43, + 0x210a43, + 0x27fac3, + 0x23ea03, + 0x2a3bc3, + 0x2b9c83, + 0x22b883, + 0x21e084, + 0x215c83, + 0x24b583, + 0x20ab43, + 0x376544, + 0x25fa83, + 0x3ac3, + 0x3c4203, + 0x375548, + 0x28eac4, + 0x20020a, + 0x23dc06, + 0x11ce04, + 0x37b987, + 0x21aa4a, + 0x227b09, + 0x3a9d07, + 0x3b56ca, + 0x373a83, + 0x2d8acb, + 0x2b9bc9, + 0x36a905, + 0x33cb47, + 0x9302, + 0x209303, + 0x218cc7, + 0x3549c5, + 0x2c2d09, + 0x2351c3, + 0x2b7a06, + 0x2c1fc3, + 0xf5d43, + 0x117546, + 0x10e886, + 0x10287, + 0x222cc6, + 0x22cac5, + 0x207907, + 0x315087, + 0x5ee2b883, + 0x346f87, + 0x2b80c3, + 0x245085, + 0x21e084, + 0x2705c8, + 0x37cbcc, + 0x340c05, + 0x2a37c6, + 0x218b87, + 0x223387, + 0x24f847, + 0x252ec8, + 0x31668f, + 0x367d45, + 0x242087, + 0x202d07, + 0x2a4e0a, + 0x2d5f49, + 0x312945, + 0x31768a, + 0x173746, + 0x2c2045, + 0x37f284, + 0x38fd86, + 0x335c87, + 0x2c4347, + 0x394948, + 0x21e285, + 0x3548c6, + 0x26e185, + 0x23a745, + 0x28b784, + 0x39b8c7, + 0x26700a, + 0x247608, + 0x378f06, + 0x9383, + 0x2e1305, + 0x3cab86, + 0x3b3dc6, + 0x378046, + 0x2287c3, + 0x3a6487, + 0x202c85, + 0x215c83, + 0x2dec4d, + 0x202543, + 0x394a48, + 0x38cf04, + 0x278c85, + 0x2a4d06, + 0x217206, + 0x2a5d47, + 0x2a3c07, + 0x293d05, + 0x24b583, + 0x3017c7, + 0x35e1c9, + 0x2750c9, + 0x20a28a, + 0x207f42, + 0x245044, + 0x2e8dc4, + 0x377907, + 0x2ee008, + 0x2efd09, + 0x21ad49, + 0x2f0907, + 0x2f1686, + 0xf3e86, + 0x2f6044, + 0x2f664a, + 0x2f9488, + 0x2f9b09, + 0x2e8c46, + 0x2b1f45, + 0x2474c8, + 0x2c840a, + 0x205dc3, + 0x3766c6, + 0x2f0a07, + 0x230245, + 0x38cdc5, + 0x3acc83, + 0x271504, + 0x225585, + 0x287087, + 0x2f7bc5, + 0x3434c6, + 0x1c8485, + 0x289143, + 0x3bc889, + 0x278a4c, + 0x321c4c, + 0x2d34c8, + 0x2fad87, + 0x305708, + 0x3064ca, + 0x306ecb, + 0x2b9d08, + 0x217308, + 0x237946, + 0x36a6c5, + 0x36870a, + 0x3bb805, + 0x216a42, + 0x2c6047, + 0x250586, + 0x379c05, + 0x37de89, + 0x365445, + 0x390f45, + 0x2f1c49, + 0x3caac6, + 0x3b5dc8, + 0x245143, + 0x222e06, + 0x277dc6, + 0x31c985, + 0x31c989, + 0x2f0449, + 0x27ec47, + 0x11f044, + 0x31f047, + 0x21ac49, + 0x237cc5, + 0x3ab88, + 0x394e05, + 0x371a85, + 0x35cf49, + 0x2013c2, + 0x2e17c4, + 0x200d82, 0x200b02, - 0x2c7805, - 0x313e88, - 0x2b8b85, - 0x2c6fc3, - 0x2c6fc5, - 0x2d71c3, - 0x20f7c2, - 0x24af84, - 0x2a9943, - 0x210482, - 0x34a404, - 0x2e9103, - 0x2054c2, - 0x2b8c03, - 0x290e84, - 0x2f9303, - 0x2605c4, - 0x204fc2, - 0x21a703, - 0x238b03, + 0x2c44c5, + 0x31cb88, + 0x2b55c5, + 0x2c3c83, + 0x2c3c85, + 0x2d5803, + 0x20c9c2, + 0x24be44, + 0x2b08c3, + 0x204782, + 0x34af84, + 0x2e9343, + 0x21ad42, + 0x2b5643, + 0x28f7c4, + 0x2fa0c3, + 0x25f344, + 0x2022c2, + 0x214d83, + 0x223a03, 0x200b42, - 0x2d48c2, - 0x2f06c9, - 0x203502, - 0x28cd44, - 0x200cc2, - 0x2424c4, - 0x2ebb44, - 0x206404, - 0x203582, - 0x237982, - 0x231703, - 0x304a03, - 0x24cec4, - 0x295904, - 0x2f1504, - 0x2f8884, - 0x310003, - 0x324183, - 0x391944, - 0x317a04, - 0x317b46, - 0x22bd82, - 0x40d83, - 0x207c02, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, + 0x2e03c2, + 0x2f0289, + 0x202202, + 0x28a604, + 0x20e2c2, + 0x247344, + 0x2f1644, + 0x36a184, + 0x202fc2, + 0x237582, + 0x239f43, + 0x306c83, + 0x248b84, + 
0x29a7c4, + 0x2f0b84, + 0x2f9644, + 0x318d03, + 0x366e83, + 0x3736c4, + 0x320844, + 0x320986, + 0x22b4c2, + 0x3ce03, + 0x209302, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, 0x2000c2, - 0x203a83, - 0x20d183, - 0x2355c3, - 0x206ac3, - 0x214d83, - 0x224604, - 0x2f0984, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x21a803, - 0x2f68c4, - 0x329903, - 0x2a9183, - 0x379984, - 0x33d546, - 0x20b403, - 0xf4d45, - 0xdf5c7, - 0x31e283, - 0x5fe21448, - 0x22d483, - 0x2b4c43, - 0x249583, - 0x20d203, - 0x39b745, - 0x13a643, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x210f83, - 0x2311c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x219a43, - 0x222103, - 0x2361c4, - 0xe6003, - 0x2482c3, - 0x3664c4, - 0xf4d45, - 0x2c24c5, - 0xdf5c7, - 0x207c02, - 0x206042, + 0x373a83, + 0x209303, + 0x2351c3, + 0x207343, + 0x22b883, + 0x21e084, + 0x2f0544, + 0x226004, + 0x215c83, + 0x24b583, + 0x214e83, + 0x2f7584, + 0x32bb03, + 0x2a6e43, + 0x37d184, + 0x394c06, + 0x206ec3, + 0xf6d85, + 0xe41c7, + 0x226a03, + 0x6021be08, + 0x25d0c3, + 0x2b1383, + 0x2450c3, + 0x209383, + 0x365f45, + 0x13cb03, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x20d8c3, + 0x231083, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x20f8c3, + 0x215c83, + 0x235dc4, + 0xe6243, + 0x24b583, + 0x30d1c4, + 0xf6d85, + 0x2bf1c5, + 0xe41c7, + 0x209302, + 0x2046c2, 0x200382, - 0x201402, - 0x182c3, + 0x203202, + 0x2543, 0x2003c2, - 0x170c04, - 0x20d183, - 0x2385c4, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x2bf0c4, - 0x222103, - 0x182c3, - 0x2482c3, - 0x215e83, - 0x28a904, - 0x1513c8, - 0x20d183, - 0x2182c3, - 0x15dc3, - 0x146b44, - 0x248084, - 0x1513c8, - 0x20d183, - 0x2526c4, - 0x224604, - 0x2182c3, - 0x200ec2, - 0xe6003, - 0x2482c3, - 0x206103, - 0x71dc4, - 0x3c0045, - 0x2061c2, - 0x205ec3, - 0x4389, - 0xdd706, - 0x191b08, + 0x10ce44, + 0x209303, + 0x238084, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x226004, + 0x215c83, + 0x2543, + 0x24b583, + 0x201203, + 0x28b004, + 0xaf0c8, + 0x209303, + 0x202543, + 0xd983, + 0x153bc4, + 0x243644, + 0xaf0c8, + 0x209303, + 0x24f544, + 0x21e084, + 0x202543, + 0x203c02, + 0xe6243, + 0x24b583, + 0x22d183, + 0x71504, + 0x3c1245, + 0x216a42, + 0x369c43, + 0x168109, + 0xdd0c6, + 0x173888, 0x2000c2, - 0x1513c8, - 0x207c02, - 0x2355c3, - 0x214d83, + 0xaf0c8, + 0x209302, + 0x2351c3, + 0x22b883, 0x2005c2, - 0x182c3, - 0x2482c3, - 0xe182, + 0x2543, + 0x24b583, + 0xb682, 0x2000c2, - 0x1b4047, - 0x107a89, - 0x8303, - 0x1513c8, - 0x1725c3, - 0x63752987, - 0xd183, - 0x1cb288, - 0x2355c3, - 0x214d83, - 0x46486, - 0x219a43, - 0x92288, - 0xc4948, - 0x3e186, - 0x272203, - 0xcf388, - 0x9ae43, - 0x638e1d06, - 0xe7685, - 0x357c7, - 0x22103, - 0x95c3, - 0x482c3, - 0x6442, - 0x1940ca, - 0x19743, - 0xc78c3, - 0x2fc4c4, - 0x10d78b, - 0x10dd48, - 0x91542, - 0x14581c7, - 0x1590b47, - 0x14c7088, - 0x1517643, - 0x14a94b, - 0x12b507, + 0x1b5887, + 0x10a1c9, + 0x39c3, + 0xaf0c8, + 0x10e803, + 0x63b54747, + 0x9303, + 0x1cc2c8, + 0x2351c3, + 0x22b883, + 0x41e06, + 0x20f8c3, + 0x90d88, + 0xc1348, + 0xcda86, + 0x2287c3, + 0xcb788, + 0x99043, + 0x63ce07c6, + 0xe7785, + 0x353c7, + 0x15c83, + 0x4f43, + 0x4b583, + 0x4482, + 0x1a23ca, + 0x5303, + 0xc4583, + 0x2fde44, + 0x11648b, + 0x116a48, + 0x8fe82, + 0x1454d87, + 0x15728c7, + 0x14c3d48, + 0x1520483, + 0x14b4cb, + 0x12d707, 0x2000c2, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x2db184, - 0x214d83, - 0x219a43, - 
0x272203, - 0x222103, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x20d203, - 0x222103, - 0x2482c3, - 0x284743, - 0x215e83, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x15dc3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x20d203, - 0x222103, - 0x2482c3, - 0x22e042, + 0x209302, + 0x209303, + 0x2351c3, + 0x2da884, + 0x22b883, + 0x20f8c3, + 0x2287c3, + 0x215c83, + 0x209303, + 0x2351c3, + 0x22b883, + 0x209383, + 0x215c83, + 0x24b583, + 0x282583, + 0x201203, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0xd983, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x209383, + 0x215c83, + 0x24b583, + 0x22c682, 0x2000c1, 0x2000c2, 0x200201, - 0x334b82, - 0x1513c8, - 0x21f145, + 0x336b02, + 0xaf0c8, + 0x219505, 0x200101, - 0xd183, - 0x200d81, + 0x9303, + 0x2029c1, 0x200501, - 0x201481, - 0x24ea42, - 0x385944, - 0x24ea43, + 0x200d41, + 0x24a802, + 0x389084, + 0x24a803, 0x200041, 0x200801, 0x200181, 0x200701, - 0x2f4bc7, - 0x2f898f, - 0x37f3c6, + 0x2f6c07, + 0x2f974f, + 0x3a3dc6, 0x2004c1, - 0x346b86, - 0x200d01, + 0x353c06, + 0x201741, 0x200581, - 0x3ccf0e, + 0x3ce80e, 0x2003c1, - 0x2482c3, - 0x204c41, - 0x247705, - 0x206442, - 0x3ab685, + 0x24b583, + 0x201401, + 0x242b85, + 0x204482, + 0x3acb85, 0x200401, 0x200741, 0x2007c1, - 0x2061c2, + 0x216a42, 0x200081, - 0x2020c1, - 0x207b01, - 0x2018c1, - 0x201241, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x21b2c3, - 0x20d183, - 0x214d83, - 0x91488, - 0x272203, - 0x222103, - 0x7783, - 0x2482c3, - 0x14eacc8, - 0x10e88, - 0xf4d45, - 0x1513c8, - 0x182c3, - 0xf4d45, - 0x1445c4, - 0x4c604, - 0x14eacca, - 0x1513c8, - 0xe6003, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x222103, - 0x2482c3, - 0x208403, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x2db184, - 0x2482c3, - 0x280ec5, - 0x35ebc4, - 0x20d183, - 0x222103, - 0x2482c3, - 0xa7e8a, - 0x116584, - 0x11d1c6, - 0x207c02, - 0x20d183, - 0x233009, - 0x2355c3, - 0x20a749, - 0x214d83, - 0x272203, - 0x222103, - 0x7d904, - 0x182c3, - 0x2482c3, - 0x2f5948, - 0x243347, - 0x3c0045, - 0x1c5948, - 0x1b4047, - 0xee6ca, - 0x10978b, - 0x146dc7, - 0x452c8, - 0x11264a, - 0x17808, + 0x207301, + 0x20b6c1, + 0x201d81, + 0x202e01, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x215943, + 0x209303, + 0x22b883, + 0x8fdc8, + 0x2287c3, + 0x215c83, + 0x89d83, + 0x24b583, + 0x14eb148, + 0xd7c8, + 0xf6d85, + 0xaf0c8, + 0x2543, + 0xf6d85, + 0x18a904, + 0x48284, + 0x14eb14a, + 0xaf0c8, + 0xe6243, + 0x209303, + 0x2351c3, + 0x22b883, + 0x215c83, + 0x24b583, + 0x203ac3, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x2da884, + 0x24b583, + 0x27f485, + 0x3c9d44, + 0x209303, + 0x215c83, + 0x24b583, + 0xa5b8a, + 0x11f3c4, + 0x124e06, + 0x209302, + 0x209303, + 0x232649, + 0x2351c3, + 0x2a8d09, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x7bc04, + 0x2543, + 0x24b583, + 0x2f5e48, + 0x23f107, + 0x3c1245, + 0x1c6f48, + 0x1b5887, + 0xee28a, + 0x111e4b, + 0x153e47, + 0x40c48, + 0x11b34a, + 0x12a08, + 0x10a1c9, + 0x25447, + 0x66e07, + 0x10b1c8, + 0x1cc2c8, + 0x4234f, + 0x260c5, + 0x1cc5c7, + 0x41e06, + 0x18e947, + 0x112a86, + 0x90d88, + 0xa1346, + 0x1ca8c7, + 0x55e09, + 0x5d47, 0x107a89, - 0x298c7, - 0x156bc7, - 0x126248, - 0x1cb288, - 0x469cf, - 0x157345, - 0x1cb587, - 0x46486, - 0x19e7c7, - 0x10a3c6, - 0x92288, - 0xa2f86, - 0x15fa47, - 0x125649, - 0x1b4a47, - 0x1012c9, - 0xb92c9, - 0xc2246, - 0xc4948, - 0xc3245, - 0x7df8a, - 0xcf388, - 0x9ae43, - 0xd7b48, - 0x357c7, - 
0x75a85, - 0x64dd0, - 0x95c3, - 0xe6003, - 0x1254c7, - 0x2c145, - 0xef4c8, - 0x6c345, - 0xc78c3, - 0x5588, - 0x59406, - 0x153989, - 0xaec87, - 0x464b, - 0x141044, - 0x106444, - 0x10d78b, - 0x10dd48, - 0x10e8c7, - 0xf4d45, - 0x20d183, - 0x2355c3, - 0x3d0443, - 0x2482c3, - 0x206003, - 0x214d83, - 0xe6003, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x8348b, + 0xb5ac9, + 0xbef46, + 0xc1348, + 0xbff45, + 0x7c28a, + 0xcb788, + 0x99043, + 0xd5d88, + 0x353c7, + 0x167885, + 0x60e10, + 0x4f43, + 0xe6243, + 0x55c87, + 0x27345, + 0xef088, + 0x693c5, + 0xc4583, + 0x169308, + 0x15ce06, + 0x1a68c9, + 0xac5c7, + 0x1683cb, + 0x1430c4, + 0x108f84, + 0x11648b, + 0x116a48, + 0x117447, + 0xf6d85, + 0x209303, + 0x2351c3, + 0x210a43, + 0x24b583, + 0x23ffc3, + 0x22b883, + 0xe6243, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x736cb, 0x2000c2, - 0x207c02, - 0x2482c3, - 0x1513c8, + 0x209302, + 0x24b583, + 0xaf0c8, 0x2000c2, - 0x207c02, + 0x209302, 0x200382, 0x2005c2, + 0x2025c2, + 0x215c83, + 0x2003c2, + 0x2000c2, + 0x373a83, + 0x209302, + 0x209303, + 0x2351c3, + 0x200382, + 0x22b883, + 0x20f8c3, + 0x2287c3, + 0x226004, + 0x215c83, + 0x213203, + 0x2543, + 0x24b583, + 0x2fde44, + 0x20ab43, + 0x22b883, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x202543, + 0x24b583, + 0x3b8b87, + 0x209303, + 0x20a607, + 0x307846, + 0x20a6c3, + 0x20f783, + 0x22b883, + 0x202b03, + 0x21e084, + 0x39bec4, + 0x2e9e86, + 0x200f03, + 0x215c83, + 0x24b583, + 0x27f485, + 0x2abac4, + 0x2bae83, + 0x20b2c3, + 0x2c6047, + 0x31bd05, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x57cc7, + 0x8ec87, + 0x1a3605, + 0x219002, + 0x24b383, + 0x20f083, + 0x373a83, + 0x6ce09303, + 0x20e7c2, + 0x2351c3, + 0x202c03, + 0x22b883, + 0x21e084, + 0x329f83, + 0x2ebcc3, + 0x2287c3, + 0x226004, + 0x6d205f02, + 0x215c83, + 0x24b583, + 0x233603, + 0x2189c3, + 0x22c682, + 0x20ab43, + 0xaf0c8, + 0x22b883, + 0xd983, + 0x32c3c4, + 0x373a83, + 0x209302, + 0x209303, + 0x238084, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x20f8c3, + 0x319584, + 0x229d44, + 0x2d7d46, + 0x226004, + 0x215c83, + 0x24b583, + 0x214e83, + 0x250586, + 0x3b18b, + 0x3cf86, + 0x103a8a, + 0x11d74a, + 0xaf0c8, + 0x26e144, + 0x6e609303, + 0x373a44, + 0x2351c3, + 0x2aa444, + 0x22b883, + 0x2ba043, + 0x2287c3, + 0x215c83, + 0xe6243, + 0x24b583, + 0xc6e43, + 0x343a4b, + 0x3bf5ca, + 0x3d100c, + 0xe1088, + 0x2000c2, + 0x209302, + 0x200382, + 0x230105, + 0x21e084, + 0x209382, + 0x2287c3, + 0x229d44, + 0x203202, + 0x2003c2, + 0x201202, + 0x22c682, + 0x173a83, + 0xbc2, + 0x2b3b89, + 0x2ed608, + 0x22b709, + 0x326249, + 0x33340a, + 0x359d0a, + 0x208202, + 0x342c02, + 0x9302, + 0x209303, + 0x22c842, + 0x242246, + 0x37b002, + 0x204702, + 0x3121ce, + 0x214dce, + 0x280b87, + 0x215c07, + 0x283202, + 0x2351c3, + 0x22b883, 0x200d02, - 0x222103, - 0x2003c2, - 0x2000c2, - 0x203a83, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x200382, - 0x214d83, - 0x219a43, - 0x272203, - 0x2bf0c4, - 0x222103, - 0x214b83, - 0x182c3, - 0x2482c3, - 0x2fc4c4, - 0x2020c3, - 0x214d83, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2182c3, - 0x2482c3, - 0x3b7447, - 0x20d183, - 0x20fa47, - 0x301086, - 0x20fb03, - 0x219903, - 0x214d83, - 0x207483, - 0x224604, - 0x399e44, - 0x2e9c46, - 0x2109c3, - 0x222103, - 0x2482c3, - 0x280ec5, - 0x2ad2c4, - 0x2be783, - 0x20ddc3, - 0x2c96c7, - 0x313005, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x2482c3, - 0x5b3c7, - 0x165807, - 0x1a2505, - 0x221bc2, - 
0x24f343, - 0x219003, - 0x203a83, - 0x6ca0d183, - 0x201c42, - 0x2355c3, - 0x202b03, - 0x214d83, - 0x224604, - 0x308103, - 0x3924c3, - 0x272203, - 0x2bf0c4, - 0x6ce039c2, - 0x222103, - 0x2482c3, - 0x233903, - 0x21e903, - 0x22e042, - 0x2020c3, - 0x1513c8, - 0x214d83, - 0x15dc3, - 0x32a1c4, - 0x203a83, - 0x207c02, - 0x20d183, - 0x2385c4, - 0x2355c3, - 0x214d83, - 0x224604, - 0x219a43, - 0x310884, - 0x215584, - 0x2d9fc6, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x21a803, - 0x254606, - 0x3f08b, - 0x39c86, - 0x1ceaca, - 0x11490a, - 0x1513c8, - 0x23a0c4, - 0x6e20d183, - 0x203a44, - 0x2355c3, - 0x25a644, - 0x214d83, - 0x2bd943, - 0x272203, - 0x222103, - 0xe6003, - 0x2482c3, - 0xca4c3, - 0x3415cb, - 0x3be4ca, - 0x3d0ecc, - 0xe25c8, - 0x2000c2, - 0x207c02, - 0x200382, - 0x230285, - 0x224604, - 0x202002, - 0x272203, - 0x215584, - 0x201402, - 0x2003c2, - 0x207bc2, - 0x22e042, - 0x3a83, - 0x4ce42, - 0x2b6409, - 0x333ec8, - 0x214c09, - 0x21c689, - 0x227e0a, - 0x319f4a, - 0x20c382, - 0x202d42, - 0x7c02, - 0x20d183, - 0x203382, - 0x2468c6, - 0x377a42, - 0x201ec2, - 0x309b0e, - 0x21a74e, - 0x282087, - 0x222087, - 0x2853c2, - 0x2355c3, - 0x214d83, - 0x208002, 0x2005c2, - 0x19883, - 0x2387cf, - 0x246c02, - 0x305607, - 0x2b0d87, - 0x320047, - 0x2b4fcc, - 0x2df18c, - 0x229f04, - 0x261b4a, - 0x21a682, - 0x20b2c2, - 0x2babc4, + 0xf703, + 0x23828f, + 0x242582, + 0x3344c7, + 0x2ae547, + 0x326e47, + 0x2b170c, + 0x2e3d8c, + 0x225a84, + 0x25e58a, + 0x214d02, + 0x206d82, + 0x2b72c4, 0x200702, - 0x218ac2, - 0x2df3c4, - 0x2180c2, - 0x204942, - 0x24f03, - 0x2a3007, - 0x242305, - 0x216182, - 0x30cd84, - 0x326302, - 0x2e1e88, - 0x222103, - 0x3d20c8, - 0x203242, - 0x22a0c5, - 0x396d46, - 0x2482c3, - 0x209dc2, - 0x2f0387, - 0x6442, - 0x25fb85, - 0x209b45, - 0x202202, - 0x212a82, - 0x2bc7ca, - 0x365b4a, - 0x2721c2, - 0x29cec4, - 0x202cc2, - 0x2493c8, - 0x211a02, - 0x2af008, - 0x30a547, - 0x30ab09, - 0x209bc2, - 0x30f685, - 0x20a445, - 0x2250cb, - 0x2cc70c, - 0x22dd08, - 0x32b348, - 0x22bd82, - 0x2a8102, + 0x20eb42, + 0x2e3fc4, + 0x213302, + 0x205142, + 0x16c03, + 0x2a13c7, + 0x23d985, + 0x210e42, + 0x315a84, + 0x30b282, + 0x2e0948, + 0x215c83, + 0x34e148, + 0x202d82, + 0x225c45, + 0x398f46, + 0x24b583, + 0x2057c2, + 0x2eff47, + 0x4482, + 0x25c285, + 0x3c4905, + 0x2085c2, + 0x20dc42, + 0x2b8eca, + 0x293b8a, + 0x265ac2, + 0x29b6c4, + 0x203c42, + 0x244f08, + 0x20c842, + 0x2ac948, + 0x312c07, + 0x3131c9, + 0x25c302, + 0x318385, + 0x211d45, + 0x21e34b, + 0x2c90cc, + 0x22c348, + 0x32d548, + 0x22b4c2, + 0x2a5e02, 0x2000c2, - 0x1513c8, - 0x207c02, - 0x20d183, + 0xaf0c8, + 0x209302, + 0x209303, 0x200382, - 0x201402, - 0x182c3, + 0x203202, + 0x2543, 0x2003c2, - 0x2482c3, - 0x207bc2, + 0x24b583, + 0x201202, 0x2000c2, - 0xf4d45, - 0x6f607c02, - 0x6fa14d83, - 0x224f03, - 0x202002, - 0x222103, - 0x3c92c3, - 0x6fe482c3, - 0x2eca43, - 0x285406, - 0x1615e83, - 0xf4d45, - 0x14a4cb, - 0x1513c8, - 0x71f87, - 0x70547, - 0x127cc5, - 0xaa94d, - 0xa8bca, - 0x165987, - 0x2e484, - 0x2e4c3, - 0xbb944, - 0x70601902, - 0x70a08e82, - 0x70e01742, - 0x71200fc2, - 0x71615402, - 0x71a00f82, - 0xdf5c7, - 0x71e07c02, - 0x72206182, - 0x72620f02, - 0x72a03282, - 0x21a743, - 0x2a344, - 0x2318c3, - 0x72e16002, - 0x60fc8, - 0x732051c2, - 0x52f07, - 0x73600042, - 0x73a04142, - 0x73e00182, - 0x74200d42, - 0x74615642, - 0x74a005c2, - 0x13c385, - 0x227a43, - 0x312104, - 0x74e00702, - 0x75218e42, - 0x75600e42, - 0x3b04b, - 0x75a02a82, - 0x76252782, - 0x76602002, - 0x76a00d02, - 0x76e2a542, - 0x77203202, - 0x77606282, - 0x77a6f582, - 0x77e039c2, - 0x78201dc2, - 0x78601402, - 
0x78a12c82, - 0x78e0c6c2, - 0x79214ec2, - 0xe5e44, - 0x33cf83, - 0x79625482, - 0x79a1a382, - 0x79e13e42, - 0x7a2006c2, - 0x7a6003c2, - 0x7aa10482, - 0x83607, - 0x7ae08642, - 0x7b2024c2, - 0x7b607bc2, - 0x7ba1a702, - 0xfeb8c, - 0x7be05102, - 0x7c22b042, - 0x7c602942, - 0x7ca09642, - 0x7ce0a2c2, - 0x7d213a42, - 0x7d603682, - 0x7da07802, - 0x7de795c2, - 0x7e279b42, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x26cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x75f08103, - 0x226cc3, - 0x39b7c4, - 0x333dc6, - 0x2fa803, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x3b5809, - 0x24ce42, - 0x3a4a83, - 0x2b95c3, - 0x373d85, - 0x202b03, - 0x308103, - 0x226cc3, - 0x2a3983, - 0x202cc3, - 0x3c3f89, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x24ce42, - 0x24ce42, - 0x308103, - 0x226cc3, - 0x7ea0d183, - 0x2355c3, - 0x21c8c3, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x1513c8, - 0x207c02, - 0x20d183, - 0x222103, - 0x2482c3, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x272203, - 0x222103, - 0x182c3, - 0x2482c3, - 0x248084, - 0x207c02, - 0x20d183, - 0x371b83, - 0x2355c3, - 0x2526c4, - 0x3d0443, - 0x214d83, - 0x224604, - 0x219a43, - 0x272203, - 0x222103, - 0x2482c3, - 0x206103, - 0x3c0045, - 0x202cc3, - 0x2020c3, - 0x182c3, - 0x207c02, - 0x20d183, - 0x308103, - 0x222103, - 0x2482c3, + 0xf6d85, + 0x6fa09302, + 0x6fe2b883, + 0x216c03, + 0x209382, + 0x215c83, + 0x32e303, + 0x7024b583, + 0x2ec143, + 0x283246, + 0x1601203, + 0xf6d85, + 0x14b04b, + 0xaf0c8, + 0x65887, + 0x6f887, + 0x178145, + 0xa884d, + 0xa688a, + 0x939c7, + 0x2bdc4, + 0x2be03, + 0xb8044, + 0x70a02302, + 0x70e04542, + 0x71203902, + 0x71601b82, + 0x71a0f642, + 0x71e00dc2, + 0xe41c7, + 0x72209302, + 0x7262d202, + 0x72a1b8c2, + 0x72e069c2, + 0x214dc3, + 0x22984, + 0x23a103, + 0x73210cc2, + 0x5da08, + 0x73602242, + 0x4fb87, + 0x73a00042, + 0x73e01042, + 0x74200182, + 0x74606982, + 0x74a07502, + 0x74e005c2, + 0x16bd45, + 0x221703, + 0x31ae04, + 
0x75200702, + 0x7560eec2, + 0x75a02902, + 0x7a3cb, + 0x75e01e02, + 0x7664f602, + 0x76a09382, + 0x76e025c2, + 0x77225282, + 0x77603542, + 0x77a04842, + 0x77e6ca42, + 0x78205f02, + 0x78602982, + 0x78a03202, + 0x78e21c42, + 0x792062c2, + 0x7962b9c2, + 0xe6084, + 0x33f9c3, + 0x79a16942, + 0x79e14a02, + 0x7a212d82, + 0x7a6006c2, + 0x7aa003c2, + 0x7ae04782, + 0x73847, + 0x7b203d02, + 0x7b601182, + 0x7ba01202, + 0x7be14d82, + 0x10084c, + 0x7c217442, + 0x7c62a902, + 0x7ca01502, + 0x7ce04fc2, + 0x7d205cc2, + 0x7d655ac2, + 0x7da0c902, + 0x7de10342, + 0x7e278142, + 0x7e6786c2, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x20983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x76329f83, + 0x220983, + 0x365fc4, + 0x2ed506, + 0x2fb603, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x3b6f49, + 0x200bc2, + 0x3c7e03, + 0x2b5dc3, + 0x30ffc5, + 0x202c03, + 0x329f83, + 0x220983, + 0x2a0cc3, + 0x236e03, + 0x3c56c9, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x329f83, + 0x220983, + 0x200bc2, + 0x200bc2, + 0x329f83, + 0x220983, + 0x7ee09303, + 0x2351c3, + 0x326483, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0xaf0c8, + 0x209302, + 0x209303, + 0x215c83, + 0x24b583, + 0x209303, + 0x2351c3, + 0x22b883, + 0x2287c3, + 0x215c83, + 0x2543, + 0x24b583, + 0x243644, + 0x209302, + 0x209303, + 0x30ddc3, + 0x2351c3, + 0x24f544, + 0x210a43, + 0x22b883, + 0x21e084, + 0x20f8c3, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x22d183, + 0x3c1245, + 0x236e03, + 0x20ab43, + 0x2543, + 0x209302, + 0x209303, + 0x329f83, + 0x215c83, + 0x24b583, 0x2000c2, - 0x203a83, - 0x1513c8, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x232406, - 0x224604, - 0x219a43, - 0x2bf0c4, - 0x222103, - 0x2482c3, - 0x21a803, - 0x20d183, - 0x2355c3, - 0x222103, - 0x2482c3, - 0x159e407, - 0xc107, - 0x20d183, - 0x39c86, - 0x2355c3, - 0x214d83, - 0xe4286, - 0x222103, - 0x2482c3, - 0x3280c8, - 0x32b189, - 0x33bb49, - 0x342708, - 0x398f88, - 0x398f89, - 0x31eeca, - 0x35c84a, - 0x39494a, - 0x39aa0a, - 
0x3be4ca, - 0x3ca14b, - 0x24b3cd, - 0x3644cf, - 0x274a10, - 0x35dccd, - 0x37cd4c, - 0x39a74b, - 0x70748, - 0xfad08, - 0xd4705, - 0xcdf45, + 0x373a83, + 0xaf0c8, + 0x209303, + 0x2351c3, + 0x22b883, + 0x231a46, + 0x21e084, + 0x20f8c3, + 0x226004, + 0x215c83, + 0x24b583, + 0x214e83, + 0x209303, + 0x2351c3, + 0x215c83, + 0x24b583, + 0x1443007, + 0x7f87, + 0x209303, + 0x3cf86, + 0x2351c3, + 0x22b883, + 0xe2fc6, + 0x215c83, + 0x24b583, + 0x32a2c8, + 0x32d389, + 0x33e289, + 0x345288, + 0x39b008, + 0x39b009, + 0x32598a, + 0x35884a, + 0x396b4a, + 0x39ca8a, + 0x3bf5ca, + 0x3cb54b, + 0x2463cd, + 0x36294f, + 0x271710, + 0x35b14d, + 0x380a8c, + 0x39c7cb, + 0x6fa88, + 0xfbb08, + 0xe0205, + 0xc9f05, 0x2000c2, - 0x312e45, - 0x20a483, - 0x82207c02, - 0x2355c3, - 0x214d83, - 0x38b6c7, - 0x249583, - 0x272203, - 0x222103, - 0x252ec3, - 0x20d443, - 0x2182c3, - 0x2482c3, - 0x25cb86, - 0x2061c2, - 0x2020c3, - 0x1513c8, + 0x31bb45, + 0x2094c3, + 0x82609302, + 0x2351c3, + 0x22b883, + 0x3a1107, + 0x2450c3, + 0x2287c3, + 0x215c83, + 0x24fb43, + 0x2095c3, + 0x202543, + 0x24b583, + 0x25c006, + 0x216a42, + 0x20ab43, + 0xaf0c8, 0x2000c2, - 0x203a83, - 0x207c02, - 0x20d183, - 0x2355c3, - 0x214d83, - 0x224604, - 0x272203, - 0x222103, - 0x2482c3, - 0x215e83, - 0x4384, - 0x14f82c6, + 0x373a83, + 0x209302, + 0x209303, + 0x2351c3, + 0x22b883, + 0x21e084, + 0x2287c3, + 0x215c83, + 0x24b583, + 0x201203, + 0x168104, + 0x14f9086, 0x2000c2, - 0x207c02, - 0x214d83, - 0x272203, - 0x2482c3, + 0x209302, + 0x22b883, + 0x2287c3, + 0x24b583, } // children is the list of nodes' children, the parent's wildcard bit and the @@ -9221,528 +9225,529 @@ var children = [...]uint32{ 0x40000000, 0x50000000, 0x60000000, - 0x185460f, - 0x1858615, - 0x185c616, - 0x1880617, - 0x19dc620, - 0x19f4677, + 0x184060a, + 0x1844610, + 0x1848611, + 0x186c612, + 0x19c861b, + 0x19e0672, + 0x19f4678, 0x1a0867d, - 0x1a1c682, - 0x1a3c687, - 0x1a4068f, - 0x1a58690, - 0x1a5c696, - 0x1a84697, - 0x1a886a1, - 0x1aa06a2, - 0x1aa46a8, - 0x1aa86a9, - 0x1ae46aa, - 0x1ae86b9, - 0x61af06ba, - 0x21af86bc, - 0x1b406be, - 0x1b446d0, - 0x1b646d1, - 0x1b786d9, - 0x1b7c6de, - 0x1bac6df, - 0x1bc86eb, - 0x1bf06f2, - 0x1c006fc, - 0x1c04700, - 0x1c9c701, + 0x1a28682, + 0x1a2c68a, + 0x1a4468b, + 0x1a48691, + 0x1a70692, + 0x1a7469c, + 0x1a8c69d, + 0x1a906a3, + 0x1a946a4, + 0x1ad06a5, + 0x1ad46b4, + 0x61adc6b5, + 0x21ae46b7, + 0x1b2c6b9, + 0x1b306cb, + 0x1b506cc, + 0x1b646d4, + 0x1b686d9, + 0x1b986da, + 0x1bb46e6, + 0x1bdc6ed, + 0x1bec6f7, + 0x1bf06fb, + 0x1c886fc, + 0x1c9c722, 0x1cb0727, - 0x1cc472c, - 0x1cf4731, - 0x1d0473d, + 0x1ce072c, + 0x1cf0738, + 0x1d0473c, 0x1d18741, 0x1dbc746, - 0x1fb876f, - 0x1fbc7ee, - 0x20287ef, - 0x209480a, - 0x20ac825, - 0x20c082b, - 0x20c8830, - 0x20dc832, - 0x20e0837, - 0x20fc838, - 0x214883f, - 0x2164852, - 0x2168859, + 0x1fbc76f, + 0x1fc07ef, + 0x202c7f0, + 0x209880b, + 0x20b0826, + 0x20c482c, + 0x20cc831, + 0x20e0833, + 0x20e4838, + 0x2100839, + 0x214c840, + 0x2168853, 0x216c85a, - 0x219085b, - 0x21cc864, - 0x621d0873, - 0x21e8874, - 0x220087a, - 0x2208880, - 0x2218882, - 0x22c8886, - 0x22cc8b2, - 0x222dc8b3, - 0x222e08b7, + 0x217085b, + 0x219485c, + 0x21d0865, + 0x621d4874, + 0x21ec875, + 0x220487b, + 0x220c881, + 0x221c883, + 0x22cc887, + 0x22d08b3, + 0x222e08b4, 0x222e48b8, - 0x23288b9, - 0x232c8ca, - 0x27e88cb, - 0x228909fa, - 0x22894a24, - 0x22898a25, - 0x228a4a26, - 0x228a8a29, - 0x228b4a2a, - 0x228b8a2d, - 0x228bca2e, + 0x222ec8b9, + 0x23308bb, + 0x23348cc, + 0x27f08cd, + 0x228989fc, + 0x2289ca26, + 0x228a0a27, + 0x228aca28, + 0x228b0a2b, + 
0x228bca2c, 0x228c0a2f, 0x228c4a30, 0x228c8a31, - 0x228d4a32, - 0x228d8a35, - 0x228e4a36, - 0x228e8a39, - 0x228eca3a, + 0x228cca32, + 0x228d0a33, + 0x228dca34, + 0x228e0a37, + 0x228eca38, 0x228f0a3b, - 0x228fca3c, - 0x22900a3f, - 0x2290ca40, - 0x22910a43, - 0x22914a44, + 0x228f4a3c, + 0x228f8a3d, + 0x22904a3e, + 0x22908a41, + 0x22914a42, 0x22918a45, - 0x291ca46, + 0x2291ca46, 0x22920a47, - 0x2292ca48, - 0x22930a4b, - 0x2938a4c, - 0x297ca4e, - 0x2299ca5f, - 0x229a0a67, - 0x229a4a68, + 0x2924a48, + 0x22928a49, + 0x22934a4a, + 0x22938a4d, + 0x2940a4e, + 0x2984a50, + 0x229a4a61, 0x229a8a69, - 0x29aca6a, + 0x229aca6a, 0x229b0a6b, - 0x29b8a6c, - 0x29bca6e, - 0x29c0a6f, - 0x29dca70, - 0x29f4a77, - 0x29f8a7d, - 0x2a08a7e, - 0x2a14a82, - 0x2a48a85, - 0x2a4ca92, - 0x2a64a93, - 0x22a6ca99, - 0x22a70a9b, - 0x22a78a9c, - 0x2b50a9e, - 0x22b54ad4, - 0x2b5cad5, - 0x2b60ad7, - 0x22b64ad8, + 0x29b4a6c, + 0x229b8a6d, + 0x29c0a6e, + 0x29c4a70, + 0x29c8a71, + 0x29e4a72, + 0x29fca79, + 0x2a00a7f, + 0x2a10a80, + 0x2a1ca84, + 0x2a50a87, + 0x2a54a94, + 0x2a6ca95, + 0x22a74a9b, + 0x22a78a9d, + 0x22a80a9e, + 0x2b58aa0, + 0x22b5cad6, + 0x2b64ad7, 0x2b68ad9, - 0x2b80ada, - 0x2b94ae0, - 0x2bbcae5, - 0x2bdcaef, - 0x2c0caf7, - 0x2c34b03, - 0x2c38b0d, - 0x2c5cb0e, - 0x2c60b17, - 0x2c74b18, - 0x2c78b1d, - 0x2c7cb1e, - 0x2c9cb1f, - 0x2cb8b27, - 0x2cbcb2e, - 0x22cc0b2f, + 0x22b6cada, + 0x2b70adb, + 0x2b88adc, + 0x2b9cae2, + 0x2bc4ae7, + 0x2be4af1, + 0x2c14af9, + 0x2c3cb05, + 0x2c40b0f, + 0x2c64b10, + 0x2c68b19, + 0x2c7cb1a, + 0x2c80b1f, + 0x2c84b20, + 0x2ca4b21, + 0x2cc0b29, 0x2cc4b30, - 0x2cc8b31, - 0x2cd8b32, - 0x2cdcb36, - 0x2d54b37, - 0x2d58b55, - 0x2d5cb56, - 0x2d7cb57, - 0x2d8cb5f, - 0x2da0b63, - 0x2db8b68, - 0x2dd0b6e, - 0x2de8b74, - 0x2decb7a, - 0x2e04b7b, - 0x2e20b81, - 0x2e40b88, - 0x2e60b90, - 0x2e7cb98, - 0x2edcb9f, - 0x2ef8bb7, - 0x2f08bbe, - 0x2f0cbc2, - 0x2f20bc3, - 0x2f64bc8, - 0x2fe4bd9, - 0x3014bf9, - 0x3018c05, - 0x3024c06, - 0x3044c09, - 0x3048c11, - 0x306cc12, - 0x3074c1b, - 0x30b0c1d, - 0x3100c2c, - 0x3104c40, - 0x318cc41, - 0x3190c63, - 0x23194c64, - 0x23198c65, - 0x2319cc66, - 0x231acc67, - 0x231b0c6b, - 0x231b4c6c, - 0x231b8c6d, - 0x231bcc6e, - 0x31d4c6f, - 0x31f8c75, - 0x3218c7e, - 0x3880c86, - 0x388ce20, - 0x38ace23, - 0x3a68e2b, - 0x3b38e9a, - 0x3ba8ece, - 0x3c00eea, - 0x3ce8f00, - 0x3d40f3a, - 0x3d7cf50, - 0x3e78f5f, - 0x3f44f9e, - 0x3fdcfd1, - 0x406cff7, - 0x40d101b, - 0x4309034, - 0x43c10c2, - 0x448d0f0, - 0x44d9123, - 0x4561136, - 0x459d158, - 0x45ed167, - 0x466517b, - 0x64669199, - 0x6466d19a, - 0x6467119b, - 0x46ed19c, - 0x47491bb, - 0x47c51d2, - 0x483d1f1, - 0x48bd20f, - 0x492922f, - 0x4a5524a, - 0x4aad295, - 0x64ab12ab, - 0x4b492ac, - 0x4bd12d2, - 0x4c1d2f4, - 0x4c85307, - 0x4d2d321, - 0x4df534b, - 0x4e5d37d, - 0x4f71397, - 0x64f753dc, - 0x64f793dd, - 0x4fd53de, - 0x50313f5, - 0x50c140c, - 0x513d430, - 0x518144f, - 0x5265460, - 0x5299499, - 0x52f94a6, - 0x536d4be, - 0x53f54db, - 0x54354fd, - 0x54a550d, - 0x654a9529, - 0x54d152a, - 0x54d5534, - 0x54ed535, - 0x550953b, - 0x554d542, - 0x555d553, - 0x5575557, - 0x55ed55d, - 0x55f557b, - 0x561157d, - 0x5625584, - 0x5641589, - 0x566d590, - 0x567159b, - 0x567959c, - 0x568d59e, - 0x56ad5a3, - 0x56b95ab, - 0x56c15ae, - 0x56fd5b0, - 0x57115bf, - 0x57195c4, - 0x57255c6, - 0x572d5c9, - 0x57515cb, - 0x57755d4, - 0x578d5dd, - 0x57915e3, - 0x57995e4, - 0x579d5e6, - 0x58195e7, - 0x581d606, - 0x5821607, - 0x5845608, - 0x5869611, - 0x588561a, - 0x5899621, - 0x58ad626, - 0x58b562b, - 0x58bd62d, - 0x58d162f, - 0x58e1634, - 0x58e5638, - 0x5901639, - 0x6191640, 
- 0x61c9864, - 0x61f5872, - 0x621187d, - 0x6231884, - 0x625188c, - 0x6295894, - 0x629d8a5, - 0x262a18a7, - 0x262a58a8, + 0x22cc8b31, + 0x2cccb32, + 0x2cd0b33, + 0x2ce0b34, + 0x2ce4b38, + 0x2d5cb39, + 0x2d60b57, + 0x2d64b58, + 0x2d84b59, + 0x2d94b61, + 0x2da8b65, + 0x2dc0b6a, + 0x2dd8b70, + 0x2df0b76, + 0x2df4b7c, + 0x2e0cb7d, + 0x2e28b83, + 0x2e48b8a, + 0x2e68b92, + 0x2e84b9a, + 0x2ee4ba1, + 0x2f00bb9, + 0x2f10bc0, + 0x2f14bc4, + 0x2f28bc5, + 0x2f6cbca, + 0x2fecbdb, + 0x3020bfb, + 0x3024c08, + 0x3030c09, + 0x3050c0c, + 0x3054c14, + 0x3078c15, + 0x3080c1e, + 0x30bcc20, + 0x310cc2f, + 0x3110c43, + 0x319cc44, + 0x31a0c67, + 0x231a4c68, + 0x231a8c69, + 0x231acc6a, + 0x231bcc6b, + 0x231c0c6f, + 0x231c4c70, + 0x231c8c71, + 0x231ccc72, + 0x31e4c73, + 0x3208c79, + 0x3228c82, + 0x3890c8a, + 0x389ce24, + 0x38bce27, + 0x3a78e2f, + 0x3b48e9e, + 0x3bb8ed2, + 0x3c10eee, + 0x3cf8f04, + 0x3d50f3e, + 0x3d8cf54, + 0x3e88f63, + 0x3f54fa2, + 0x3fecfd5, + 0x407cffb, + 0x40e101f, + 0x4319038, + 0x43d10c6, + 0x449d0f4, + 0x44e9127, + 0x457113a, + 0x45ad15c, + 0x45fd16b, + 0x467517f, + 0x6467919d, + 0x6467d19e, + 0x6468119f, + 0x46fd1a0, + 0x47591bf, + 0x47d51d6, + 0x484d1f5, + 0x48cd213, + 0x4939233, + 0x4a6524e, + 0x4abd299, + 0x64ac12af, + 0x4b592b0, + 0x4be12d6, + 0x4c2d2f8, + 0x4c9530b, + 0x4d3d325, + 0x4e0534f, + 0x4e6d381, + 0x4f8139b, + 0x64f853e0, + 0x64f893e1, + 0x4fe53e2, + 0x50413f9, + 0x50d1410, + 0x514d434, + 0x5191453, + 0x5275464, + 0x52a949d, + 0x53094aa, + 0x537d4c2, + 0x54054df, + 0x5445501, + 0x54b5511, + 0x654b952d, + 0x54e152e, + 0x54e5538, + 0x54fd539, + 0x551953f, + 0x555d546, + 0x556d557, + 0x558555b, + 0x55fd561, + 0x560557f, + 0x5621581, + 0x5635588, + 0x565158d, + 0x567d594, + 0x568159f, + 0x56895a0, + 0x569d5a2, + 0x56bd5a7, + 0x56c95af, + 0x56d15b2, + 0x570d5b4, + 0x57215c3, + 0x57295c8, + 0x57355ca, + 0x573d5cd, + 0x57615cf, + 0x57855d8, + 0x579d5e1, + 0x57a15e7, + 0x57a95e8, + 0x57ad5ea, + 0x58295eb, + 0x582d60a, + 0x583160b, + 0x585560c, + 0x5879615, + 0x589561e, + 0x58a9625, + 0x58bd62a, + 0x58c562f, + 0x58cd631, + 0x58e1633, + 0x58f1638, + 0x58f563c, + 0x591163d, + 0x61a1644, + 0x61d9868, + 0x6205876, + 0x6221881, + 0x6241888, + 0x6261890, + 0x62a5898, 0x62ad8a9, - 0x64558ab, - 0x26459915, - 0x26469916, - 0x2647191a, - 0x2647d91c, - 0x648191f, - 0x6485920, - 0x64ad921, - 0x64d592b, - 0x64d9935, - 0x6511936, - 0x6531944, - 0x708994c, - 0x708dc22, - 0x7091c23, - 0x27095c24, - 0x7099c25, - 0x2709dc26, + 0x262b18ab, + 0x262b58ac, + 0x62bd8ad, + 0x64658af, + 0x26469919, + 0x2647991a, + 0x2648191e, + 0x2648d920, + 0x6491923, + 0x6495924, + 0x64bd925, + 0x64e592f, + 0x64e9939, + 0x652193a, + 0x6541948, + 0x7099950, + 0x709dc26, 0x70a1c27, - 0x270adc28, + 0x270a5c28, + 0x70a9c29, + 0x270adc2a, 0x70b1c2b, - 0x70b5c2c, - 0x270b9c2d, - 0x70bdc2e, - 0x270c5c2f, - 0x70c9c31, + 0x270bdc2c, + 0x70c1c2f, + 0x70c5c30, + 0x270c9c31, 0x70cdc32, - 0x270ddc33, - 0x70e1c37, - 0x70e5c38, - 0x70e9c39, - 0x70edc3a, - 0x270f1c3b, + 0x270d5c33, + 0x70d9c35, + 0x70ddc36, + 0x270edc37, + 0x70f1c3b, 0x70f5c3c, 0x70f9c3d, 0x70fdc3e, - 0x7101c3f, - 0x27109c40, + 0x27101c3f, + 0x7105c40, + 0x7109c41, 0x710dc42, 0x7111c43, - 0x7115c44, - 0x27119c45, + 0x27119c44, 0x711dc46, - 0x27125c47, + 0x7121c47, + 0x7125c48, 0x27129c49, - 0x7145c4a, - 0x7155c51, - 0x7199c55, - 0x719dc66, - 0x71c1c67, - 0x71c5c70, - 0x71c9c71, - 0x7371c72, - 0x27375cdc, - 0x2737dcdd, - 0x27381cdf, + 0x712dc4a, + 0x27135c4b, + 0x27139c4d, + 0x7155c4e, + 0x7165c55, + 0x71a9c59, + 0x71adc6a, + 0x71d1c6b, + 0x71d5c74, + 0x71d9c75, + 0x7381c76, 
0x27385ce0, - 0x738dce1, - 0x7469ce3, - 0x27475d1a, - 0x27479d1d, - 0x2747dd1e, - 0x27481d1f, - 0x7485d20, - 0x74b1d21, - 0x74b5d2c, - 0x74d9d2d, - 0x74e5d36, - 0x7505d39, - 0x7509d41, - 0x7541d42, - 0x77d9d50, - 0x7895df6, - 0x7899e25, - 0x78ade26, - 0x78e1e2b, - 0x7919e38, - 0x2791de46, - 0x7939e47, - 0x7961e4e, - 0x7965e58, - 0x7989e59, - 0x79a5e62, - 0x79cde69, - 0x79dde73, - 0x79e1e77, - 0x79e5e78, - 0x7a1de79, - 0x7a29e87, - 0x7a4de8a, - 0x7acde93, - 0x27ad1eb3, - 0x7ae1eb4, - 0x7ae9eb8, - 0x7b0deba, - 0x7b2dec3, - 0x7b41ecb, - 0x7b55ed0, - 0x7b59ed5, - 0x7b79ed6, - 0x7c1dede, - 0x7c39f07, - 0x7c5df0e, - 0x7c61f17, - 0x7c69f18, - 0x7c79f1a, - 0x7c81f1e, - 0x7c95f20, - 0x7cb5f25, - 0x7cc1f2d, - 0x7ccdf30, - 0x7d05f33, - 0x7dd9f41, - 0x7dddf76, - 0x7df1f77, - 0x7df9f7c, - 0x7e11f7e, - 0x7e15f84, - 0x7e21f85, + 0x2738dce1, + 0x27391ce3, + 0x27395ce4, + 0x739dce5, + 0x7479ce7, + 0x27485d1e, + 0x27489d21, + 0x2748dd22, + 0x27491d23, + 0x7495d24, + 0x74c1d25, + 0x74c5d30, + 0x74e9d31, + 0x74f5d3a, + 0x7515d3d, + 0x7519d45, + 0x7551d46, + 0x77e9d54, + 0x78a5dfa, + 0x78a9e29, + 0x78bde2a, + 0x78f1e2f, + 0x7929e3c, + 0x2792de4a, + 0x7949e4b, + 0x7971e52, + 0x7975e5c, + 0x7999e5d, + 0x79b5e66, + 0x79dde6d, + 0x79ede77, + 0x79f1e7b, + 0x79f5e7c, + 0x7a2de7d, + 0x7a39e8b, + 0x7a5de8e, + 0x7adde97, + 0x27ae1eb7, + 0x7af1eb8, + 0x7af9ebc, + 0x7b1debe, + 0x7b3dec7, + 0x7b51ecf, + 0x7b65ed4, + 0x7b69ed9, + 0x7b89eda, + 0x7c2dee2, + 0x7c49f0b, + 0x7c6df12, + 0x7c71f1b, + 0x7c79f1c, + 0x7c89f1e, + 0x7c91f22, + 0x7ca5f24, + 0x7cc5f29, + 0x7cd1f31, + 0x7cddf34, + 0x7d15f37, + 0x7de9f45, + 0x7dedf7a, + 0x7e01f7b, + 0x7e09f80, + 0x7e21f82, 0x7e25f88, - 0x7e41f89, - 0x7e81f90, - 0x7e85fa0, - 0x7ea5fa1, - 0x7ef5fa9, - 0x7f11fbd, - 0x7f19fc4, - 0x7f6dfc6, - 0x7f71fdb, - 0x7f75fdc, - 0x7f79fdd, - 0x7fbdfde, - 0x7fcdfef, - 0x800dff3, - 0x8012003, - 0x8042004, - 0x818a010, - 0x81b2062, - 0x81e206c, - 0x81fe078, - 0x820607f, - 0x8212081, - 0x8326084, - 0x83320c9, - 0x833e0cc, - 0x834a0cf, - 0x83560d2, - 0x83620d5, - 0x836e0d8, - 0x837a0db, - 0x83860de, - 0x83920e1, - 0x839e0e4, - 0x83aa0e7, - 0x83b60ea, - 0x83c20ed, - 0x83ca0f0, - 0x83d60f2, - 0x83e20f5, - 0x83ee0f8, - 0x83fa0fb, - 0x84060fe, - 0x8412101, - 0x841e104, - 0x842a107, - 0x843610a, - 0x844210d, - 0x844e110, - 0x847a113, - 0x848611e, - 0x8492121, - 0x849e124, - 0x84aa127, - 0x84b612a, - 0x84be12d, - 0x84ca12f, - 0x84d6132, - 0x84e2135, - 0x84ee138, - 0x84fa13b, - 0x850613e, - 0x8512141, - 0x851e144, - 0x852a147, - 0x853614a, - 0x854214d, - 0x854e150, - 0x855a153, - 0x8562156, - 0x856e158, - 0x857a15b, - 0x858615e, - 0x8592161, - 0x859e164, - 0x85aa167, - 0x85b616a, - 0x85c216d, - 0x85c6170, + 0x7e31f89, + 0x7e35f8c, + 0x7e51f8d, + 0x7e91f94, + 0x7e95fa4, + 0x7eb5fa5, + 0x7f05fad, + 0x7f21fc1, + 0x7f29fc8, + 0x7f7dfca, + 0x7f81fdf, + 0x7f85fe0, + 0x7f89fe1, + 0x7fcdfe2, + 0x7fddff3, + 0x801dff7, + 0x8022007, + 0x8052008, + 0x819a014, + 0x81c2066, + 0x81f2070, + 0x820e07c, + 0x8216083, + 0x8222085, + 0x8336088, + 0x83420cd, + 0x834e0d0, + 0x835a0d3, + 0x83660d6, + 0x83720d9, + 0x837e0dc, + 0x838a0df, + 0x83960e2, + 0x83a20e5, + 0x83ae0e8, + 0x83ba0eb, + 0x83c60ee, + 0x83d20f1, + 0x83da0f4, + 0x83e60f6, + 0x83f20f9, + 0x83fe0fc, + 0x840a0ff, + 0x8416102, + 0x8422105, + 0x842e108, + 0x843a10b, + 0x844610e, + 0x8452111, + 0x845e114, + 0x848a117, + 0x8496122, + 0x84a2125, + 0x84ae128, + 0x84ba12b, + 0x84c612e, + 0x84ce131, + 0x84da133, + 0x84e6136, + 0x84f2139, + 0x84fe13c, + 0x850a13f, + 0x8516142, + 0x8522145, + 0x852e148, + 0x853a14b, + 0x854614e, + 
0x8552151, + 0x855e154, + 0x856a157, + 0x857215a, + 0x857e15c, + 0x858a15f, + 0x8596162, + 0x85a2165, + 0x85ae168, + 0x85ba16b, + 0x85c616e, 0x85d2171, - 0x85ee174, - 0x85f217b, - 0x860217c, - 0x861e180, - 0x8662187, - 0x8666198, - 0x867a199, - 0x86ae19e, - 0x86be1ab, - 0x86e21af, - 0x86fa1b8, - 0x87121be, - 0x872a1c4, - 0x873a1ca, - 0x2877e1ce, - 0x87821df, - 0x87ae1e0, - 0x87b61eb, - 0x87ca1ed, + 0x85d6174, + 0x85e2175, + 0x85fe178, + 0x860217f, + 0x8612180, + 0x862e184, + 0x867218b, + 0x867619c, + 0x868a19d, + 0x86be1a2, + 0x86ce1af, + 0x86f21b3, + 0x870a1bc, + 0x87221c2, + 0x873a1c8, + 0x874a1ce, + 0x2878e1d2, + 0x87921e3, + 0x87be1e4, + 0x87c61ef, + 0x87da1f1, } -// max children 523 (capacity 1023) -// max text offset 29863 (capacity 32767) +// max children 524 (capacity 1023) +// max text offset 29871 (capacity 32767) // max text length 36 (capacity 63) -// max hi 8690 (capacity 16383) -// max lo 8685 (capacity 16383) +// max hi 8694 (capacity 16383) +// max lo 8689 (capacity 16383) diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index f00d869ff..43711c67d 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ b/vendor/golang.org/x/net/trace/trace.go @@ -64,6 +64,7 @@ package trace // import "golang.org/x/net/trace" import ( "bytes" + "context" "fmt" "html/template" "io" @@ -124,6 +125,18 @@ func init() { http.HandleFunc("/debug/events", Events) } +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + // Traces responds with traces from the program. // The package initialization registers it in http.DefaultServeMux // at /debug/requests. diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go deleted file mode 100644 index d60819118..000000000 --- a/vendor/golang.org/x/net/trace/trace_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package trace - -import "golang.org/x/net/context" - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go deleted file mode 100644 index df6e1fba7..000000000 --- a/vendor/golang.org/x/net/trace/trace_go17.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package trace - -import "context" - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. 
-func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index eb8dcee17..68f436ed9 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -24,7 +24,9 @@ See godoc for further documentation and examples. In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package +the `golang.org/x/net/context` package. Later replaced by the standard `context` package +of the [`context.Context`](https://golang.org/pkg/context#Context) type. + This means it's no longer possible to use the "Classic App Engine" `appengine.Context` type with the `oauth2` package. (You're using @@ -44,7 +46,7 @@ with the `oauth2` package. ```go import ( - "golang.org/x/net/context" + "context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" newappengine "google.golang.org/appengine" @@ -68,6 +70,13 @@ func handler(w http.ResponseWriter, r *http.Request) { } ``` +## Policy for new packages + +We no longer accept new provider-specific packages in this repo. For +defining provider endpoints and provider-specific OAuth2 behavior, we +encourage you to create packages elsewhere. We'll keep the existing +packages for compatibility. + ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index 50d918b87..feb1157b1 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -5,85 +5,34 @@ package google import ( - "sort" - "strings" - "sync" + "context" "time" - "golang.org/x/net/context" "golang.org/x/oauth2" ) -// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex. -var appengineFlex bool - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. var appengineAppIDFunc func(c context.Context) string -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. +// AppEngineTokenSource returns a token source that fetches tokens from either +// the current application's service account or from the metadata server, +// depending on the App Engine environment. See below for environment-specific +// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that +// involves user accounts, see oauth2.Config instead. // -// The provided context must have come from appengine.NewContext. +// First generation App Engine runtimes (<= Go 1.9): +// AppEngineTokenSource returns a token source that fetches tokens issued to the +// current App Engine application's service account. 
The provided context must have +// come from appengine.NewContext. +// +// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: +// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the +// flexible environment. It delegates to ComputeTokenSource, and the provided +// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, +// which DefaultTokenSource will use in this case) instead. func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &appEngineTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil + return appEngineTokenSource(ctx, scope...) } diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go new file mode 100644 index 000000000..83dacac32 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// This file applies to App Engine first generation runtimes (<= Go 1.9). + +package google + +import ( + "context" + "sort" + "strings" + "sync" + + "golang.org/x/oauth2" + "google.golang.org/appengine" +) + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &gaeTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. 
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type gaeTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 000000000..04c2c2216 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go deleted file mode 100644 index 56669eaa9..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine appenginevm - -package google - -import "google.golang.org/appengine" - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go deleted file mode 100644 index 5d0231af2..000000000 --- a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appenginevm - -package google - -func init() { - appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. 
-} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index a31607437..5087d845f 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -5,6 +5,7 @@ package google import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -14,10 +15,28 @@ import ( "runtime" "cloud.google.com/go/compute/metadata" - "golang.org/x/net/context" "golang.org/x/oauth2" ) +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + // DefaultClient returns an HTTP Client that uses the // DefaultTokenSource to obtain authentication credentials. func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { @@ -39,8 +58,23 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc return creds.TokenSource, nil } -// Common implementation for FindDefaultCredentials. -func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) { +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { @@ -59,15 +93,18 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) } - // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil && !appengineFlex { + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. 
+ if appengineTokenFunc != nil { return &DefaultCredentials{ ProjectID: appengineAppIDFunc(ctx), TokenSource: AppEngineTokenSource(ctx, scopes...), }, nil } - // Fourth, if we're on Google Compute Engine use the metadata server. + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() return &DefaultCredentials{ @@ -81,8 +118,11 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) } -// Common implementation for CredentialsFromJSON. -func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) { +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { var f credentialsFile if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err diff --git a/vendor/golang.org/x/oauth2/google/doc_go19.go b/vendor/golang.org/x/oauth2/google/doc.go similarity index 99% rename from vendor/golang.org/x/oauth2/google/doc_go19.go rename to vendor/golang.org/x/oauth2/google/doc.go index 2a86325fe..73be62903 100644 --- a/vendor/golang.org/x/oauth2/google/doc_go19.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 - // Package google provides support for making OAuth2 authorized and authenticated // HTTP requests to Google APIs. It supports the Web server flow, client-side // credentials, service accounts, Google Compute Engine service accounts, and Google diff --git a/vendor/golang.org/x/oauth2/google/doc_not_go19.go b/vendor/golang.org/x/oauth2/google/doc_not_go19.go deleted file mode 100644 index 5c3c6e148..000000000 --- a/vendor/golang.org/x/oauth2/google/doc_not_go19.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -// Package google provides support for making OAuth2 authorized and authenticated -// HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. -// -// A brief overview of the package follows. For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -// -// OAuth2 Configs -// -// Two functions in this package return golang.org/x/oauth2.Config values from Google credential -// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, -// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or -// create an http.Client. -// -// -// Credentials -// -// The DefaultCredentials type represents Google Application Default Credentials, as -// well as other forms of credential. 
-// -// Use FindDefaultCredentials to obtain Application Default Credentials. -// FindDefaultCredentials looks in some well-known places for a credentials file, and -// will call AppEngineTokenSource or ComputeTokenSource as needed. -// -// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, -// then use the credentials to construct an http.Client or an oauth2.TokenSource. -// -// Use CredentialsFromJSON to obtain credentials from either of the two JSON -// formats described in OAuth2 Configs, above. (The DefaultCredentials returned may -// not be "Application Default Credentials".) The TokenSource in the returned value -// is the same as the one obtained from the oauth2.Config returned from -// ConfigFromJSON or JWTConfigFromJSON, but the DefaultCredentials may contain -// additional information that is useful is some circumstances. -package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/go19.go b/vendor/golang.org/x/oauth2/google/go19.go deleted file mode 100644 index 4d0318b1e..000000000 --- a/vendor/golang.org/x/oauth2/google/go19.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package google - -import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// Credentials holds Google credentials, including "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -type Credentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte -} - -// DefaultCredentials is the old name of Credentials. -// -// Deprecated: use Credentials instead. -type DefaultCredentials = Credentials - -// FindDefaultCredentials searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { - return findDefaultCredentials(ctx, scopes) -} - -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). 
-func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { - return credentialsFromJSON(ctx, jsonData, scopes) -} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index f7481fbcc..ca7d208d7 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -5,6 +5,7 @@ package google import ( + "context" "encoding/json" "errors" "fmt" @@ -12,7 +13,6 @@ import ( "time" "cloud.google.com/go/compute/metadata" - "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" ) diff --git a/vendor/golang.org/x/oauth2/google/not_go19.go b/vendor/golang.org/x/oauth2/google/not_go19.go deleted file mode 100644 index 544e40624..000000000 --- a/vendor/golang.org/x/oauth2/google/not_go19.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package google - -import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// DefaultCredentials holds Google credentials, including "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -type DefaultCredentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte -} - -// FindDefaultCredentials searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -func FindDefaultCredentials(ctx context.Context, scopes ...string) (*DefaultCredentials, error) { - return findDefaultCredentials(ctx, scopes) -} - -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). -// -// Note: despite the name, the returned credentials may not be Application Default Credentials. 
-func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*DefaultCredentials, error) { - return credentialsFromJSON(ctx, jsonData, scopes) -} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go index b9660cadd..456224bc7 100644 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -6,6 +6,7 @@ package google import ( "bufio" + "context" "encoding/json" "errors" "fmt" @@ -18,7 +19,6 @@ import ( "strings" "time" - "golang.org/x/net/context" "golang.org/x/oauth2" ) diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index fc63fcab3..c0ab196cf 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -26,7 +26,7 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) { if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) } } parsed, ok := parsedKey.(*rsa.PrivateKey) diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 5c5451ad8..1f607943f 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -5,6 +5,7 @@ package internal import ( + "context" "encoding/json" "errors" "fmt" @@ -17,7 +18,6 @@ import ( "strings" "time" - "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" ) @@ -110,6 +110,7 @@ var brokenAuthHeaderProviders = []string{ "https://login.salesforce.com/", "https://login.windows.net", "https://login.live.com/", + "https://login.live-int.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", "https://oauth.vk.com/", @@ -132,6 +133,9 @@ var brokenAuthHeaderProviders = []string{ "https://whats.todaysplan.com.au/rest/oauth/access_token", "https://stackoverflow.com/oauth/access_token", "https://account.health.nokia.com", + "https://accounts.zoho.com", + "https://gitter.im/login/oauth/token", + "https://openid-connect.onelogin.com/oidc", } // brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. 
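The google package diffs above fold the former go1.9-only definitions (Credentials, FindDefaultCredentials, CredentialsFromJSON) into default.go and switch the whole oauth2 tree to the standard library context package. A minimal sketch of how a caller uses the consolidated API after this update — the cloud-platform scope and the program layout are illustrative, not part of the diff; only the function signatures come from the vendored source:

```go
// Sketch only: exercises the consolidated credentials API shown in the diff above.
// The cloud-platform scope is illustrative.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background() // standard library context, no golang.org/x/net/context

	// FindDefaultCredentials is now defined directly in default.go and takes
	// scopes variadically.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("finding default credentials: %v", err)
	}
	fmt.Println("project:", creds.ProjectID)

	// DefaultClient performs the same lookup and returns an *http.Client
	// backed by DefaultTokenSource.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("building default client: %v", err)
	}
	_ = client
}
```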
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index d16f9ae1f..572074a63 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -5,9 +5,8 @@ package internal import ( + "context" "net/http" - - "golang.org/x/net/context" ) // HTTPClient is the context key to use with golang.org/x/net/context's diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index e08f31595..0783a94c4 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -9,6 +9,7 @@ package jwt import ( + "context" "encoding/json" "fmt" "io" @@ -18,7 +19,6 @@ import ( "strings" "time" - "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/internal" "golang.org/x/oauth2/jws" diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 16775d081..1e8e1b741 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -10,13 +10,13 @@ package oauth2 // import "golang.org/x/oauth2" import ( "bytes" + "context" "errors" "net/http" "net/url" "strings" "sync" - "golang.org/x/net/context" "golang.org/x/oauth2/internal" ) @@ -164,8 +164,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -183,8 +182,7 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before // calling Exchange, be sure to validate FormValue("state"). diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 34db8cdc8..9be1ae537 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -5,6 +5,7 @@ package oauth2 import ( + "context" "fmt" "net/http" "net/url" @@ -12,7 +13,6 @@ import ( "strings" "time" - "golang.org/x/net/context" "golang.org/x/oauth2/internal" ) diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTING.md b/vendor/golang.org/x/sync/CONTRIBUTING.md new file mode 100644 index 000000000..d0485e887 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. 
We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/golang.org/x/sync/LICENSE similarity index 96% rename from vendor/gopkg.in/square/go-jose.v2/json/LICENSE rename to vendor/golang.org/x/sync/LICENSE index 744875676..6a66aea5e 100644 --- a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
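This update also vendors golang.org/x/sync (its license and contribution files appear here; the semaphore package itself follows just below). Ahead of that code, a minimal sketch of how the weighted semaphore added in this diff is meant to be used — the concurrency limit and task count are illustrative; only NewWeighted, Acquire, and Release come from the vendored source:

```go
// Sketch only: bounds concurrent work with the semaphore.Weighted type
// vendored just below. The limit of 3 and the 10 tasks are illustrative.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 tasks run at once

	for i := 0; i < 10; i++ {
		// Acquire blocks until weight 1 is available or ctx is done.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			break
		}
		go func(task int) {
			defer sem.Release(1)
			fmt.Println("running task", task)
		}(i)
	}

	// Acquiring the full weight waits for all in-flight tasks to release.
	if err := sem.Acquire(ctx, 3); err == nil {
		fmt.Println("all tasks done")
	}
}
```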
diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md new file mode 100644 index 000000000..1f8436cc9 --- /dev/null +++ b/vendor/golang.org/x/sync/README.md @@ -0,0 +1,18 @@ +# Go Sync + +This repository provides Go concurrency primitives in addition to the +ones provided by the language and "sync" and "sync/atomic" packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sync`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sync repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/codereview.cfg b/vendor/golang.org/x/sync/codereview.cfg new file mode 100644 index 000000000..3f8b14b64 --- /dev/null +++ b/vendor/golang.org/x/sync/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..2096ca3a0 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking only until ctx +// is done. On success, returns nil. On failure, returns ctx.Err() and leaves +// the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. 
+func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s new file mode 100644 index 000000000..06f84b855 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 649e58714..88f712557 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -15,12 +15,6 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. 
-TEXT ·Syscall(SB),NOSPLIT,$0-56 - BR syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - BR syscall·Syscall6(SB) - TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 BL runtime·entersyscall(SB) MOVD a1+8(FP), R3 @@ -36,12 +30,6 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 BL runtime·exitsyscall(SB) RET -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - BR syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - BR syscall·RawSyscall6(SB) - TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 MOVD a1+8(FP), R3 MOVD a2+16(FP), R4 diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index edb176f16..4f92537ca 100755 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -10,7 +10,7 @@ GOOSARCH="${GOOS}_${GOARCH}" # defaults -mksyscall="./mksyscall.pl" +mksyscall="go run mksyscall.go" mkerrors="./mkerrors.sh" zerrors="zerrors_$GOOSARCH.go" mksysctl="" @@ -61,17 +61,17 @@ _* | *_ | _) ;; aix_ppc) mkerrors="$mkerrors -maix32" - mksyscall="perl mksyscall_aix.pl -aix" + mksyscall="./mksyscall_aix_ppc.pl -aix" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; aix_ppc64) mkerrors="$mkerrors -maix64" - mksyscall="perl mksyscall_aix.pl -aix" + mksyscall="./mksyscall_aix_ppc64.pl -aix" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; darwin_386) mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" + mksyscall="go run mksyscall.go -l32" mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; @@ -92,13 +92,13 @@ darwin_arm64) ;; dragonfly_amd64) mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -dragonfly" + mksyscall="go run mksyscall.go -dragonfly" mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_386) mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" + mksyscall="go run mksyscall.go -l32" mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; @@ -109,7 +109,7 @@ freebsd_amd64) ;; freebsd_arm) mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -arm" + mksyscall="go run mksyscall.go -l32 -arm" mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. @@ -124,19 +124,19 @@ linux_sparc64) ;; netbsd_386) mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -netbsd" + mksyscall="go run mksyscall.go -l32 -netbsd" mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; netbsd_amd64) mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -netbsd" + mksyscall="go run mksyscall.go -netbsd" mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; netbsd_arm) mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -netbsd -arm" + mksyscall="go run mksyscall.go -l32 -netbsd -arm" mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
@@ -144,21 +144,21 @@ netbsd_arm) ;; openbsd_386) mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -openbsd" + mksyscall="go run mksyscall.go -l32 -openbsd" mksysctl="./mksysctl_openbsd.pl" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -openbsd" + mksyscall="go run mksyscall.go -openbsd" mksysctl="./mksysctl_openbsd.pl" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -openbsd -arm" + mksyscall="go run mksyscall.go -l32 -openbsd -arm" mksysctl="./mksysctl_openbsd.pl" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" # Let the type of C char be signed for making the bare syscall @@ -187,8 +187,14 @@ esac syscall_goos="syscall_bsd.go $syscall_goos" ;; esac - if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi - ;; + if [ -n "$mksyscall" ]; then + if [ "$GOOSARCH" == "aix_ppc64" ]; then + # aix/ppc64 script generates files instead of writing to stdin. + echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; + else + echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; + fi + fi esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index ec8e61322..955dd50f9 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -101,7 +101,7 @@ includes_DragonFly=' ' includes_FreeBSD=' -#include +#include #include #include #include @@ -187,15 +187,18 @@ struct ltchars { #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include +#include #include #include #include @@ -445,8 +448,10 @@ ccflags="$@" $2 ~ /^(MS|MNT|UMOUNT)_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || + $2 ~ /^MODULE_INIT_/ || $2 !~ "NLA_TYPE_MASK" && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SIOC/ || @@ -496,6 +501,7 @@ ccflags="$@" $2 ~ /^(HDIO|WIN|SMART)_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || + $2 ~/^PPPIOC/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go index 7e5c22c47..6d263cf15 100644 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -46,6 +46,10 @@ func main() { valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) + // Intentionally export __fds_bits field in FdSet + fdSetRegex := 
regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) + b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) + // If we have empty Ptrace structs, we should delete them. Only s390x emits // nonempty Ptrace structs. ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) @@ -65,6 +69,10 @@ func main() { convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) + // Convert [1024]int8 to [1024]byte in Ptmget members + convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) + b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) + // Remove spare fields (e.g. in Statx_t) spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) b = spareFieldsRegex.ReplaceAll(b, []byte("_")) diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go new file mode 100644 index 000000000..36c0b5ac4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall.go @@ -0,0 +1,367 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_darwin.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named errno. + +A line beginning with //sysnb is like //sys, except that the +goroutine will not be suspended during the execution of the system +call. This must only be used for system calls which can never +block, as otherwise the system call could cause all goroutines to +hang. 
+*/ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + plan9 = flag.Bool("plan9", false, "plan9") + openbsd = flag.Bool("openbsd", false, "openbsd") + netbsd = flag.Bool("netbsd", false, "netbsd") + dragonfly = flag.Bool("dragonfly", false, "dragonfly") + arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair + tags = flag.String("tags", "", "build tags") + filename = flag.String("output", "", "output file name (standard output if omitted)") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + + // Check that we are using the new build system if we should + if goos == "linux" && goarch != "sparc64" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + fmt.Fprintf(os.Stderr, "In the new build system, mksyscall should not be called directly.\n") + fmt.Fprintf(os.Stderr, "See README.md\n") + os.Exit(1) + } + } + + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + text := "" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, errno error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, sysname := f[2], f[3], f[4], f[5] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + // Try in vain to keep people from editing this file. 
+ // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // Go function header. + outDecl := "" + if len(out) > 0 { + outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + break + } + } + + // Prepare arguments to Syscall. + var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass dummy pointer in that case. + // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). + text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) + text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && (*openbsd || *netbsd) { + args = append(args, "0") + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if p.Type == "int64" && *dragonfly { + if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if p.Type == "int64" && endianness != "" { + if len(args)%2 == 1 && *arm { + // arm abi specifies 64-bit argument uses + // (even, odd) pair + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + + // Determine which form 
to use; pad args with zeros. + asm := "Syscall" + if nonblock != nil { + if errvar == "" && goos == "linux" { + asm = "RawSyscallNoError" + } else { + asm = "RawSyscall" + } + } else { + if errvar == "" && goos == "linux" { + asm = "SyscallNoError" + } + } + if len(args) <= 3 { + for len(args) < 3 { + args = append(args, "0") + } + } else if len(args) <= 6 { + asm += "6" + for len(args) < 6 { + args = append(args, "0") + } + } else if len(args) <= 9 { + asm += "9" + for len(args) < 9 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) + } + + // System call number. + if sysname == "" { + sysname = "SYS_" + funct + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToUpper(sysname) + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) + + // Assign return values. + body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" && !*plan9 { + reg = "e1" + ret[2] = reg + doErrno = true + } else if p.Name == "err" && *plan9 { + ret[0] = "r0" + ret[2] = "e1" + break + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%s != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. + if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" || *plan9 { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + if errvar == "" && goos == "linux" { + // raw syscall without error on Linux, see golang.org/issue/22924 + text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + } + text += body + + if *plan9 && ret[2] == "e1" { + text += "\tif int32(r0) == -1 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } else if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = errnoErr(e1)\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n\n" + + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl deleted file mode 100755 index 1f6b926f8..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.pl +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This program reads a file containing function prototypes -# (like syscall_darwin.go) and generates system call bodies. 
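The slice marshalling described above (take &p[0] only when the slice is non-empty, otherwise pass a dummy pointer with length zero) is what makes zero-length calls through the generated wrappers safe. A small caller-side sketch using the public unix.Write wrapper, not the generator's output verbatim:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.Write is one of the wrappers produced from a //sys line. A nil or
	// empty slice is safe: the generated code substitutes a dummy pointer
	// with length 0 instead of taking &p[0].
	n, err := unix.Write(1, []byte("hello from a generated wrapper\n"))
	fmt.Println(n, err)

	n, err = unix.Write(1, nil) // zero-length write; no address of p[0] is taken
	fmt.Println(n, err)
}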
-# The prototypes are marked by lines beginning with "//sys" -# and read like func declarations if //sys is replaced by func, but: -# * The parameter lists must give a name for each argument. -# This includes return parameters. -# * The parameter lists must give a type for each argument: -# the (x, y, z int) shorthand is not allowed. -# * If the return parameter is an error number, it must be named errno. - -# A line beginning with //sysnb is like //sys, except that the -# goroutine will not be suspended during the execution of the system -# call. This must only be used for system calls which can never -# block, as otherwise the system call could cause all goroutines to -# hang. - -use strict; - -my $cmdline = "mksyscall.pl " . join(' ', @ARGV); -my $errors = 0; -my $_32bit = ""; -my $plan9 = 0; -my $openbsd = 0; -my $netbsd = 0; -my $dragonfly = 0; -my $arm = 0; # 64-bit value should use (even, odd)-pair -my $tags = ""; # build tags - -if($ARGV[0] eq "-b32") { - $_32bit = "big-endian"; - shift; -} elsif($ARGV[0] eq "-l32") { - $_32bit = "little-endian"; - shift; -} -if($ARGV[0] eq "-plan9") { - $plan9 = 1; - shift; -} -if($ARGV[0] eq "-openbsd") { - $openbsd = 1; - shift; -} -if($ARGV[0] eq "-netbsd") { - $netbsd = 1; - shift; -} -if($ARGV[0] eq "-dragonfly") { - $dragonfly = 1; - shift; -} -if($ARGV[0] eq "-arm") { - $arm = 1; - shift; -} -if($ARGV[0] eq "-tags") { - shift; - $tags = $ARGV[0]; - shift; -} - -if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; - exit 1; -} - -# Check that we are using the new build system if we should -if($ENV{'GOOS'} eq "linux" && $ENV{'GOARCH'} ne "sparc64") { - if($ENV{'GOLANG_SYS_BUILD'} ne "docker") { - print STDERR "In the new build system, mksyscall should not be called directly.\n"; - print STDERR "See README.md\n"; - exit 1; - } -} - - -sub parseparamlist($) { - my ($list) = @_; - $list =~ s/^\s*//; - $list =~ s/\s*$//; - if($list eq "") { - return (); - } - return split(/\s*,\s*/, $list); -} - -sub parseparam($) { - my ($p) = @_; - if($p !~ /^(\S*) (\S*)$/) { - print STDERR "$ARGV:$.: malformed parameter: $p\n"; - $errors = 1; - return ("xx", "int"); - } - return ($1, $2); -} - -my $text = ""; -while(<>) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, errno error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # Try in vain to keep people from editing this file. - # The theory is that they jump into the middle of the file - # without reading the header. - $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - - # Go function header. - my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
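The deleted Perl script and its Go replacement share the same tiny parameter grammar. The standalone sketch below mirrors the parseParamList/parseParam helpers shown above on an invented argument list:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Param mirrors the name/type pair both generators operate on.
type Param struct {
	Name string
	Type string
}

func parseParamList(list string) []string {
	list = strings.TrimSpace(list)
	if list == "" {
		return []string{}
	}
	return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}

func parseParam(p string) Param {
	ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
	if ps == nil {
		return Param{"xx", "int"} // same fallback the Perl version uses
	}
	return Param{ps[1], ps[2]}
}

func main() {
	// The argument list is an example only.
	for _, p := range parseParamList("fd int, p []byte, flags int") {
		fmt.Printf("%+v\n", parseParam(p))
	}
}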
- my @args = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass dummy pointer in that case. - # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). - $text .= "\tvar _p$n unsafe.Pointer\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; - $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; - $text .= "\n"; - push @args, "uintptr(_p$n)", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && ($openbsd || $netbsd)) { - push @args, "0"; - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $dragonfly) { - if ($func !~ /^extp(read|write)/i) { - push @args, "0"; - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $_32bit ne "") { - if(@args % 2 && $arm) { - # arm abi specifies 64-bit argument uses - # (even, odd) pair - push @args, "0" - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } - } else { - push @args, "uintptr($name)"; - } - } - - # Determine which form to use; pad args with zeros. - my $asm = "Syscall"; - if ($nonblock) { - if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { - $asm = "RawSyscallNoError"; - } else { - $asm = "RawSyscall"; - } - } else { - if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { - $asm = "SyscallNoError"; - } - } - if(@args <= 3) { - while(@args < 3) { - push @args, "0"; - } - } elsif(@args <= 6) { - $asm .= "6"; - while(@args < 6) { - push @args, "0"; - } - } elsif(@args <= 9) { - $asm .= "9"; - while(@args < 9) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # System call number. - if($sysname eq "") { - $sysname = "SYS_$func"; - $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar - $sysname =~ y/a-z/A-Z/; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm($sysname, $args)"; - - # Assign return values. 
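When a //sys line carries no explicit "= SYS_..." suffix, both generators derive the syscall constant from the function name as shown above. A minimal sketch of that derivation; the input names are examples only:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// deriveSysname mirrors the default naming above: SYS_ plus the function
// name, with CamelCase broken up by underscores and the result upper-cased.
func deriveSysname(funct string) string {
	sysname := "SYS_" + funct
	sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
	return strings.ToUpper(sysname)
}

func main() {
	fmt.Println(deriveSysname("EpollCreate1")) // SYS_EPOLL_CREATE1
	fmt.Println(deriveSysname("Getpid"))       // SYS_GETPID
}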
- my $body = ""; - my @ret = ("_", "_", "_"); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err" && !$plan9) { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } elsif($name eq "err" && $plan9) { - $ret[0] = "r0"; - $ret[2] = "e1"; - next; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1" || $plan9) { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - if ($errvar eq "" && $ENV{'GOOS'} eq "linux") { - # raw syscall without error on Linux, see golang.org/issue/22924 - $text .= "\t$ret[0], $ret[1] := $call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - } - $text .= $body; - - if ($plan9 && $ret[2] eq "e1") { - $text .= "\tif int32(r0) == -1 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } elsif ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = errnoErr(e1)\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n\n"; -} - -chomp $text; -chomp $text; - -if($errors) { - exit 1; -} - -print <) { chomp; @@ -369,7 +369,6 @@ $c_extern import "C" import ( "unsafe" - "syscall" ) diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.pl b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.pl new file mode 100755 index 000000000..53df26bb9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.pl @@ -0,0 +1,579 @@ +#!/usr/bin/env perl +# Copyright 2018 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# This program reads a file containing function prototypes +# (like syscall_aix.go) and generates system call bodies. +# The prototypes are marked by lines beginning with "//sys" +# and read like func declarations if //sys is replaced by func, but: +# * The parameter lists must give a name for each argument. +# This includes return parameters. +# * The parameter lists must give a type for each argument: +# the (x, y, z int) shorthand is not allowed. +# * If the return parameter is an error number, it must be named err. +# * If go func name needs to be different than its libc name, +# * or the function is not in libc, name could be specified +# * at the end, after "=" sign, like +# //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt + +# This program will generate three files and handle both gc and gccgo implementation: +# - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) +# - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 +# - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. + +# The generated code looks like this +# +# zsyscall_aix_ppc64.go +# func asyscall(...) (n int, err error) { +# // Pointer Creation +# r1, e1 := callasyscall(...) 
+# // Type Conversion +# // Error Handler +# return +# } +# +# zsyscall_aix_ppc64_gc.go +# //go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" +# //go:linkname libc_asyscall libc_asyscall +# var asyscall syscallFunc +# +# func callasyscall(...) (r1 uintptr, e1 Errno) { +# r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) +# return +# } +# +# zsyscall_aix_ppc64_ggcgo.go +# /* +# int asyscall(...) +# +# */ +# import "C" +# +# func callasyscall(...) (r1 uintptr, e1 Errno) { +# r1 = uintptr(C.asyscall(...)) +# e1 = syscall.GetErrno() +# return +# } + + + +use strict; + +my $cmdline = "mksyscall_aix_ppc64.pl " . join(' ', @ARGV); +my $errors = 0; +my $_32bit = ""; +my $tags = ""; # build tags +my $aix = 0; +my $solaris = 0; + +binmode STDOUT; + +if($ARGV[0] eq "-b32") { + $_32bit = "big-endian"; + shift; +} elsif($ARGV[0] eq "-l32") { + $_32bit = "little-endian"; + shift; +} +if($ARGV[0] eq "-aix") { + $aix = 1; + shift; +} +if($ARGV[0] eq "-tags") { + shift; + $tags = $ARGV[0]; + shift; +} + +if($ARGV[0] =~ /^-/) { + print STDERR "usage: mksyscall_aix.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; + exit 1; +} + +sub parseparamlist($) { + my ($list) = @_; + $list =~ s/^\s*//; + $list =~ s/\s*$//; + if($list eq "") { + return (); + } + return split(/\s*,\s*/, $list); +} + +sub parseparam($) { + my ($p) = @_; + if($p !~ /^(\S*) (\S*)$/) { + print STDERR "$ARGV:$.: malformed parameter: $p\n"; + $errors = 1; + return ("xx", "int"); + } + return ($1, $2); +} + +my $package = ""; +# GCCGO +my $textgccgo = ""; +my $c_extern = "/*\n#include \n"; +# GC +my $textgc = ""; +my $dynimports = ""; +my $linknames = ""; +my @vars = (); +# COMMUN +my $textcommon = ""; + +while(<>) { + chomp; + s/\s+/ /g; + s/^\s+//; + s/\s+$//; + $package = $1 if !$package && /^package (\S+)$/; + my $nonblock = /^\/\/sysnb /; + next if !/^\/\/sys / && !$nonblock; + + # Line must be of the form + # func Open(path string, mode int, perm int) (fd int, err error) + # Split into name, in params, out params. + if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { + print STDERR "$ARGV:$.: malformed //sys declaration\n"; + $errors = 1; + next; + } + my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); + + # Split argument lists on comma. + my @in = parseparamlist($in); + my @out = parseparamlist($out); + + $in = join(', ', @in); + $out = join(', ', @out); + + if($sysname eq "") { + $sysname = "$func"; + } + + my $onlyCommon = 0; + if ($func eq "readlen" || $func eq "writelen" || $func eq "FcntlInt" || $func eq "FcntlFlock") { + # This function call another syscall which is already implemented. + # Therefore, the gc and gccgo part must not be generated. + $onlyCommon = 1 + } + + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. 
+ + $textcommon .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + if (!$onlyCommon) { + $textgccgo .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + $textgc .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + } + + + # Check if value return, err return available + my $errvar = ""; + my $retvar = ""; + my $rettype = ""; + foreach my $p (@out) { + my ($name, $type) = parseparam($p); + if($type eq "error") { + $errvar = $name; + } else { + $retvar = $name; + $rettype = $type; + } + } + + + $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; + $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. + + # GCCGO Prototype return type + my $C_rettype = ""; + if($rettype eq "unsafe.Pointer") { + $C_rettype = "uintptr_t"; + } elsif($rettype eq "uintptr") { + $C_rettype = "uintptr_t"; + } elsif($rettype =~ /^_/) { + $C_rettype = "uintptr_t"; + } elsif($rettype eq "int") { + $C_rettype = "int"; + } elsif($rettype eq "int32") { + $C_rettype = "int"; + } elsif($rettype eq "int64") { + $C_rettype = "long long"; + } elsif($rettype eq "uint32") { + $C_rettype = "unsigned int"; + } elsif($rettype eq "uint64") { + $C_rettype = "unsigned long long"; + } else { + $C_rettype = "int"; + } + if($sysname eq "exit") { + $C_rettype = "void"; + } + + # GCCGO Prototype arguments type + my @c_in = (); + foreach my $i (0 .. $#in) { + my ($name, $type) = parseparam($in[$i]); + if($type =~ /^\*/) { + push @c_in, "uintptr_t"; + } elsif($type eq "string") { + push @c_in, "uintptr_t"; + } elsif($type =~ /^\[\](.*)/) { + push @c_in, "uintptr_t", "size_t"; + } elsif($type eq "unsafe.Pointer") { + push @c_in, "uintptr_t"; + } elsif($type eq "uintptr") { + push @c_in, "uintptr_t"; + } elsif($type =~ /^_/) { + push @c_in, "uintptr_t"; + } elsif($type eq "int") { + if (($i == 0 || $i == 2) && $func eq "fcntl"){ + # These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock + push @c_in, "uintptr_t"; + } else { + push @c_in, "int"; + } + } elsif($type eq "int32") { + push @c_in, "int"; + } elsif($type eq "int64") { + push @c_in, "long long"; + } elsif($type eq "uint32") { + push @c_in, "unsigned int"; + } elsif($type eq "uint64") { + push @c_in, "unsigned long long"; + } else { + push @c_in, "int"; + } + } + + if (!$onlyCommon){ + # GCCGO Prototype Generation + # Imports of system calls from libc + $c_extern .= "$C_rettype $sysname"; + my $c_in = join(', ', @c_in); + $c_extern .= "($c_in);\n"; + } + + # GC Library name + if($modname eq "") { + $modname = "libc.a/shr_64.o"; + } else { + print STDERR "$func: only syscall using libc are available\n"; + $errors = 1; + next; + } + my $sysvarname = "libc_${sysname}"; + + if (!$onlyCommon){ + # GC Runtime import of function to allow cross-platform builds. + $dynimports .= "//go:cgo_import_dynamic ${sysvarname} ${sysname} \"$modname\"\n"; + # GC Link symbol to proc address variable. + $linknames .= "//go:linkname ${sysvarname} ${sysvarname}\n"; + # GC Library proc address variable. + push @vars, $sysvarname; + } + + my $strconvfunc ="BytePtrFromString"; + my $strconvtype = "*byte"; + + # Go function header. + if($out ne "") { + $out = " ($out)"; + } + if($textcommon ne "") { + $textcommon .= "\n" + } + + $textcommon .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out ; + + # Prepare arguments to call. 
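For the AIX generator, the default libc symbol name is derived from the Go function name as shown above (CamelCase split by underscores, then lower-cased) unless an "= lib.name" override is given on the //sys line. A standalone sketch with example inputs, the last of which is a made-up name chosen only to show the underscore rule:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// libcName mirrors the two substitutions applied to $sysname above.
func libcName(funct string) string {
	name := regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(funct, `${1}_$2`)
	return strings.ToLower(name)
}

func main() {
	fmt.Println(libcName("Getsockopt"))   // getsockopt
	fmt.Println(libcName("Sethostname"))  // sethostname
	fmt.Println(libcName("SomeCamelCase")) // some_camel_case (hypothetical name)
}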
+ my @argscommun = (); # Arguments in the commun part + my @argscall = (); # Arguments for call prototype + my @argsgc = (); # Arguments for gc call (with syscall6) + my @argsgccgo = (); # Arguments for gccgo call (with C.name_of_syscall) + my $n = 0; + my $arg_n = 0; + foreach my $p (@in) { + my ($name, $type) = parseparam($p); + if($type =~ /^\*/) { + push @argscommun, "uintptr(unsafe.Pointer($name))"; + push @argscall, "$name uintptr"; + push @argsgc, "$name"; + push @argsgccgo, "C.uintptr_t($name)"; + } elsif($type eq "string" && $errvar ne "") { + $textcommon .= "\tvar _p$n $strconvtype\n"; + $textcommon .= "\t_p$n, $errvar = $strconvfunc($name)\n"; + $textcommon .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + + push @argscommun, "uintptr(unsafe.Pointer(_p$n))"; + push @argscall, "_p$n uintptr "; + push @argsgc, "_p$n"; + push @argsgccgo, "C.uintptr_t(_p$n)"; + $n++; + } elsif($type eq "string") { + print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; + $textcommon .= "\tvar _p$n $strconvtype\n"; + $textcommon .= "\t_p$n, $errvar = $strconvfunc($name)\n"; + $textcommon .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; + + push @argscommun, "uintptr(unsafe.Pointer(_p$n))"; + push @argscall, "_p$n uintptr"; + push @argsgc, "_p$n"; + push @argsgccgo, "C.uintptr_t(_p$n)"; + $n++; + } elsif($type =~ /^\[\](.*)/) { + # Convert slice into pointer, length. + # Have to be careful not to take address of &a[0] if len == 0: + # pass nil in that case. + $textcommon .= "\tvar _p$n *$1\n"; + $textcommon .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; + push @argscommun, "uintptr(unsafe.Pointer(_p$n))", "len($name)"; + push @argscall, "_p$n uintptr", "_lenp$n int"; + push @argsgc, "_p$n", "uintptr(_lenp$n)"; + push @argsgccgo, "C.uintptr_t(_p$n)", "C.size_t(_lenp$n)"; + $n++; + } elsif($type eq "int64" && $_32bit ne "") { + print STDERR "$ARGV:$.: $func uses int64 with 32 bits mode. Case not yet implemented\n"; + # if($_32bit eq "big-endian") { + # push @args, "uintptr($name >> 32)", "uintptr($name)"; + # } else { + # push @args, "uintptr($name)", "uintptr($name >> 32)"; + # } + # $n++; + } elsif($type eq "bool") { + print STDERR "$ARGV:$.: $func uses bool. 
Case not yet implemented\n"; + # $text .= "\tvar _p$n uint32\n"; + # $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; + # push @args, "_p$n"; + # $n++; + } elsif($type =~ /^_/ ||$type eq "unsafe.Pointer") { + push @argscommun, "uintptr($name)"; + push @argscall, "$name uintptr"; + push @argsgc, "$name"; + push @argsgccgo, "C.uintptr_t($name)"; + } elsif($type eq "int") { + if (($arg_n == 0 || $arg_n == 2) && ($func eq "fcntl" || $func eq "FcntlInt" || $func eq "FcntlFlock")) { + # These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock + push @argscommun, "uintptr($name)"; + push @argscall, "$name uintptr"; + push @argsgc, "$name"; + push @argsgccgo, "C.uintptr_t($name)"; + } else { + push @argscommun, "$name"; + push @argscall, "$name int"; + push @argsgc, "uintptr($name)"; + push @argsgccgo, "C.int($name)"; + } + } elsif($type eq "int32") { + push @argscommun, "$name"; + push @argscall, "$name int32"; + push @argsgc, "uintptr($name)"; + push @argsgccgo, "C.int($name)"; + } elsif($type eq "int64") { + push @argscommun, "$name"; + push @argscall, "$name int64"; + push @argsgc, "uintptr($name)"; + push @argsgccgo, "C.longlong($name)"; + } elsif($type eq "uint32") { + push @argscommun, "$name"; + push @argscall, "$name uint32"; + push @argsgc, "uintptr($name)"; + push @argsgccgo, "C.uint($name)"; + } elsif($type eq "uint64") { + push @argscommun, "$name"; + push @argscall, "$name uint64"; + push @argsgc, "uintptr($name)"; + push @argsgccgo, "C.ulonglong($name)"; + } elsif($type eq "uintptr") { + push @argscommun, "$name"; + push @argscall, "$name uintptr"; + push @argsgc, "$name"; + push @argsgccgo, "C.uintptr_t($name)"; + } else { + push @argscommun, "int($name)"; + push @argscall, "$name int"; + push @argsgc, "uintptr($name)"; + push @argsgccgo, "C.int($name)"; + } + $arg_n++; + } + my $nargs = @argsgc; + + # COMMUN function generation + my $argscommun = join(', ', @argscommun); + my $callcommun = "call$sysname($argscommun)"; + my @ret = ("_", "_"); + my $body = ""; + my $do_errno = 0; + for(my $i=0; $i<@out; $i++) { + my $p = $out[$i]; + my ($name, $type) = parseparam($p); + my $reg = ""; + if($name eq "err") { + $reg = "e1"; + $ret[1] = $reg; + $do_errno = 1; + } else { + $reg = "r0"; + $ret[0] = $reg; + } + if($type eq "bool") { + $reg = "$reg != 0"; + } + if($reg ne "e1") { + $body .= "\t$name = $type($reg)\n"; + } + } + if ($ret[0] eq "_" && $ret[1] eq "_") { + $textcommon .= "\t$callcommun\n"; + } else { + $textcommon .= "\t$ret[0], $ret[1] := $callcommun\n"; + } + $textcommon .= $body; + + if ($do_errno) { + $textcommon .= "\tif e1 != 0 {\n"; + $textcommon .= "\t\terr = errnoErr(e1)\n"; + $textcommon .= "\t}\n"; + } + $textcommon .= "\treturn\n"; + $textcommon .= "}\n"; + + if ($onlyCommon){ + next + } + # CALL Prototype + my $callProto = sprintf "func call%s(%s) (r1 uintptr, e1 Errno) {\n", $sysname, join(', ', @argscall); + + # GC function generation + my $asm = "syscall6"; + if ($nonblock) { + $asm = "rawSyscall6"; + } + + if(@argsgc <= 6) { + while(@argsgc < 6) { + push @argsgc, "0"; + } + } else { + print STDERR "$ARGV:$.: too many arguments to system call\n"; + } + my $argsgc = join(', ', @argsgc); + my $callgc = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $argsgc)"; + + $textgc .= $callProto; + $textgc .= "\tr1, _, e1 = $callgc\n"; + $textgc .= "\treturn\n}\n"; + + # GCCGO function generation + my $argsgccgo = join(', ', @argsgccgo); + my $callgccgo = "C.$sysname($argsgccgo)"; + $textgccgo .= $callProto; + $textgccgo 
.= "\tr1 = uintptr($callgccgo)\n"; + $textgccgo .= "\te1 = syscall.GetErrno()\n"; + $textgccgo .= "\treturn\n}\n"; +} + +if($errors) { + exit 1; +} + +# Print zsyscall_aix_ppc64.go +open(my $fcommun, '>', 'zsyscall_aix_ppc64.go'); +my $tofcommun = <', 'zsyscall_aix_ppc64_gc.go'); +my $tofgc = <', 'zsyscall_aix_ppc64_gccgo.go'); +my $tofgccgo = <) { my @in = parseparamlist($in); my @out = parseparamlist($out); + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. + $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + # So file name. if($modname eq "") { $modname = "libc"; diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl index 49f186f83..20632e146 100755 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl +++ b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl @@ -32,6 +32,7 @@ my @headers = qw ( sys/sem.h sys/shm.h sys/vmmeter.h + uvm/uvmexp.h uvm/uvm_param.h uvm/uvm_swap_encrypt.h ddb/db_var.h diff --git a/vendor/golang.org/x/sys/unix/openbsd_pledge.go b/vendor/golang.org/x/sys/unix/openbsd_pledge.go index 11388e5d4..230a36d24 100644 --- a/vendor/golang.org/x/sys/unix/openbsd_pledge.go +++ b/vendor/golang.org/x/sys/unix/openbsd_pledge.go @@ -15,10 +15,6 @@ import ( "unsafe" ) -const ( - _SYS_PLEDGE = 108 -) - // Pledge implements the pledge syscall. // // The pledge syscall does not accept execpromises on OpenBSD releases @@ -34,15 +30,9 @@ func Pledge(promises, execpromises string) error { return err } - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) - } - - // If OpenBSD <= 6.2 and execpromises is not empty - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + err = pledgeAvailable(maj, min, execpromises) + if err != nil { + return err } pptr, err := syscall.BytePtrFromString(promises) @@ -63,7 +53,71 @@ func Pledge(promises, execpromises string) error { expr = unsafe.Pointer(exptr) } - _, _, e := syscall.Syscall(_SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) + _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) + if e != 0 { + return e + } + + return nil +} + +// PledgePromises implements the pledge syscall. +// +// This changes the promises and leaves the execpromises untouched. +// +// For more information see pledge(2). +func PledgePromises(promises string) error { + maj, min, err := majmin() + if err != nil { + return err + } + + err = pledgeAvailable(maj, min, "") + if err != nil { + return err + } + + // This variable holds the execpromises and is always nil. + var expr unsafe.Pointer + + pptr, err := syscall.BytePtrFromString(promises) + if err != nil { + return err + } + + _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) + if e != 0 { + return e + } + + return nil +} + +// PledgeExecpromises implements the pledge syscall. +// +// This changes the execpromises and leaves the promises untouched. +// +// For more information see pledge(2). 
+func PledgeExecpromises(execpromises string) error { + maj, min, err := majmin() + if err != nil { + return err + } + + err = pledgeAvailable(maj, min, execpromises) + if err != nil { + return err + } + + // This variable holds the promises and is always nil. + var pptr unsafe.Pointer + + exptr, err := syscall.BytePtrFromString(execpromises) + if err != nil { + return err + } + + _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) if e != 0 { return e } @@ -93,3 +147,20 @@ func majmin() (major int, minor int, err error) { return } + +// pledgeAvailable checks for availability of the pledge(2) syscall +// based on the running OpenBSD version. +func pledgeAvailable(maj, min int, execpromises string) error { + // If OpenBSD <= 5.9, pledge is not available. + if (maj == 5 && min != 9) || maj < 5 { + return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) + } + + // If OpenBSD <= 6.2 and execpromises is not empty, + // return an error - execpromises is not available before 6.3 + if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { + return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + } + + return nil +} diff --git a/vendor/golang.org/x/sys/unix/openbsd_unveil.go b/vendor/golang.org/x/sys/unix/openbsd_unveil.go new file mode 100644 index 000000000..aebc2dc57 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/openbsd_unveil.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build openbsd + +package unix + +import ( + "syscall" + "unsafe" +) + +// Unveil implements the unveil syscall. +// For more information see unveil(2). +// Note that the special case of blocking further +// unveil calls is handled by UnveilBlock. +func Unveil(path string, flags string) error { + pathPtr, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + flagsPtr, err := syscall.BytePtrFromString(flags) + if err != nil { + return err + } + _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) + if e != 0 { + return e + } + return nil +} + +// UnveilBlock blocks future unveil calls. +// For more information see unveil(2). +func UnveilBlock() error { + // Both pointers must be nil. + var pathUnsafe, flagsUnsafe unsafe.Pointer + _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) + if e != 0 { + return e + } + return nil +} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index f153c0673..9dd2f32f5 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -12,7 +12,7 @@ import "unsafe" // Round the length of a raw sockaddr up to align it properly. func cmsgAlignOf(salen int) int { - salign := sizeofPtr + salign := SizeofPtr // NOTE: It seems like 64-bit Darwin, DragonFly BSD and // Solaris kernels still require 32-bit aligned access to // network subsystem. 
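Taken together, the pledge(2) and unveil(2) additions above let a program drop privileges early in main. A hypothetical OpenBSD-only usage sketch: the unveil path, permission string, and promise string are illustrative; unveil needs OpenBSD 6.4 or newer, and the Pledge helpers above refuse to run on releases that lack the syscall.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Expose only /etc, read-only, then block further unveil calls.
	if err := unix.Unveil("/etc", "r"); err != nil {
		log.Fatal(err)
	}
	if err := unix.UnveilBlock(); err != nil {
		log.Fatal(err)
	}
	// Restrict the process to stdio and read-only filesystem access,
	// leaving any previously set execpromises untouched.
	if err := unix.PledgePromises("stdio rpath"); err != nil {
		log.Fatal(err)
	}
	// ... the rest of the program runs with the reduced attack surface.
}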
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index df1f9ea3d..f8eac1742 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -305,11 +305,11 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, type WaitStatus uint32 func (w WaitStatus) Stopped() bool { return w&0x40 != 0 } -func (w WaitStatus) StopSignal() syscall.Signal { +func (w WaitStatus) StopSignal() Signal { if !w.Stopped() { return -1 } - return syscall.Signal(w>>8) & 0xFF + return Signal(w>>8) & 0xFF } func (w WaitStatus) Exited() bool { return w&0xFF == 0 } @@ -321,11 +321,11 @@ func (w WaitStatus) ExitStatus() int { } func (w WaitStatus) Signaled() bool { return w&0x40 == 0 && w&0xFF != 0 } -func (w WaitStatus) Signal() syscall.Signal { +func (w WaitStatus) Signal() Signal { if !w.Signaled() { return -1 } - return syscall.Signal(w>>16) & 0xFF + return Signal(w>>16) & 0xFF } func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } @@ -383,6 +383,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. //sys FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) = fcntl +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + func Flock(fd int, how int) (err error) { return syscall.Flock(fd, how) } @@ -396,15 +398,12 @@ func Flock(fd int, how int) (err error) { //sys Chroot(path string) (err error) //sys Close(fd int) (err error) //sys Dup(oldfd int) (fd int, err error) -//sys Dup3(oldfd int, newfd int, flags int) (err error) //sys Exit(code int) //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) -//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) -//sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) //sys Fsync(fd int) (err error) // readdir_r @@ -417,7 +416,7 @@ func Flock(fd int, how int) (err error) { //sys Getpriority(which int, who int) (prio int, err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) -//sysnb Kill(pid int, sig syscall.Signal) (err error) +//sysnb Kill(pid int, sig Signal) (err error) //sys Klogctl(typ int, buf []byte) (n int, err error) = syslog //sys Mkdir(dirfd int, path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) @@ -429,7 +428,6 @@ func Flock(fd int, how int) (err error) { //sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) -//sys Removexattr(path string, attr string) (err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Setdomainname(p []byte) (err error) //sys Sethostname(p []byte) (err error) @@ -443,7 +441,6 @@ func Flock(fd int, how int) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() -//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask 
int) (oldmask int) //sysnb Uname(buf *Utsname) (err error) @@ -451,7 +448,6 @@ func Flock(fd int, how int) (err error) { // //sys Unmount(target string, flags int) (err error) = umount //sys Unlink(path string) (err error) //sys Unlinkat(dirfd int, path string, flags int) (err error) -//sys Unshare(flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) //sys readlen(fd int, p *byte, np int) (n int, err error) = read @@ -537,19 +533,6 @@ func Pipe(p []int) (err error) { return } -//sysnb pipe2(p *[2]_C_int, flags int) (err error) - -func Pipe2(p []int, flags int) (err error) { - if len(p) != 2 { - return EINVAL - } - var pp [2]_C_int - err = pipe2(&pp, flags) - p[0] = int(pp[0]) - p[1] = int(pp[1]) - return -} - //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 79d125b30..756510575 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -248,11 +248,13 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -280,13 +282,17 @@ func Uname(uname *Utsname) error { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) @@ -312,11 +318,13 @@ func Uname(uname *Utsname) error { //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Undelete(path string) (err error) //sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p 
[]byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 77a634c76..085a808cc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,9 +13,34 @@ package unix import ( + "sync" "unsafe" ) +const ( + SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } + SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ + SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ + SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ + SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ + SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ + SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \ +) + +// See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. +var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h +const _ino64First = 1200031 + +func supportsABI(ver uint32) bool { + osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) + return osreldate >= ver +} + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -121,17 +146,39 @@ func Getwd() (string, error) { } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer - var bufsize uintptr + var ( + _p0 unsafe.Pointer + bufsize uintptr + oldBuf []statfs_freebsd11_t + needsConvert bool + ) + if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + if supportsABI(_ino64First) { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } else { + n := len(buf) + oldBuf = make([]statfs_freebsd11_t, n) + _p0 = unsafe.Pointer(&oldBuf[0]) + bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) + needsConvert = true + } } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + var sysno uintptr = SYS_GETFSSTAT + if supportsABI(_ino64First) { + sysno = SYS_GETFSSTAT_FREEBSD12 + } + r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } + if e1 == 0 && needsConvert { + for i := range oldBuf { + buf[i].convertFrom(&oldBuf[i]) + } + } return } @@ -225,6 +272,234 @@ func Uname(uname *Utsname) error { return nil } +func Stat(path string, st *Stat_t) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstatat_freebsd12(AT_FDCWD, path, st, 0) + } + err = stat(path, &oldStat) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Lstat(path string, st *Stat_t) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstatat_freebsd12(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) + } + err = lstat(path, &oldStat) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Fstat(fd int, st *Stat_t) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstat_freebsd12(fd, st) + } + err = fstat(fd, &oldStat) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { + var oldStat stat_freebsd11_t + if 
supportsABI(_ino64First) { + return fstatat_freebsd12(fd, path, st, flags) + } + err = fstatat(fd, path, &oldStat, flags) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Statfs(path string, st *Statfs_t) (err error) { + var oldStatfs statfs_freebsd11_t + if supportsABI(_ino64First) { + return statfs_freebsd12(path, st) + } + err = statfs(path, &oldStatfs) + if err != nil { + return err + } + + st.convertFrom(&oldStatfs) + return nil +} + +func Fstatfs(fd int, st *Statfs_t) (err error) { + var oldStatfs statfs_freebsd11_t + if supportsABI(_ino64First) { + return fstatfs_freebsd12(fd, st) + } + err = fstatfs(fd, &oldStatfs) + if err != nil { + return err + } + + st.convertFrom(&oldStatfs) + return nil +} + +func Getdents(fd int, buf []byte) (n int, err error) { + return Getdirentries(fd, buf, nil) +} + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + if supportsABI(_ino64First) { + return getdirentries_freebsd12(fd, buf, basep) + } + + // The old syscall entries are smaller than the new. Use 1/4 of the original + // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). + oldBufLen := roundup(len(buf)/4, _dirblksiz) + oldBuf := make([]byte, oldBufLen) + n, err = getdirentries(fd, oldBuf, basep) + if err == nil && n > 0 { + n = convertFromDirents11(buf, oldBuf[:n]) + } + return +} + +func Mknod(path string, mode uint32, dev uint64) (err error) { + var oldDev int + if supportsABI(_ino64First) { + return mknodat_freebsd12(AT_FDCWD, path, mode, dev) + } + oldDev = int(dev) + return mknod(path, mode, oldDev) +} + +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { + var oldDev int + if supportsABI(_ino64First) { + return mknodat_freebsd12(fd, path, mode, dev) + } + oldDev = int(dev) + return mknodat(fd, path, mode, oldDev) +} + +// round x to the nearest multiple of y, larger or equal to x. +// +// from /usr/include/sys/param.h Macros for counting and rounding. 
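The ino64 compatibility switch above keys off kern.osreldate. The FreeBSD-only sketch below performs the same check from outside the package, with the 1200031 threshold copied from the INO64_FIRST constant in the hunk above:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// ino64First mirrors INO64_FIRST above: the first FreeBSD 12 osreldate
// whose syscall ABI uses 64-bit inodes.
const ino64First = 1200031

func main() {
	osreldate, err := unix.SysctlUint32("kern.osreldate")
	if err != nil {
		fmt.Println("sysctl:", err)
		return
	}
	if osreldate >= ino64First {
		fmt.Println("ino64 (FreeBSD 12) ABI:", osreldate)
	} else {
		fmt.Println("pre-ino64 ABI:", osreldate)
	}
}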
+// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +func roundup(x, y int) int { + return ((x + y - 1) / y) * y +} + +func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { + *s = Stat_t{ + Dev: uint64(old.Dev), + Ino: uint64(old.Ino), + Nlink: uint64(old.Nlink), + Mode: old.Mode, + Uid: old.Uid, + Gid: old.Gid, + Rdev: uint64(old.Rdev), + Atim: old.Atim, + Mtim: old.Mtim, + Ctim: old.Ctim, + Birthtim: old.Birthtim, + Size: old.Size, + Blocks: old.Blocks, + Blksize: old.Blksize, + Flags: old.Flags, + Gen: uint64(old.Gen), + } +} + +func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { + *s = Statfs_t{ + Version: _statfsVersion, + Type: old.Type, + Flags: old.Flags, + Bsize: old.Bsize, + Iosize: old.Iosize, + Blocks: old.Blocks, + Bfree: old.Bfree, + Bavail: old.Bavail, + Files: old.Files, + Ffree: old.Ffree, + Syncwrites: old.Syncwrites, + Asyncwrites: old.Asyncwrites, + Syncreads: old.Syncreads, + Asyncreads: old.Asyncreads, + // Spare + Namemax: old.Namemax, + Owner: old.Owner, + Fsid: old.Fsid, + // Charspare + // Fstypename + // Mntfromname + // Mntonname + } + + sl := old.Fstypename[:] + n := clen(*(*[]byte)(unsafe.Pointer(&sl))) + copy(s.Fstypename[:], old.Fstypename[:n]) + + sl = old.Mntfromname[:] + n = clen(*(*[]byte)(unsafe.Pointer(&sl))) + copy(s.Mntfromname[:], old.Mntfromname[:n]) + + sl = old.Mntonname[:] + n = clen(*(*[]byte)(unsafe.Pointer(&sl))) + copy(s.Mntonname[:], old.Mntonname[:n]) +} + +func convertFromDirents11(buf []byte, old []byte) int { + const ( + fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) + oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) + ) + + dstPos := 0 + srcPos := 0 + for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { + dstDirent := (*Dirent)(unsafe.Pointer(&buf[dstPos])) + srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos])) + + reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) + if dstPos+reclen > len(buf) { + break + } + + dstDirent.Fileno = uint64(srcDirent.Fileno) + dstDirent.Off = 0 + dstDirent.Reclen = uint16(reclen) + dstDirent.Type = srcDirent.Type + dstDirent.Pad0 = 0 + dstDirent.Namlen = uint16(srcDirent.Namlen) + dstDirent.Pad1 = 0 + + copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) + padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] + for i := range padding { + padding[i] = 0 + } + + dstPos += int(dstDirent.Reclen) + srcPos += int(srcDirent.Reclen) + } + + return dstPos +} + /* * Exposed directly */ @@ -264,13 +539,16 @@ func Uname(uname *Utsname) error { //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys Fstat(fd int, stat *Stat_t) (err error) -//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) -//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys fstat(fd int, stat *stat_freebsd11_t) (err error) +//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) +//sys fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) +//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) +//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) +//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys Getdents(fd int, buf []byte) (n int, err error) -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uintptr) (n 
int, err error) +//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -292,11 +570,13 @@ func Uname(uname *Utsname) error { //sys Link(path string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) +//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys Mknod(path string, mode uint32, dev int) (err error) +//sys mknod(path string, mode uint32, dev int) (err error) +//sys mknodat(fd int, path string, mode uint32, dev int) (err error) +//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) @@ -326,8 +606,9 @@ func Uname(uname *Utsname) error { //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys Stat(path string, stat *Stat_t) (err error) -//sys Statfs(path string, stat *Statfs_t) (err error) +//sys stat(path string, stat *stat_freebsd11_t) (err error) +//sys statfs(path string, stat *statfs_freebsd11_t) (err error) +//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) @@ -382,6 +663,7 @@ func Uname(uname *Utsname) error { // Kqueue_portset // Getattrlist // Setattrlist +// Getdents // Getdirentriesattr // Searchfs // Delete diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 02cf204a1..466b2576d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -12,6 +12,8 @@ package unix import ( + "encoding/binary" + "net" "syscall" "unsafe" ) @@ -55,6 +57,15 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req uint, value int) error { + v := int32(value) + return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) +} + // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. func IoctlSetInt(fd int, req uint, value int) error { @@ -710,6 +721,51 @@ func (sa *SockaddrXDP) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrXDP, nil } +// This constant mirrors the #define of PX_PROTO_OE in +// linux/if_pppox.h. 
We're defining this by hand here instead of +// autogenerating through mkerrors.sh because including +// linux/if_pppox.h causes some declaration conflicts with other +// includes (linux/if_pppox.h includes linux/in.h, which conflicts +// with netinet/in.h). Given that we only need a single zero constant +// out of that file, it's cleaner to just define it by hand here. +const px_proto_oe = 0 + +type SockaddrPPPoE struct { + SID uint16 + Remote net.HardwareAddr + Dev string + raw RawSockaddrPPPoX +} + +func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { + if len(sa.Remote) != 6 { + return nil, 0, EINVAL + } + if len(sa.Dev) > IFNAMSIZ-1 { + return nil, 0, EINVAL + } + + *(*uint16)(unsafe.Pointer(&sa.raw[0])) = AF_PPPOX + // This next field is in host-endian byte order. We can't use the + // same unsafe pointer cast as above, because this value is not + // 32-bit aligned and some architectures don't allow unaligned + // access. + // + // However, the value of px_proto_oe is 0, so we can use + // encoding/binary helpers to write the bytes without worrying + // about the ordering. + binary.BigEndian.PutUint32(sa.raw[2:6], px_proto_oe) + // This field is deliberately big-endian, unlike the previous + // one. The kernel expects SID to be in network byte order. + binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) + copy(sa.raw[8:14], sa.Remote) + for i := 14; i < 14+IFNAMSIZ; i++ { + sa.raw[i] = 0 + } + copy(sa.raw[14:], sa.Dev) + return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -820,6 +876,22 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { SharedUmemFD: pp.Shared_umem_fd, } return sa, nil + case AF_PPPOX: + pp := (*RawSockaddrPPPoX)(unsafe.Pointer(rsa)) + if binary.BigEndian.Uint32(pp[2:6]) != px_proto_oe { + return nil, EINVAL + } + sa := &SockaddrPPPoE{ + SID: binary.BigEndian.Uint16(pp[6:8]), + Remote: net.HardwareAddr(pp[8:14]), + } + for i := 14; i < 14+IFNAMSIZ; i++ { + if pp[i] == 0 { + sa.Dev = string(pp[14:i]) + break + } + } + return sa, nil } return nil, EAFNOSUPPORT } @@ -1122,7 +1194,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // The ptrace syscall differs from glibc's ptrace. // Peeks returns the word in *data, not as the return value. - var buf [sizeofPtr]byte + var buf [SizeofPtr]byte // Leading edge. PEEKTEXT/PEEKDATA don't require aligned // access (PEEKUSER warns that it might), but if we don't @@ -1130,12 +1202,12 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary and not get the bytes leading up to the page // boundary. n := 0 - if addr%sizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if addr%SizeofPtr != 0 { + err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return 0, err } - n += copy(out, buf[addr%sizeofPtr:]) + n += copy(out, buf[addr%SizeofPtr:]) out = out[n:] } @@ -1173,15 +1245,15 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Leading edge. 
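Returning to the SockaddrPPPoE type introduced above, a caller would use it roughly as follows. This is a hypothetical, Linux-only sketch: the session ID, MAC address, and device name are invented, and the Connect will fail unless a PPPoE session has actually been negotiated and the process has the needed privileges.

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	// Values as they would come out of PPPoE discovery (examples only).
	mac, _ := net.ParseMAC("00:11:22:33:44:55") // valid example MAC
	sa := &unix.SockaddrPPPoE{
		SID:    0x1234,
		Remote: mac,
		Dev:    "eth0",
	}

	// PX_PROTO_OE is 0, so protocol 0 selects PPPoE on an AF_PPPOX socket.
	fd, err := unix.Socket(unix.AF_PPPOX, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	// Connect attaches the PPPoE session described by sa to the socket.
	if err := unix.Connect(fd, sa); err != nil {
		fmt.Println("connect:", err)
	}
}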
n := 0 - if addr%sizeofPtr != 0 { - var buf [sizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if addr%SizeofPtr != 0 { + var buf [SizeofPtr]byte + err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return 0, err } - n += copy(buf[addr%sizeofPtr:], data) + n += copy(buf[addr%SizeofPtr:], data) word := *((*uintptr)(unsafe.Pointer(&buf[0]))) - err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word) + err = ptrace(pokeReq, pid, addr-addr%SizeofPtr, word) if err != nil { return 0, err } @@ -1189,19 +1261,19 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c } // Interior. - for len(data) > sizeofPtr { + for len(data) > SizeofPtr { word := *((*uintptr)(unsafe.Pointer(&data[0]))) err = ptrace(pokeReq, pid, addr+uintptr(n), word) if err != nil { return n, err } - n += sizeofPtr - data = data[sizeofPtr:] + n += SizeofPtr + data = data[SizeofPtr:] } // Trailing edge. if len(data) > 0 { - var buf [sizeofPtr]byte + var buf [SizeofPtr]byte err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return n, err @@ -1304,6 +1376,7 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) +//sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) //sys Dup3(oldfd int, newfd int, flags int) (err error) //sysnb EpollCreate1(flag int) (fd int, err error) @@ -1317,6 +1390,7 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) //sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) +//sys FinitModule(fd int, params string, flags int) (err error) //sys Flistxattr(fd int, dest []byte) (sz int, err error) //sys Flock(fd int, how int) (err error) //sys Fremovexattr(fd int, attr string) (err error) @@ -1338,6 +1412,7 @@ func Getpgrp() (pid int) { //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) //sys Getxattr(path string, attr string, dest []byte) (sz int, err error) +//sys InitModule(moduleImage []byte, params string) (err error) //sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) //sysnb InotifyInit1(flags int) (fd int, err error) //sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) @@ -1527,8 +1602,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // ClockNanosleep // ClockSettime // Clone -// CreateModule -// DeleteModule // EpollCtlOld // EpollPwait // EpollWaitOld @@ -1572,7 +1645,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // Pselect6 // Ptrace // Putpmsg -// QueryModule // Quotactl // Readahead // Readv diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 5f9b2454a..5247d9f90 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -160,3 +160,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return poll(&fds[0], len(fds), timeout) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func 
KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 646f295ad..fa5a9a6f6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -191,12 +191,9 @@ func Dup2(oldfd int, newfd int) (err error) { return Dup3(oldfd, newfd, 0) } -func Pause() (err error) { - _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err } func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 6a38dfd5b..41451854b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -136,3 +136,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error { // order of their arguments. return syncFileRange2(fd, flags, off, n) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 512077fe8..44aa1227a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -191,12 +191,9 @@ func Dup2(oldfd int, newfd int) (err error) { return Dup3(oldfd, newfd, 0) } -func Pause() (err error) { - _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err } func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 6e4ee0cf2..f52f148f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -322,3 +322,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return poll(&fds[0], len(fds), timeout) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. 
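The KexecFileLoad wrappers above (provided per the files touched here for amd64, ppc64/ppc64le and s390x) and the new InitModule/FinitModule/DeleteModule syscalls are straightforward to call. A rough sketch, with the kernel, initrd and module paths being placeholders:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	kernel, err := unix.Open("/boot/vmlinuz", unix.O_RDONLY, 0) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	initrd, err := unix.Open("/boot/initrd.img", unix.O_RDONLY, 0) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// KexecFileLoad accounts for the trailing NUL itself, as the comment above explains.
	if err := unix.KexecFileLoad(kernel, initrd, "root=/dev/sda1 console=ttyS0", 0); err != nil {
		log.Fatal(err)
	}

	// To stage a kernel without an initramfs, the flag added in the zerrors files
	// can be used instead:
	//   unix.KexecFileLoad(kernel, -1, "root=/dev/sda1", unix.KEXEC_FILE_NO_INITRAMFS)

	// Loading and unloading a module with the new module syscalls.
	mod, err := unix.Open("/lib/modules/extra/example.ko", unix.O_RDONLY, 0) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if err := unix.FinitModule(mod, "", 0); err != nil {
		log.Fatal(err)
	}
	if err := unix.DeleteModule("example", 0); err != nil {
		log.Fatal(err)
	}
}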
+ cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 639bcdef7..059327a36 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -13,6 +13,7 @@ package unix import ( + "runtime" "syscall" "unsafe" ) @@ -100,14 +101,14 @@ func SysctlClockinfo(name string) (*Clockinfo, error) { } n := uintptr(SizeofClockinfo) - buf := make([]byte, SizeofClockinfo) - if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil { + var ci Clockinfo + if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { return nil, err } if n != SizeofClockinfo { return nil, EIO } - return (*Clockinfo)(unsafe.Pointer(&buf[0])), nil + return &ci, nil } //sysnb pipe() (fd1 int, fd2 int, err error) @@ -190,6 +191,13 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { + var value Ptmget + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + runtime.KeepAlive(value) + return &value, err +} + func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -269,6 +277,7 @@ func Uname(uname *Utsname) error { //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -293,19 +302,26 @@ func Uname(uname *Utsname) error { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) +//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) +//sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK @@ -323,10 +339,12 @@ func Uname(uname *Utsname) error { //sysnb Setuid(uid int) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Symlink(path string, link string) (err error) +//sys 
Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 07e6669ca..5a398f817 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -43,6 +43,23 @@ func nametomib(name string) (mib []_C_int, err error) { return nil, EINVAL } +func SysctlUvmexp(name string) (*Uvmexp, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofUvmexp) + var u Uvmexp + if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofUvmexp { + return nil, EIO + } + return &u, nil +} + //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -141,6 +158,15 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) + +func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + if len(fds) == 0 { + return ppoll(nil, 0, timeout, sigmask) + } + return ppoll(&fds[0], len(fds), timeout, sigmask) +} + func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -207,6 +233,7 @@ func Uname(uname *Utsname) error { //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -233,19 +260,26 @@ func Uname(uname *Utsname) error { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) +//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) +//sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) +//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys 
Rename(from string, to string) (err error) +//sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK @@ -268,10 +302,12 @@ func Uname(uname *Utsname) error { //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) +//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Unlink(path string) (err error) +//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) @@ -294,15 +330,11 @@ func Uname(uname *Utsname) error { // clock_settime // closefrom // execve -// faccessat -// fchmodat -// fchownat // fcntl // fhopen // fhstat // fhstatfs // fork -// fstatat // futimens // getfh // getgid @@ -316,12 +348,8 @@ func Uname(uname *Utsname) error { // lfs_markv // lfs_segclean // lfs_segwait -// linkat // mincore // minherit -// mkdirat -// mkfifoat -// mknodat // mount // mquery // msgctl @@ -330,12 +358,10 @@ func Uname(uname *Utsname) error { // msgsnd // nfssvc // nnpfspioctl -// openat // preadv // profil // pwritev // quotactl -// readlinkat // readv // reboot // renameat @@ -356,13 +382,11 @@ func Uname(uname *Utsname) error { // sigprocmask // sigreturn // sigsuspend -// symlinkat // sysarch // syscall // threxit // thrsigdivert // thrsleep // thrwakeup -// unlinkat // vfork // writev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 994964a91..d62da60d1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -31,3 +31,7 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/386 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 59844f504..5d812aaea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -31,3 +31,7 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/arm the syscall is called sysctl instead of __sysctl. 
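The OpenBSD port gains SysctlUvmexp, Ppoll and the *at family of wrappers above. A small sketch of the Ppoll wrapper, which mirrors the existing Poll but additionally takes a Timespec timeout and a signal mask; passing a nil sigmask to leave the mask untouched is an assumption about the underlying ppoll contract.

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fds := []unix.PollFd{
		{Fd: int32(os.Stdin.Fd()), Events: unix.POLLIN},
	}
	// Wait up to two seconds for stdin to become readable.
	timeout := unix.NsecToTimespec(2e9)
	n, err := unix.Ppoll(fds, &timeout, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d descriptor(s) ready, revents=%#x", n, fds[0].Revents)
}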
+const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 13956b795..64fcda4ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -22,10 +22,10 @@ var ( ) const ( - darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8 - dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8 - netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4 - solaris64Bit = runtime.GOOS == "solaris" && sizeofPtr == 8 + darwin64Bit = runtime.GOOS == "darwin" && SizeofPtr == 8 + dragonfly64Bit = runtime.GOOS == "dragonfly" && SizeofPtr == 8 + netbsd32Bit = runtime.GOOS == "netbsd" && SizeofPtr == 4 + solaris64Bit = runtime.GOOS == "solaris" && SizeofPtr == 8 ) // Do the interface allocations only once for common diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 4cb8e8edf..1c70d1b69 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !gccgo +// +build !gccgo,!ppc64le,!ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go new file mode 100644 index 000000000..86dc765ab --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64le ppc64 +// +build !gccgo + +package unix + +import "syscall" + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return syscall.Syscall(trap, a1, a2, a3) +} +func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6) +} +func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return syscall.RawSyscall(trap, a1, a2, a3) +} +func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return syscall.RawSyscall6(trap, a1, a2, a3, a4, a5, a6) +} diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go index 18fbddd52..25e834940 100644 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ b/vendor/golang.org/x/sys/unix/types_aix.go @@ -59,14 +59,14 @@ struct sockaddr_any { */ import "C" -// Machine characteristics; for internal use. +// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong PathMax = C.PATH_MAX ) diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go index 46b9908e0..9fd2aaa6a 100644 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -70,14 +70,14 @@ struct sockaddr_any { */ import "C" -// Machine characteristics; for internal use. 
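The previously internal sizeof* constants are exported here (SizeofPtr, SizeofLong, and so on), which is what lets the ptrace helpers earlier in this diff refer to SizeofPtr; external callers can size buffers the same way. A rough sketch of reading one machine word from an already-stopped traced process, assuming a little-endian target and using placeholder pid and address values:

package main

import (
	"encoding/binary"
	"log"

	"golang.org/x/sys/unix"
)

// peekWord reads a single SizeofPtr-sized word from a traced (and stopped) process.
func peekWord(pid int, addr uintptr) (uint64, error) {
	buf := make([]byte, unix.SizeofPtr)
	if _, err := unix.PtracePeekData(pid, addr, buf); err != nil {
		return 0, err
	}
	switch unix.SizeofPtr {
	case 8:
		return binary.LittleEndian.Uint64(buf), nil
	default:
		return uint64(binary.LittleEndian.Uint32(buf)), nil
	}
}

func main() {
	word, err := peekWord(1234, 0x400000) // placeholder pid and address
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("word: %#x", word)
}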
+// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong ) // Basic types diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go index 386d5f89f..3365dd79d 100644 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -65,14 +65,14 @@ struct sockaddr_any { */ import "C" -// Machine characteristics; for internal use. +// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong ) // Basic types diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go index e84a892d6..747079895 100644 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -14,7 +14,11 @@ Input to cgo -godefs. See README.md package unix /* -#define KERNEL +#define _WANT_FREEBSD11_STAT 1 +#define _WANT_FREEBSD11_STATFS 1 +#define _WANT_FREEBSD11_DIRENT 1 +#define _WANT_FREEBSD11_KEVENT 1 + #include #include #include @@ -22,7 +26,7 @@ package unix #include #include #include -#include +#include #include #include #include @@ -63,50 +67,6 @@ struct sockaddr_any { char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; }; -// This structure is a duplicate of stat on FreeBSD 8-STABLE. -// See /usr/include/sys/stat.h. -struct stat8 { -#undef st_atimespec st_atim -#undef st_mtimespec st_mtim -#undef st_ctimespec st_ctim -#undef st_birthtimespec st_birthtim - __dev_t st_dev; - ino_t st_ino; - mode_t st_mode; - nlink_t st_nlink; - uid_t st_uid; - gid_t st_gid; - __dev_t st_rdev; -#if __BSD_VISIBLE - struct timespec st_atimespec; - struct timespec st_mtimespec; - struct timespec st_ctimespec; -#else - time_t st_atime; - long __st_atimensec; - time_t st_mtime; - long __st_mtimensec; - time_t st_ctime; - long __st_ctimensec; -#endif - off_t st_size; - blkcnt_t st_blocks; - blksize_t st_blksize; - fflags_t st_flags; - __uint32_t st_gen; - __int32_t st_lspare; -#if __BSD_VISIBLE - struct timespec st_birthtimespec; - unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); - unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); -#else - time_t st_birthtime; - long st_birthtimensec; - unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); - unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); -#endif -}; - // This structure is a duplicate of if_data on FreeBSD 8-STABLE. // See /usr/include/net/if.h. struct if_data8 { @@ -154,14 +114,14 @@ struct if_msghdr8 { */ import "C" -// Machine characteristics; for internal use. 
+// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong ) // Basic types @@ -189,14 +149,25 @@ type _Gid_t C.gid_t // Files -type Stat_t C.struct_stat8 +const ( + _statfsVersion = C.STATFS_VERSION + _dirblksiz = C.DIRBLKSIZ +) + +type Stat_t C.struct_stat + +type stat_freebsd11_t C.struct_freebsd11_stat type Statfs_t C.struct_statfs +type statfs_freebsd11_t C.struct_freebsd11_statfs + type Flock_t C.struct_flock type Dirent C.struct_dirent +type dirent_freebsd11 C.struct_freebsd11_dirent + type Fsid C.struct_fsid // File system limits @@ -279,7 +250,7 @@ const ( // Events (kqueue, kevent) -type Kevent_t C.struct_kevent +type Kevent_t C.struct_kevent_freebsd11 // Select diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index c49621ce5..2dd4f9542 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -67,14 +67,14 @@ struct sockaddr_any { */ import "C" -// Machine characteristics; for internal use. +// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong ) // Basic types @@ -248,6 +248,8 @@ type Termios C.struct_termios type Winsize C.struct_winsize +type Ptmget C.struct_ptmget + // fchmodat-like syscalls. const ( diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go index 8f2fe704c..4e5e57f9a 100644 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -38,6 +38,7 @@ package unix #include #include #include +#include #include #include #include @@ -66,14 +67,14 @@ struct sockaddr_any { */ import "C" -// Machine characteristics; for internal use. +// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong ) // Basic types @@ -260,6 +261,16 @@ const ( POLLWRNORM = C.POLLWRNORM ) +// Signal Sets + +type Sigset_t C.sigset_t + // Uname type Utsname C.struct_utsname + +// Uvmexp + +const SizeofUvmexp = C.sizeof_struct_uvmexp + +type Uvmexp C.struct_uvmexp diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go index 8cef71bd4..2b716f934 100644 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -75,14 +75,14 @@ struct sockaddr_any { */ import "C" -// Machine characteristics; for internal use. 
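With the types_freebsd.go changes above, Stat_t and Statfs_t now describe the FreeBSD 12 (64-bit inode) layouts, while the freebsd11 variants are kept for the compatibility shims in syscall_freebsd.go; callers keep using the exported names as before, and on an 11.x kernel the wrappers fall back to the freebsd11 syscalls and convert. A minimal sketch; the field names are taken from the generated FreeBSD types and should be treated as assumptions, and the paths are placeholders.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/etc/rc.conf", &st); err != nil { // placeholder path
		log.Fatal(err)
	}
	log.Printf("ino=%d size=%d mode=%#o", st.Ino, st.Size, st.Mode)

	var fs unix.Statfs_t
	if err := unix.Statfs("/", &fs); err != nil {
		log.Fatal(err)
	}
	log.Printf("blocks=%d free=%d", fs.Blocks, fs.Bfree)
}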
+// Machine characteristics const ( - sizeofPtr = C.sizeofPtr - sizeofShort = C.sizeof_short - sizeofInt = C.sizeof_int - sizeofLong = C.sizeof_long - sizeofLongLong = C.sizeof_longlong + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong PathMax = C.PATH_MAX MaxHostNameLen = C.MAXHOSTNAMELEN ) diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 930499324..30c1d71f4 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -81,7 +81,10 @@ func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { // flags are unused on FreeBSD func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) + var d unsafe.Pointer + if len(data) > 0 { + d = unsafe.Pointer(&data[0]) + } datasiz := len(data) nsid, a, err := xattrnamespace(attr) @@ -94,7 +97,10 @@ func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { } func Setxattr(file string, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) + var d unsafe.Pointer + if len(data) > 0 { + d = unsafe.Pointer(&data[0]) + } datasiz := len(data) nsid, a, err := xattrnamespace(attr) @@ -107,7 +113,10 @@ func Setxattr(file string, attr string, data []byte, flags int) (err error) { } func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { - d := unsafe.Pointer(&data[0]) + var d unsafe.Pointer + if len(data) > 0 { + d = unsafe.Pointer(&data[0]) + } datasiz := len(data) nsid, a, err := xattrnamespace(attr) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index fe564160b..db3c31ef8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -879,6 +879,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -988,7 +1008,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -1001,6 +1023,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1269,6 +1293,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x40042406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a 
+ PPPIOCGIDLE = 0x8008743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x40087446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x400c744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40087447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1564,6 +1618,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1584,6 +1639,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1598,11 +1654,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1617,8 +1674,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1667,7 +1724,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1723,6 +1782,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1871,6 +1933,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dcfa66749..4785835b6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -879,6 +879,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -988,7 +1008,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -1001,6 +1023,8 @@ const ( 
MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1269,6 +1293,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1565,6 +1619,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1585,6 +1640,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1599,11 +1655,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1618,8 +1675,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1668,7 +1725,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1724,6 +1783,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1872,6 +1934,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c2ef50c65..5e902423a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 
0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 +1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x40042406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8008743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x40087446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x400c744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40087447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1571,6 +1625,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1591,6 +1646,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1605,11 +1661,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1624,8 +1681,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1674,7 +1731,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1730,6 +1789,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1878,6 +1940,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 
1a820d668..ebe9d8b41 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -881,6 +881,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -989,7 +1009,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -1002,6 +1024,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1270,6 +1294,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1555,6 +1609,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1575,6 +1630,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1589,11 +1645,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1608,8 +1665,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1658,7 +1715,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 
0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1714,6 +1773,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1862,6 +1924,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b515b2a63..d467d211b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 +1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x80042406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1564,6 +1618,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1584,6 +1639,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1598,11 +1654,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 
RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1617,8 +1674,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1667,7 +1724,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1723,6 +1782,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1872,6 +1934,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 29a88f0e5..9c293ed13 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 +1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 
0x2000744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1564,6 +1618,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1584,6 +1639,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1598,11 +1654,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1617,8 +1674,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1667,7 +1724,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1723,6 +1782,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1872,6 +1934,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 0767ac1b0..e2162508c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 +1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 
0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1564,6 +1618,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1584,6 +1639,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1598,11 +1654,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1617,8 +1674,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1667,7 +1724,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1723,6 +1782,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1872,6 +1934,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 269b81318..836c0c654 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 
+1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x80042406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1564,6 +1618,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1584,6 +1639,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1598,11 +1654,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1617,8 +1674,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1667,7 +1724,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1723,6 +1782,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1872,6 +1934,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb52e9689..7ca618431 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x400 IXON = 0x200 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + 
KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -985,7 +1005,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -998,6 +1020,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1268,6 +1292,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1620,6 +1674,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1640,6 +1695,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1654,11 +1710,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1673,8 +1730,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1723,7 +1780,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1779,6 +1838,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1927,6 +1989,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 0563d34b3..839ac214c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x400 IXON = 0x200 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -985,7 +1005,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -998,6 +1020,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1268,6 +1292,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1620,6 +1674,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1640,6 +1695,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1654,11 +1710,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1673,8 +1730,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1723,7 +1780,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 
+ SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1779,6 +1838,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1927,6 +1989,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e95e3f677..a747aa1b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 +1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1552,6 +1606,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1572,6 +1627,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1586,11 +1642,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 
RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1605,8 +1662,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1655,7 +1712,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1711,6 +1770,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1859,6 +1921,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index bad17418e..96aff5083 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -986,7 +1006,9 @@ const ( MFD_HUGE_256MB = 0x70000000 MFD_HUGE_2GB = 0x7c000000 MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 MFD_HUGE_64KB = 0x40000000 MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f @@ -999,6 +1021,8 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -1267,6 +1291,36 @@ const ( PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PIPEFS_MAGIC = 0x50495045 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGNPMODE = 0xc008744c + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCNEWUNIT = 0xc004743e + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + 
PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1625,6 +1679,7 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d @@ -1645,6 +1700,7 @@ const ( RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 RTM_GETDCB = 0x4e RTM_GETLINK = 0x12 RTM_GETMDB = 0x56 @@ -1659,11 +1715,12 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x67 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1678,8 +1735,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x16 + RTM_NR_MSGTYPES = 0x58 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1728,7 +1785,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SC_LOG_FLUSH = 0x100000 SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 @@ -1784,6 +1843,9 @@ const ( SIOCGMIIPHY = 0x8947 SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 SIOCGRARP = 0x8961 SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 @@ -1932,6 +1994,7 @@ const ( SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 7fdc85b17..ba93f3e53 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,10 +1,10 @@ -// mkerrors.sh -m64 -// Code generated by the command above; DO NOT EDIT. +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. // +build sparc64,linux -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- -m64 _const.go +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -1969,174 +1969,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol", - 48: "address already in use", - 49: "cannot assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "transport endpoint is already connected", - 57: "transport endpoint is not connected", - 58: "cannot send after transport endpoint shutdown", - 59: "too many references: cannot splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disk quota exceeded", - 70: "stale file handle", - 71: "object is remote", - 72: "device not a stream", - 73: "timer expired", - 74: "out of streams resources", - 75: "no message of desired type", - 76: "bad message", - 77: "identifier removed", - 78: "resource deadlock avoided", - 79: "no locks available", - 80: "machine is not on the network", - 81: "unknown error 81", - 82: "link has been severed", - 83: "advertise error", - 84: "srmount error", - 85: "communication error on send", - 86: "protocol error", - 87: "multihop attempted", - 88: "RFS specific error", - 89: "remote address changed", - 90: "function not implemented", - 91: "streams pipe error", - 92: "value too large for defined data type", - 93: "file descriptor in bad state", - 94: "channel number out of range", - 95: "level 2 not synchronized", - 96: "level 3 halted", - 97: "level 3 reset", - 98: "link number out of range", - 99: "protocol driver not attached", - 100: "no CSI structure available", - 101: "level 2 halted", - 102: "invalid exchange", - 103: "invalid request descriptor", - 104: "exchange full", - 105: "no anode", - 106: "invalid request code", - 107: "invalid slot", - 108: "file locking deadlock error", - 109: 
"bad font file format", - 110: "cannot exec a shared library directly", - 111: "no data available", - 112: "accessing a corrupted shared library", - 113: "package not installed", - 114: "can not access a needed shared library", - 115: "name not unique on network", - 116: "interrupted system call should be restarted", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "invalid or incomplete multibyte or wide character", - 123: "attempting to link in too many shared libraries", - 124: ".lib section in a.out corrupted", - 125: "no medium found", - 126: "wrong medium type", - 127: "operation canceled", - 128: "required key not available", - 129: "key has expired", - 130: "key has been revoked", - 131: "key was rejected by service", - 132: "owner died", - 133: "state not recoverable", - 134: "operation not possible due to RF-kill", - 135: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "cannot assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, 
"EISCONN", "transport endpoint is already connected"}, + {57, "ENOTCONN", "transport endpoint is not connected"}, + {58, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {59, "ETOOMANYREFS", "too many references: cannot splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale file handle"}, + {71, "EREMOTE", "object is remote"}, + {72, "ENOSTR", "device not a stream"}, + {73, "ETIME", "timer expired"}, + {74, "ENOSR", "out of streams resources"}, + {75, "ENOMSG", "no message of desired type"}, + {76, "EBADMSG", "bad message"}, + {77, "EIDRM", "identifier removed"}, + {78, "EDEADLK", "resource deadlock avoided"}, + {79, "ENOLCK", "no locks available"}, + {80, "ENONET", "machine is not on the network"}, + {81, "ERREMOTE", "unknown error 81"}, + {82, "ENOLINK", "link has been severed"}, + {83, "EADV", "advertise error"}, + {84, "ESRMNT", "srmount error"}, + {85, "ECOMM", "communication error on send"}, + {86, "EPROTO", "protocol error"}, + {87, "EMULTIHOP", "multihop attempted"}, + {88, "EDOTDOT", "RFS specific error"}, + {89, "EREMCHG", "remote address changed"}, + {90, "ENOSYS", "function not implemented"}, + {91, "ESTRPIPE", "streams pipe error"}, + {92, "EOVERFLOW", "value too large for defined data type"}, + {93, "EBADFD", "file descriptor in bad state"}, + {94, "ECHRNG", "channel number out of range"}, + {95, "EL2NSYNC", "level 2 not synchronized"}, + {96, "EL3HLT", "level 3 halted"}, + {97, "EL3RST", "level 3 reset"}, + {98, "ELNRNG", "link number out of range"}, + {99, "EUNATCH", "protocol driver not attached"}, + {100, "ENOCSI", "no CSI structure available"}, + {101, "EL2HLT", "level 2 halted"}, + {102, "EBADE", "invalid exchange"}, + {103, "EBADR", "invalid request descriptor"}, + {104, "EXFULL", "exchange full"}, + {105, "ENOANO", "no anode"}, + {106, "EBADRQC", "invalid request code"}, + {107, "EBADSLT", "invalid slot"}, + {108, "EDEADLOCK", "file locking deadlock error"}, + {109, "EBFONT", "bad font file format"}, + {110, "ELIBEXEC", "cannot exec a shared library directly"}, + {111, "ENODATA", "no data available"}, + {112, "ELIBBAD", "accessing a corrupted shared library"}, + {113, "ENOPKG", "package not installed"}, + {114, "ELIBACC", "can not access a needed shared library"}, + {115, "ENOTUNIQ", "name not unique on network"}, + {116, "ERESTART", "interrupted system call should be restarted"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {123, "ELIBMAX", "attempting to link in too many shared libraries"}, + {124, "ELIBSCN", ".lib section in a.out corrupted"}, + {125, "ENOMEDIUM", "no medium found"}, + {126, "EMEDIUMTYPE", "wrong medium type"}, + {127, "ECANCELED", "operation canceled"}, + {128, "ENOKEY", "required key not available"}, + {129, "EKEYEXPIRED", "key has expired"}, + {130, "EKEYREVOKED", "key has been revoked"}, + {131, "EKEYREJECTED", "key was rejected by service"}, + {132, 
"EOWNERDEAD", "owner died"}, + {133, "ENOTRECOVERABLE", "state not recoverable"}, + {134, "ERFKILL", "operation not possible due to RF-kill"}, + {135, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "resource lost", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGLOST", "power failure"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 12261a562..1f9e8a29e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -952,6 +952,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -959,6 +960,7 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff @@ -1441,6 +1443,8 @@ const ( TIOCUCNTL_CBRK = 0x7a TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ab2f76122..6bae21e5d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -1,4 +1,4 @@ -// mksyscall_aix.pl -aix -tags aix,ppc syscall_aix.go syscall_aix_ppc.go +// mksyscall_aix_ppc.pl -aix -tags aix,ppc syscall_aix.go syscall_aix_ppc.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build aix,ppc @@ -7,6 +7,7 @@ package unix /* #include +#include int utimes(uintptr_t, uintptr_t); int utimensat(int, uintptr_t, uintptr_t, int); int getcwd(uintptr_t, size_t); @@ -20,10 +21,8 @@ int chdir(uintptr_t); int chroot(uintptr_t); int close(int); int dup(int); -int dup3(int, int, int); void exit(int); int faccessat(int, uintptr_t, unsigned int, int); -int fallocate(int, unsigned int, long long, long long); int fchdir(int); int fchmod(int, unsigned int); int fchmodat(int, uintptr_t, unsigned int, int); @@ -49,7 +48,6 @@ int open64(uintptr_t, int, unsigned int); int openat(int, uintptr_t, int, unsigned int); int read(int, uintptr_t, size_t); int readlink(uintptr_t, uintptr_t, size_t); -int removexattr(uintptr_t, uintptr_t); int renameat(int, uintptr_t, int, uintptr_t); int setdomainname(uintptr_t, size_t); int sethostname(uintptr_t, size_t); @@ -61,13 +59,11 @@ int setgid(int); int setpriority(int, int, int); int statx(int, uintptr_t, int, int, uintptr_t); int sync(); -long long tee(int, int, int, int); uintptr_t times(uintptr_t); int umask(int); int uname(uintptr_t); int unlink(uintptr_t); int unlinkat(int, uintptr_t, int); -int unshare(int); int ustat(int, uintptr_t); int write(int, uintptr_t, size_t); int dup2(int, int); @@ -118,7 +114,6 @@ int msync(uintptr_t, size_t, int); int munlock(uintptr_t, size_t); int munlockall(); int pipe(uintptr_t); -int pipe2(uintptr_t, int); int poll(uintptr_t, int, int); int gettimeofday(uintptr_t, uintptr_t); int time(uintptr_t); @@ -131,7 +126,6 @@ uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); */ import "C" import ( - "syscall" "unsafe" ) @@ -245,6 +239,17 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + val = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Acct(path string) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.acct(C.uintptr_t(_p0)) @@ -299,16 +304,6 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup3(oldfd int, newfd int, flags int) (err error) { - r0, er := C.dup3(C.int(oldfd), C.int(newfd), C.int(flags)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { C.exit(C.int(code)) return @@ -327,16 +322,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - r0, er := C.fallocate(C.int(fd), C.uint(mode), C.longlong(off), C.longlong(len)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { r0, er := C.fchdir(C.int(fd)) if r0 == -1 && er != nil { @@ -379,17 +364,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) - val = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT - func Fdatasync(fd int) (err error) { r0, er := C.fdatasync(C.int(fd)) if r0 == -1 && er != nil { @@ -477,7 +451,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Kill(pid int, sig syscall.Signal) (err error) { +func Kill(pid int, sig Signal) (err error) { r0, er := C.kill(C.int(pid), C.int(sig)) if r0 == -1 && er != nil { err = er @@ -628,18 +602,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Removexattr(path string, attr string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - _p1 := uintptr(unsafe.Pointer(C.CString(attr))) - r0, er := C.removexattr(C.uintptr_t(_p0), C.uintptr_t(_p1)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(oldpath))) _p1 := uintptr(unsafe.Pointer(C.CString(newpath))) @@ -763,17 +725,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { - r0, er := C.tee(C.int(rfd), C.int(wfd), C.int(len), C.int(flags)) - n = int64(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Times(tms *Tms) (ticks uintptr, err error) { r0, er := C.times(C.uintptr_t(uintptr(unsafe.Pointer(tms)))) ticks = uintptr(r0) @@ -825,16 +776,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unshare(flags int) (err error) { - r0, er := C.unshare(C.int(flags)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Ustat(dev int, ubuf *Ustat_t) (err error) { r0, er := C.ustat(C.int(dev), C.uintptr_t(uintptr(unsafe.Pointer(ubuf)))) if r0 == -1 && er != nil { @@ -1425,16 +1366,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - r0, er := C.pipe2(C.uintptr_t(uintptr(unsafe.Pointer(p))), C.int(flags)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, er := C.poll(C.uintptr_t(uintptr(unsafe.Pointer(fds))), C.int(nfds), C.int(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 2e4f93fb1..3e929e520 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -1,147 +1,25 @@ -// mksyscall_aix.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go +// mksyscall_aix_ppc64.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build aix,ppc64 package unix -/* -#include -int utimes(uintptr_t, uintptr_t); -int utimensat(int, uintptr_t, uintptr_t, int); -int getcwd(uintptr_t, size_t); -int accept(int, uintptr_t, uintptr_t); -int getdirent(int, uintptr_t, size_t); -int wait4(int, uintptr_t, int, uintptr_t); -int ioctl(int, int, uintptr_t); -int fcntl(uintptr_t, int, uintptr_t); -int acct(uintptr_t); -int chdir(uintptr_t); -int chroot(uintptr_t); -int close(int); -int dup(int); -int dup3(int, int, int); -void exit(int); -int faccessat(int, uintptr_t, unsigned int, int); -int fallocate(int, unsigned int, long long, long long); -int fchdir(int); -int fchmod(int, unsigned int); -int fchmodat(int, uintptr_t, unsigned int, int); -int fchownat(int, uintptr_t, int, int, int); -int fdatasync(int); -int fsync(int); -int getpgid(int); -int getpgrp(); -int getpid(); -int getppid(); -int getpriority(int, int); -int getrusage(int, uintptr_t); -int getsid(int); -int kill(int, int); -int syslog(int, uintptr_t, size_t); -int mkdir(int, uintptr_t, unsigned int); -int mkdirat(int, uintptr_t, unsigned int); -int mkfifo(uintptr_t, unsigned int); -int mknod(uintptr_t, unsigned int, int); -int mknodat(int, uintptr_t, unsigned int, int); -int nanosleep(uintptr_t, uintptr_t); -int open64(uintptr_t, int, unsigned int); -int openat(int, uintptr_t, int, unsigned int); -int read(int, uintptr_t, size_t); -int readlink(uintptr_t, uintptr_t, size_t); -int removexattr(uintptr_t, uintptr_t); -int renameat(int, uintptr_t, int, uintptr_t); -int setdomainname(uintptr_t, size_t); -int sethostname(uintptr_t, size_t); -int setpgid(int, int); -int setsid(); -int settimeofday(uintptr_t); -int setuid(int); -int setgid(int); -int setpriority(int, int, int); -int statx(int, uintptr_t, int, int, uintptr_t); -int sync(); -long long tee(int, int, int, int); -uintptr_t times(uintptr_t); -int umask(int); -int uname(uintptr_t); -int unlink(uintptr_t); -int unlinkat(int, uintptr_t, int); -int unshare(int); -int ustat(int, uintptr_t); -int write(int, uintptr_t, size_t); -int dup2(int, int); -int posix_fadvise64(int, long long, long long, int); -int fchown(int, int, int); -int fstat(int, uintptr_t); -int fstatat(int, uintptr_t, uintptr_t, int); -int fstatfs(int, uintptr_t); -int ftruncate(int, long long); -int getegid(); -int geteuid(); -int getgid(); -int getuid(); -int lchown(uintptr_t, int, int); -int listen(int, int); -int lstat(uintptr_t, uintptr_t); -int pause(); -int pread64(int, uintptr_t, size_t, long long); -int pwrite64(int, uintptr_t, size_t, long long); -int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); -int setregid(int, int); -int setreuid(int, int); -int shutdown(int, int); -long long splice(int, uintptr_t, int, uintptr_t, int, int); -int stat(uintptr_t, uintptr_t); -int statfs(uintptr_t, uintptr_t); -int truncate(uintptr_t, long long); -int bind(int, uintptr_t, uintptr_t); -int connect(int, uintptr_t, uintptr_t); -int getgroups(int, uintptr_t); -int setgroups(int, uintptr_t); -int getsockopt(int, int, int, uintptr_t, uintptr_t); -int setsockopt(int, int, int, uintptr_t, uintptr_t); -int socket(int, int, int); -int socketpair(int, int, int, uintptr_t); -int getpeername(int, uintptr_t, uintptr_t); -int getsockname(int, uintptr_t, uintptr_t); -int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); -int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); -int recvmsg(int, uintptr_t, int); -int sendmsg(int, uintptr_t, int); -int munmap(uintptr_t, uintptr_t); -int madvise(uintptr_t, size_t, int); -int 
mprotect(uintptr_t, size_t, int); -int mlock(uintptr_t, size_t); -int mlockall(int); -int msync(uintptr_t, size_t, int); -int munlock(uintptr_t, size_t); -int munlockall(); -int pipe(uintptr_t); -int pipe2(uintptr_t, int); -int poll(uintptr_t, int, int); -int gettimeofday(uintptr_t, uintptr_t); -int time(uintptr_t); -int utime(uintptr_t, uintptr_t); -int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); -long long lseek(int, long long, int); -uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); - -*/ -import "C" import ( - "syscall" "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, times *[2]Timeval) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.utimes(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(times)))) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callutimes(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -149,10 +27,14 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.utimensat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(times))), C.int(flag)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callutimensat(dirfd, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), flag) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -164,11 +46,9 @@ func getcwd(buf []byte) (err error) { if len(buf) > 0 { _p0 = &buf[0] } - var _p1 int - _p1 = len(buf) - r0, er := C.getcwd(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) - if r0 == -1 && er != nil { - err = er + _, e1 := callgetcwd(uintptr(unsafe.Pointer(_p0)), len(buf)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -176,10 +56,10 @@ func getcwd(buf []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, er := C.accept(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) + r0, e1 := callaccept(s, uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -191,12 +71,10 @@ func getdirent(fd int, buf []byte) (n int, err error) { if len(buf) > 0 { _p0 = &buf[0] } - var _p1 int - _p1 = len(buf) - r0, er := C.getdirent(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + r0, e1 := callgetdirent(fd, uintptr(unsafe.Pointer(_p0)), len(buf)) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -204,10 +82,10 @@ func getdirent(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) { - r0, er := C.wait4(C.int(pid), C.uintptr_t(uintptr(unsafe.Pointer(status))), C.int(options), C.uintptr_t(uintptr(unsafe.Pointer(rusage)))) + r0, e1 := callwait4(int(pid), uintptr(unsafe.Pointer(status)), options, uintptr(unsafe.Pointer(rusage))) wpid = Pid_t(r0) - if r0 == -1 && er != nil { - err = 
er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -215,9 +93,9 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) - if r0 == -1 && er != nil { - err = er + _, e1 := callioctl(fd, int(req), arg) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -225,10 +103,10 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) { - r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + r0, e1 := callfcntl(fd, cmd, uintptr(arg)) r = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -236,143 +114,9 @@ func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) { - r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(uintptr(unsafe.Pointer(lk)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.acct(C.uintptr_t(_p0)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.chdir(C.uintptr_t(_p0)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.chroot(C.uintptr_t(_p0)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - r0, er := C.close(C.int(fd)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, er := C.dup(C.int(oldfd)) - fd = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - r0, er := C.dup3(C.int(oldfd), C.int(newfd), C.int(flags)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - C.exit(C.int(code)) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.faccessat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - r0, er := C.fallocate(C.int(fd), C.uint(mode), C.longlong(off), C.longlong(len)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - r0, er := C.fchdir(C.int(fd)) - if r0 == -1 && er 
!= nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - r0, er := C.fchmod(C.int(fd), C.uint(mode)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.fchmodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags)) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.fchownat(C.int(dirfd), C.uintptr_t(_p0), C.int(uid), C.int(gid), C.int(flags)) - if r0 == -1 && er != nil { - err = er + _, e1 := callfcntl(fd, cmd, uintptr(unsafe.Pointer(lk))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -380,10 +124,148 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) + r0, e1 := callfcntl(uintptr(fd), cmd, uintptr(arg)) val = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callacct(uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callchdir(uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callchroot(uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, e1 := callclose(fd) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, e1 := calldup(oldfd) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + callexit(code) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callfaccessat(dirfd, uintptr(unsafe.Pointer(_p0)), mode, flags) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, e1 := callfchdir(fd) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, e1 
:= callfchmod(fd, mode) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callfchmodat(dirfd, uintptr(unsafe.Pointer(_p0)), mode, flags) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callfchownat(dirfd, uintptr(unsafe.Pointer(_p0)), uid, gid, flags) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -391,9 +273,9 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fdatasync(fd int) (err error) { - r0, er := C.fdatasync(C.int(fd)) - if r0 == -1 && er != nil { - err = er + _, e1 := callfdatasync(fd) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -401,9 +283,9 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - r0, er := C.fsync(C.int(fd)) - if r0 == -1 && er != nil { - err = er + _, e1 := callfsync(fd) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -411,10 +293,10 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, er := C.getpgid(C.int(pid)) + r0, e1 := callgetpgid(pid) pgid = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -422,7 +304,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pid int) { - r0, _ := C.getpgrp() + r0, _ := callgetpgrp() pid = int(r0) return } @@ -430,7 +312,7 @@ func Getpgrp() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _ := C.getpid() + r0, _ := callgetpid() pid = int(r0) return } @@ -438,7 +320,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _ := C.getppid() + r0, _ := callgetppid() ppid = int(r0) return } @@ -446,10 +328,10 @@ func Getppid() (ppid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, er := C.getpriority(C.int(which), C.int(who)) + r0, e1 := callgetpriority(which, who) prio = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -457,9 +339,9 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - r0, er := C.getrusage(C.int(who), C.uintptr_t(uintptr(unsafe.Pointer(rusage)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callgetrusage(who, uintptr(unsafe.Pointer(rusage))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -467,20 +349,20 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, er := C.getsid(C.int(pid)) + r0, e1 := callgetsid(pid) sid = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Kill(pid int, sig syscall.Signal) (err error) { - r0, er := C.kill(C.int(pid), C.int(sig)) - if r0 == -1 && er != nil { - err = er +func Kill(pid int, sig Signal) (err error) { + _, e1 := callkill(pid, int(sig)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -492,12 +374,10 @@ func Klogctl(typ int, buf []byte) (n int, err error) { if len(buf) > 0 { _p0 = &buf[0] } - var _p1 int - _p1 = len(buf) - r0, er := C.syslog(C.int(typ), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + r0, e1 := callsyslog(typ, uintptr(unsafe.Pointer(_p0)), len(buf)) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -505,10 +385,14 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(dirfd int, path string, mode uint32) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callmkdir(dirfd, uintptr(unsafe.Pointer(_p0)), mode) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -516,10 +400,14 @@ func Mkdir(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callmkdirat(dirfd, uintptr(unsafe.Pointer(_p0)), mode) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -527,10 +415,14 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.mkfifo(C.uintptr_t(_p0), C.uint(mode)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callmkfifo(uintptr(unsafe.Pointer(_p0)), mode) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -538,10 +430,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callmknod(uintptr(unsafe.Pointer(_p0)), mode, dev) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -549,10 +445,14 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callmknodat(dirfd, uintptr(unsafe.Pointer(_p0)), mode, dev) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -560,9 +460,9 @@ func 
Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - r0, er := C.nanosleep(C.uintptr_t(uintptr(unsafe.Pointer(time))), C.uintptr_t(uintptr(unsafe.Pointer(leftover)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callnanosleep(uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -570,11 +470,15 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.open64(C.uintptr_t(_p0), C.int(mode), C.uint(perm)) + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, e1 := callopen64(uintptr(unsafe.Pointer(_p0)), mode, perm) fd = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -582,11 +486,15 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.openat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.uint(mode)) + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, e1 := callopenat(dirfd, uintptr(unsafe.Pointer(_p0)), flags, mode) fd = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -598,12 +506,10 @@ func read(fd int, p []byte) (n int, err error) { if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) + r0, e1 := callread(fd, uintptr(unsafe.Pointer(_p0)), len(p)) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -611,29 +517,19 @@ func read(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } var _p1 *byte if len(buf) > 0 { _p1 = &buf[0] } - var _p2 int - _p2 = len(buf) - r0, er := C.readlink(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(_p1))), C.size_t(_p2)) + r0, e1 := callreadlink(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), len(buf)) n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - _p1 := uintptr(unsafe.Pointer(C.CString(attr))) - r0, er := C.removexattr(C.uintptr_t(_p0), C.uintptr_t(_p1)) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -641,11 +537,19 @@ func Removexattr(path string, attr string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(oldpath))) - _p1 := uintptr(unsafe.Pointer(C.CString(newpath))) - r0, er := C.renameat(C.int(olddirfd), C.uintptr_t(_p0), C.int(newdirfd), 
C.uintptr_t(_p1)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, e1 := callrenameat(olddirfd, uintptr(unsafe.Pointer(_p0)), newdirfd, uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -657,11 +561,9 @@ func Setdomainname(p []byte) (err error) { if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.setdomainname(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetdomainname(uintptr(unsafe.Pointer(_p0)), len(p)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -673,11 +575,9 @@ func Sethostname(p []byte) (err error) { if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.sethostname(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsethostname(uintptr(unsafe.Pointer(_p0)), len(p)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -685,9 +585,9 @@ func Sethostname(p []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - r0, er := C.setpgid(C.int(pid), C.int(pgid)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetpgid(pid, pgid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -695,10 +595,10 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, er := C.setsid() + r0, e1 := callsetsid() pid = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -706,9 +606,9 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tv *Timeval) (err error) { - r0, er := C.settimeofday(C.uintptr_t(uintptr(unsafe.Pointer(tv)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callsettimeofday(uintptr(unsafe.Pointer(tv))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -716,9 +616,9 @@ func Settimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - r0, er := C.setuid(C.int(uid)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetuid(uid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -726,9 +626,9 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - r0, er := C.setgid(C.int(uid)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetgid(uid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -736,9 +636,9 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - r0, er := C.setpriority(C.int(which), C.int(who), C.int(prio)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetpriority(which, who, prio) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -746,10 +646,14 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.statx(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.int(mask), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) - if r0 == -1 
&& er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callstatx(dirfd, uintptr(unsafe.Pointer(_p0)), flags, mask, uintptr(unsafe.Pointer(stat))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -757,28 +661,17 @@ func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err erro // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - C.sync() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { - r0, er := C.tee(C.int(rfd), C.int(wfd), C.int(len), C.int(flags)) - n = int64(r0) - if r0 == -1 && er != nil { - err = er - } + callsync() return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Times(tms *Tms) (ticks uintptr, err error) { - r0, er := C.times(C.uintptr_t(uintptr(unsafe.Pointer(tms)))) + r0, e1 := calltimes(uintptr(unsafe.Pointer(tms))) ticks = uintptr(r0) - if uintptr(r0) == ^uintptr(0) && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -786,7 +679,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _ := C.umask(C.int(mask)) + r0, _ := callumask(mask) oldmask = int(r0) return } @@ -794,9 +687,9 @@ func Umask(mask int) (oldmask int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Uname(buf *Utsname) (err error) { - r0, er := C.uname(C.uintptr_t(uintptr(unsafe.Pointer(buf)))) - if r0 == -1 && er != nil { - err = er + _, e1 := calluname(uintptr(unsafe.Pointer(buf))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -804,10 +697,14 @@ func Uname(buf *Utsname) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.unlink(C.uintptr_t(_p0)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callunlink(uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -815,20 +712,14 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.unlinkat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - r0, er := C.unshare(C.int(flags)) - if r0 == -1 && er != nil { - err = er + _, e1 := callunlinkat(dirfd, uintptr(unsafe.Pointer(_p0)), flags) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -836,9 +727,9 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ustat(dev int, ubuf *Ustat_t) (err error) { - r0, er := C.ustat(C.int(dev), C.uintptr_t(uintptr(unsafe.Pointer(ubuf)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callustat(dev, uintptr(unsafe.Pointer(ubuf))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -850,12 +741,10 @@ func write(fd int, p []byte) (n int, err error) { if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), 
C.size_t(_p1)) + r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(_p0)), len(p)) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -863,10 +752,10 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) + r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -874,10 +763,10 @@ func readlen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) + r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -885,9 +774,9 @@ func writelen(fd int, p *byte, np int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - r0, er := C.dup2(C.int(oldfd), C.int(newfd)) - if r0 == -1 && er != nil { - err = er + _, e1 := calldup2(oldfd, newfd) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -895,9 +784,9 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fadvise(fd int, offset int64, length int64, advice int) (err error) { - r0, er := C.posix_fadvise64(C.int(fd), C.longlong(offset), C.longlong(length), C.int(advice)) - if r0 == -1 && er != nil { - err = er + _, e1 := callposix_fadvise64(fd, offset, length, advice) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -905,9 +794,9 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - r0, er := C.fchown(C.int(fd), C.int(uid), C.int(gid)) - if r0 == -1 && er != nil { - err = er + _, e1 := callfchown(fd, uid, gid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -915,9 +804,9 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callfstat(fd, uintptr(unsafe.Pointer(stat))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -925,10 +814,14 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callfstatat(dirfd, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), flags) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -936,9 +829,9 @@ func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, buf *Statfs_t) (err error) { - r0, er := C.fstatfs(C.int(fd), 
C.uintptr_t(uintptr(unsafe.Pointer(buf)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callfstatfs(fd, uintptr(unsafe.Pointer(buf))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -946,9 +839,9 @@ func Fstatfs(fd int, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - r0, er := C.ftruncate(C.int(fd), C.longlong(length)) - if r0 == -1 && er != nil { - err = er + _, e1 := callftruncate(fd, length) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -956,7 +849,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _ := C.getegid() + r0, _ := callgetegid() egid = int(r0) return } @@ -964,7 +857,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _ := C.geteuid() + r0, _ := callgeteuid() euid = int(r0) return } @@ -972,7 +865,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _ := C.getgid() + r0, _ := callgetgid() gid = int(r0) return } @@ -980,7 +873,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _ := C.getuid() + r0, _ := callgetuid() uid = int(r0) return } @@ -988,10 +881,14 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.lchown(C.uintptr_t(_p0), C.int(uid), C.int(gid)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := calllchown(uintptr(unsafe.Pointer(_p0)), uid, gid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -999,9 +896,9 @@ func Lchown(path string, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, n int) (err error) { - r0, er := C.listen(C.int(s), C.int(n)) - if r0 == -1 && er != nil { - err = er + _, e1 := calllisten(s, n) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1009,10 +906,14 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := calllstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1020,9 +921,9 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pause() (err error) { - r0, er := C.pause() - if r0 == -1 && er != nil { - err = er + _, e1 := callpause() + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1034,12 +935,10 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.pread64(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.longlong(offset)) + r0, e1 := callpread64(fd, uintptr(unsafe.Pointer(_p0)), len(p), offset) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } 
return } @@ -1051,12 +950,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.pwrite64(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.longlong(offset)) + r0, e1 := callpwrite64(fd, uintptr(unsafe.Pointer(_p0)), len(p), offset) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1064,10 +961,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, er := C.pselect(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))), C.uintptr_t(uintptr(unsafe.Pointer(sigmask)))) + r0, e1 := callpselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1075,9 +972,9 @@ func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask * // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - r0, er := C.setregid(C.int(rgid), C.int(egid)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetregid(rgid, egid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1085,9 +982,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - r0, er := C.setreuid(C.int(ruid), C.int(euid)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetreuid(ruid, euid) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1095,9 +992,9 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - r0, er := C.shutdown(C.int(fd), C.int(how)) - if r0 == -1 && er != nil { - err = er + _, e1 := callshutdown(fd, how) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1105,10 +1002,10 @@ func Shutdown(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, er := C.splice(C.int(rfd), C.uintptr_t(uintptr(unsafe.Pointer(roff))), C.int(wfd), C.uintptr_t(uintptr(unsafe.Pointer(woff))), C.int(len), C.int(flags)) + r0, e1 := callsplice(rfd, uintptr(unsafe.Pointer(roff)), wfd, uintptr(unsafe.Pointer(woff)), len, flags) n = int64(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1116,10 +1013,14 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1127,10 +1028,14 @@ 
func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, buf *Statfs_t) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.statfs(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callstatfs(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1138,10 +1043,14 @@ func Statfs(path string, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.truncate(C.uintptr_t(_p0), C.longlong(length)) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := calltruncate(uintptr(unsafe.Pointer(_p0)), length) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1149,9 +1058,9 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - r0, er := C.bind(C.int(s), C.uintptr_t(uintptr(addr)), C.uintptr_t(uintptr(addrlen))) - if r0 == -1 && er != nil { - err = er + _, e1 := callbind(s, uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1159,9 +1068,9 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - r0, er := C.connect(C.int(s), C.uintptr_t(uintptr(addr)), C.uintptr_t(uintptr(addrlen))) - if r0 == -1 && er != nil { - err = er + _, e1 := callconnect(s, uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1169,10 +1078,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, er := C.getgroups(C.int(n), C.uintptr_t(uintptr(unsafe.Pointer(list)))) + r0, e1 := callgetgroups(n, uintptr(unsafe.Pointer(list))) nn = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1180,9 +1089,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - r0, er := C.setgroups(C.int(n), C.uintptr_t(uintptr(unsafe.Pointer(list)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetgroups(n, uintptr(unsafe.Pointer(list))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1190,9 +1099,9 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - r0, er := C.getsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(uintptr(val)), C.uintptr_t(uintptr(unsafe.Pointer(vallen)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callgetsockopt(s, level, name, uintptr(val), uintptr(unsafe.Pointer(vallen))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1200,9 +1109,9 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - r0, er := C.setsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(uintptr(val)), C.uintptr_t(vallen)) - if r0 == -1 && er != nil { - err = er + _, e1 := callsetsockopt(s, level, name, uintptr(val), vallen) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1210,10 +1119,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, er := C.socket(C.int(domain), C.int(typ), C.int(proto)) + r0, e1 := callsocket(domain, typ, proto) fd = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1221,9 +1130,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - r0, er := C.socketpair(C.int(domain), C.int(typ), C.int(proto), C.uintptr_t(uintptr(unsafe.Pointer(fd)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callsocketpair(domain, typ, proto, uintptr(unsafe.Pointer(fd))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1231,9 +1140,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - r0, er := C.getpeername(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callgetpeername(fd, uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1241,9 +1150,9 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - r0, er := C.getsockname(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(rsa))), C.uintptr_t(uintptr(unsafe.Pointer(addrlen)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callgetsockname(fd, uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1255,12 +1164,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl if len(p) > 0 { _p0 = &p[0] } - var _p1 int - _p1 = len(p) - r0, er := C.recvfrom(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags), C.uintptr_t(uintptr(unsafe.Pointer(from))), C.uintptr_t(uintptr(unsafe.Pointer(fromlen)))) + r0, e1 := callrecvfrom(fd, uintptr(unsafe.Pointer(_p0)), len(p), flags, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1272,11 +1179,9 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( if len(buf) > 0 { _p0 = &buf[0] } - var _p1 int - _p1 = len(buf) - r0, er := C.sendto(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags), C.uintptr_t(uintptr(to)), C.uintptr_t(uintptr(addrlen))) - if r0 == -1 && er != nil { - err = er + _, e1 := callsendto(s, uintptr(unsafe.Pointer(_p0)), len(buf), flags, uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) 
} return } @@ -1284,10 +1189,10 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, er := C.recvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + r0, e1 := callrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1295,10 +1200,10 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, er := C.sendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + r0, e1 := callsendmsg(s, uintptr(unsafe.Pointer(msg)), flags) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1306,9 +1211,9 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - r0, er := C.munmap(C.uintptr_t(addr), C.uintptr_t(length)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmunmap(addr, length) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1320,11 +1225,9 @@ func Madvise(b []byte, advice int) (err error) { if len(b) > 0 { _p0 = &b[0] } - var _p1 int - _p1 = len(b) - r0, er := C.madvise(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(advice)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmadvise(uintptr(unsafe.Pointer(_p0)), len(b), advice) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1336,11 +1239,9 @@ func Mprotect(b []byte, prot int) (err error) { if len(b) > 0 { _p0 = &b[0] } - var _p1 int - _p1 = len(b) - r0, er := C.mprotect(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(prot)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmprotect(uintptr(unsafe.Pointer(_p0)), len(b), prot) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1352,11 +1253,9 @@ func Mlock(b []byte) (err error) { if len(b) > 0 { _p0 = &b[0] } - var _p1 int - _p1 = len(b) - r0, er := C.mlock(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmlock(uintptr(unsafe.Pointer(_p0)), len(b)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1364,9 +1263,9 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - r0, er := C.mlockall(C.int(flags)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmlockall(flags) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1378,11 +1277,9 @@ func Msync(b []byte, flags int) (err error) { if len(b) > 0 { _p0 = &b[0] } - var _p1 int - _p1 = len(b) - r0, er := C.msync(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1), C.int(flags)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmsync(uintptr(unsafe.Pointer(_p0)), len(b), flags) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1394,11 +1291,9 @@ func Munlock(b []byte) (err error) { if len(b) > 0 { _p0 = &b[0] } - var _p1 int - _p1 = len(b) - r0, er := C.munlock(C.uintptr_t(uintptr(unsafe.Pointer(_p0))), C.size_t(_p1)) - if r0 == -1 && er != nil { - err = er + _, e1 := callmunlock(uintptr(unsafe.Pointer(_p0)), len(b)) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1406,9 +1301,9 @@ func Munlock(b []byte) (err 
error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - r0, er := C.munlockall() - if r0 == -1 && er != nil { - err = er + _, e1 := callmunlockall() + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1416,19 +1311,9 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - r0, er := C.pipe(C.uintptr_t(uintptr(unsafe.Pointer(p)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe2(p *[2]_C_int, flags int) (err error) { - r0, er := C.pipe2(C.uintptr_t(uintptr(unsafe.Pointer(p))), C.int(flags)) - if r0 == -1 && er != nil { - err = er + _, e1 := callpipe(uintptr(unsafe.Pointer(p))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1436,10 +1321,10 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, er := C.poll(C.uintptr_t(uintptr(unsafe.Pointer(fds))), C.int(nfds), C.int(timeout)) + r0, e1 := callpoll(uintptr(unsafe.Pointer(fds)), nfds, timeout) n = int(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1447,9 +1332,9 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *Timeval, tzp *Timezone) (err error) { - r0, er := C.gettimeofday(C.uintptr_t(uintptr(unsafe.Pointer(tv))), C.uintptr_t(uintptr(unsafe.Pointer(tzp)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callgettimeofday(uintptr(unsafe.Pointer(tv)), uintptr(unsafe.Pointer(tzp))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1457,10 +1342,10 @@ func gettimeofday(tv *Timeval, tzp *Timezone) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Time(t *Time_t) (tt Time_t, err error) { - r0, er := C.time(C.uintptr_t(uintptr(unsafe.Pointer(t)))) + r0, e1 := calltime(uintptr(unsafe.Pointer(t))) tt = Time_t(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1468,10 +1353,14 @@ func Time(t *Time_t) (tt Time_t, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Utime(path string, buf *Utimbuf) (err error) { - _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.utime(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(buf)))) - if r0 == -1 && er != nil { - err = er + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, e1 := callutime(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1479,9 +1368,9 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.getrlimit(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er + _, e1 := callgetrlimit(resource, uintptr(unsafe.Pointer(rlim))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1489,9 +1378,9 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er + 
_, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1499,10 +1388,10 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, er := C.lseek(C.int(fd), C.longlong(offset), C.int(whence)) + r0, e1 := calllseek(fd, offset, whence) off = int64(r0) - if r0 == -1 && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } @@ -1510,10 +1399,10 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { - r0, er := C.mmap64(C.uintptr_t(addr), C.uintptr_t(length), C.int(prot), C.int(flags), C.int(fd), C.longlong(offset)) + r0, e1 := callmmap64(addr, length, prot, flags, fd, offset) xaddr = uintptr(r0) - if uintptr(r0) == ^uintptr(0) && er != nil { - err = er + if e1 != 0 { + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go new file mode 100644 index 000000000..a185ee842 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -0,0 +1,1162 @@ +// mksyscall_aix_ppc64.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build aix,ppc64 +// +build !gccgo + +package unix + +import ( + "unsafe" +) + +//go:cgo_import_dynamic libc_utimes utimes "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_utimensat utimensat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getcwd getcwd "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_accept accept "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getdirent getdirent "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_wait4 wait4 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_acct acct "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_close close "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_dup dup "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_exit exit "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_faccessat faccessat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fchdir fchdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fchmod fchmod "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fchownat fchownat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fdatasync fdatasync "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fsync fsync "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpgid getpgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getppid getppid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpriority getpriority "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getrusage getrusage "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsid getsid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_kill kill "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_syslog syslog "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mkdir mkdir 
"libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mknod mknod "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mknodat mknodat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_open64 open64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_openat openat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_read read "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_readlink readlink "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_renameat renameat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setdomainname setdomainname "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sethostname sethostname "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setpgid setpgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setsid setsid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setuid setuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setgid setgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setpriority setpriority "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_statx statx "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sync sync "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_times times "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_umask umask "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_uname uname "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_unlink unlink "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_ustat ustat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_write write "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_dup2 dup2 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_posix_fadvise64 posix_fadvise64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fchown fchown "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fstat fstat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fstatat fstatat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getegid getegid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_geteuid geteuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getgid getgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getuid getuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_lchown lchown "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_listen listen "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_lstat lstat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pause pause "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pread64 pread64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pwrite64 pwrite64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pselect pselect "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setregid setregid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setreuid setreuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_shutdown shutdown "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_splice splice "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_stat stat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_statfs statfs "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_truncate truncate "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_bind bind "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_connect connect "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getgroups getgroups "libc.a/shr_64.o" +//go:cgo_import_dynamic 
libc_setgroups setgroups "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_socket socket "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_socketpair socketpair "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpeername getpeername "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsockname getsockname "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sendto sendto "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mprotect mprotect "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mlock mlock "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mlockall mlockall "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_msync msync "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_munlock munlock "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_munlockall munlockall "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pipe pipe "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_poll poll "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_time time "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_utime utime "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" + +//go:linkname libc_utimes libc_utimes +//go:linkname libc_utimensat libc_utimensat +//go:linkname libc_getcwd libc_getcwd +//go:linkname libc_accept libc_accept +//go:linkname libc_getdirent libc_getdirent +//go:linkname libc_wait4 libc_wait4 +//go:linkname libc_ioctl libc_ioctl +//go:linkname libc_fcntl libc_fcntl +//go:linkname libc_acct libc_acct +//go:linkname libc_chdir libc_chdir +//go:linkname libc_chroot libc_chroot +//go:linkname libc_close libc_close +//go:linkname libc_dup libc_dup +//go:linkname libc_exit libc_exit +//go:linkname libc_faccessat libc_faccessat +//go:linkname libc_fchdir libc_fchdir +//go:linkname libc_fchmod libc_fchmod +//go:linkname libc_fchmodat libc_fchmodat +//go:linkname libc_fchownat libc_fchownat +//go:linkname libc_fdatasync libc_fdatasync +//go:linkname libc_fsync libc_fsync +//go:linkname libc_getpgid libc_getpgid +//go:linkname libc_getpgrp libc_getpgrp +//go:linkname libc_getpid libc_getpid +//go:linkname libc_getppid libc_getppid +//go:linkname libc_getpriority libc_getpriority +//go:linkname libc_getrusage libc_getrusage +//go:linkname libc_getsid libc_getsid +//go:linkname libc_kill libc_kill +//go:linkname libc_syslog libc_syslog +//go:linkname libc_mkdir libc_mkdir +//go:linkname libc_mkdirat libc_mkdirat +//go:linkname libc_mkfifo libc_mkfifo +//go:linkname libc_mknod libc_mknod +//go:linkname libc_mknodat libc_mknodat +//go:linkname libc_nanosleep libc_nanosleep +//go:linkname libc_open64 libc_open64 +//go:linkname libc_openat libc_openat +//go:linkname libc_read libc_read +//go:linkname libc_readlink libc_readlink +//go:linkname libc_renameat libc_renameat +//go:linkname libc_setdomainname libc_setdomainname +//go:linkname libc_sethostname libc_sethostname +//go:linkname 
libc_setpgid libc_setpgid +//go:linkname libc_setsid libc_setsid +//go:linkname libc_settimeofday libc_settimeofday +//go:linkname libc_setuid libc_setuid +//go:linkname libc_setgid libc_setgid +//go:linkname libc_setpriority libc_setpriority +//go:linkname libc_statx libc_statx +//go:linkname libc_sync libc_sync +//go:linkname libc_times libc_times +//go:linkname libc_umask libc_umask +//go:linkname libc_uname libc_uname +//go:linkname libc_unlink libc_unlink +//go:linkname libc_unlinkat libc_unlinkat +//go:linkname libc_ustat libc_ustat +//go:linkname libc_write libc_write +//go:linkname libc_dup2 libc_dup2 +//go:linkname libc_posix_fadvise64 libc_posix_fadvise64 +//go:linkname libc_fchown libc_fchown +//go:linkname libc_fstat libc_fstat +//go:linkname libc_fstatat libc_fstatat +//go:linkname libc_fstatfs libc_fstatfs +//go:linkname libc_ftruncate libc_ftruncate +//go:linkname libc_getegid libc_getegid +//go:linkname libc_geteuid libc_geteuid +//go:linkname libc_getgid libc_getgid +//go:linkname libc_getuid libc_getuid +//go:linkname libc_lchown libc_lchown +//go:linkname libc_listen libc_listen +//go:linkname libc_lstat libc_lstat +//go:linkname libc_pause libc_pause +//go:linkname libc_pread64 libc_pread64 +//go:linkname libc_pwrite64 libc_pwrite64 +//go:linkname libc_pselect libc_pselect +//go:linkname libc_setregid libc_setregid +//go:linkname libc_setreuid libc_setreuid +//go:linkname libc_shutdown libc_shutdown +//go:linkname libc_splice libc_splice +//go:linkname libc_stat libc_stat +//go:linkname libc_statfs libc_statfs +//go:linkname libc_truncate libc_truncate +//go:linkname libc_bind libc_bind +//go:linkname libc_connect libc_connect +//go:linkname libc_getgroups libc_getgroups +//go:linkname libc_setgroups libc_setgroups +//go:linkname libc_getsockopt libc_getsockopt +//go:linkname libc_setsockopt libc_setsockopt +//go:linkname libc_socket libc_socket +//go:linkname libc_socketpair libc_socketpair +//go:linkname libc_getpeername libc_getpeername +//go:linkname libc_getsockname libc_getsockname +//go:linkname libc_recvfrom libc_recvfrom +//go:linkname libc_sendto libc_sendto +//go:linkname libc_recvmsg libc_recvmsg +//go:linkname libc_sendmsg libc_sendmsg +//go:linkname libc_munmap libc_munmap +//go:linkname libc_madvise libc_madvise +//go:linkname libc_mprotect libc_mprotect +//go:linkname libc_mlock libc_mlock +//go:linkname libc_mlockall libc_mlockall +//go:linkname libc_msync libc_msync +//go:linkname libc_munlock libc_munlock +//go:linkname libc_munlockall libc_munlockall +//go:linkname libc_pipe libc_pipe +//go:linkname libc_poll libc_poll +//go:linkname libc_gettimeofday libc_gettimeofday +//go:linkname libc_time libc_time +//go:linkname libc_utime libc_utime +//go:linkname libc_getrlimit libc_getrlimit +//go:linkname libc_setrlimit libc_setrlimit +//go:linkname libc_lseek libc_lseek +//go:linkname libc_mmap64 libc_mmap64 + +type syscallFunc uintptr + +var ( + libc_utimes, + libc_utimensat, + libc_getcwd, + libc_accept, + libc_getdirent, + libc_wait4, + libc_ioctl, + libc_fcntl, + libc_acct, + libc_chdir, + libc_chroot, + libc_close, + libc_dup, + libc_exit, + libc_faccessat, + libc_fchdir, + libc_fchmod, + libc_fchmodat, + libc_fchownat, + libc_fdatasync, + libc_fsync, + libc_getpgid, + libc_getpgrp, + libc_getpid, + libc_getppid, + libc_getpriority, + libc_getrusage, + libc_getsid, + libc_kill, + libc_syslog, + libc_mkdir, + libc_mkdirat, + libc_mkfifo, + libc_mknod, + libc_mknodat, + libc_nanosleep, + libc_open64, + libc_openat, + libc_read, + libc_readlink, + 
libc_renameat, + libc_setdomainname, + libc_sethostname, + libc_setpgid, + libc_setsid, + libc_settimeofday, + libc_setuid, + libc_setgid, + libc_setpriority, + libc_statx, + libc_sync, + libc_times, + libc_umask, + libc_uname, + libc_unlink, + libc_unlinkat, + libc_ustat, + libc_write, + libc_dup2, + libc_posix_fadvise64, + libc_fchown, + libc_fstat, + libc_fstatat, + libc_fstatfs, + libc_ftruncate, + libc_getegid, + libc_geteuid, + libc_getgid, + libc_getuid, + libc_lchown, + libc_listen, + libc_lstat, + libc_pause, + libc_pread64, + libc_pwrite64, + libc_pselect, + libc_setregid, + libc_setreuid, + libc_shutdown, + libc_splice, + libc_stat, + libc_statfs, + libc_truncate, + libc_bind, + libc_connect, + libc_getgroups, + libc_setgroups, + libc_getsockopt, + libc_setsockopt, + libc_socket, + libc_socketpair, + libc_getpeername, + libc_getsockname, + libc_recvfrom, + libc_sendto, + libc_recvmsg, + libc_sendmsg, + libc_munmap, + libc_madvise, + libc_mprotect, + libc_mlock, + libc_mlockall, + libc_msync, + libc_munlock, + libc_munlockall, + libc_pipe, + libc_poll, + libc_gettimeofday, + libc_time, + libc_utime, + libc_getrlimit, + libc_setrlimit, + libc_lseek, + libc_mmap64 syscallFunc +) + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callutimes(_p0 uintptr, times uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_utimes)), 2, _p0, times, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callutimensat(dirfd int, _p0 uintptr, times uintptr, flag int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_utimensat)), 4, uintptr(dirfd), _p0, times, uintptr(flag), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetcwd(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getcwd)), 2, _p0, uintptr(_lenp0), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callaccept(s int, rsa uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_accept)), 3, uintptr(s), rsa, addrlen, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetdirent(fd int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getdirent)), 3, uintptr(fd), _p0, uintptr(_lenp0), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callwait4(pid int, status uintptr, options int, rusage uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_wait4)), 4, uintptr(pid), status, uintptr(options), rusage, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(req), arg, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, fd, uintptr(cmd), arg, 0, 0, 0) + return +} + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_acct)), 1, _p0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callchdir(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_chdir)), 1, _p0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callchroot(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_chroot)), 1, _p0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callclose(fd int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_close)), 1, uintptr(fd), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calldup(oldfd int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_dup)), 1, uintptr(oldfd), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callexit(code int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_exit)), 1, uintptr(code), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfaccessat(dirfd int, _p0 uintptr, mode uint32, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_faccessat)), 4, uintptr(dirfd), _p0, uintptr(mode), uintptr(flags), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchdir(fd int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchmod(fd int, mode uint32) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchmodat(dirfd int, _p0 uintptr, mode uint32, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fchmodat)), 4, uintptr(dirfd), _p0, uintptr(mode), uintptr(flags), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchownat(dirfd int, _p0 uintptr, uid int, gid int, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fchownat)), 5, uintptr(dirfd), _p0, uintptr(uid), uintptr(gid), uintptr(flags), 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfdatasync(fd int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfsync(fd int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpgid(pid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpgrp() (r1 uintptr, e1 Errno) { + r1, _, e1 = 
syscall6(uintptr(unsafe.Pointer(&libc_getpgrp)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getpid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetppid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getppid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpriority(which int, who int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetrusage(who int, rusage uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getrusage)), 2, uintptr(who), rusage, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetsid(pid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callkill(pid int, sig int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_kill)), 2, uintptr(pid), uintptr(sig), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsyslog(typ int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_syslog)), 3, uintptr(typ), _p0, uintptr(_lenp0), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmkdir(dirfd int, _p0 uintptr, mode uint32) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mkdir)), 3, uintptr(dirfd), _p0, uintptr(mode), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmkdirat(dirfd int, _p0 uintptr, mode uint32) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mkdirat)), 3, uintptr(dirfd), _p0, uintptr(mode), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmkfifo(_p0 uintptr, mode uint32) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mkfifo)), 2, _p0, uintptr(mode), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmknod(_p0 uintptr, mode uint32, dev int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mknod)), 3, _p0, uintptr(mode), uintptr(dev), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmknodat(dirfd int, _p0 uintptr, mode uint32, dev int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mknodat)), 4, uintptr(dirfd), _p0, uintptr(mode), uintptr(dev), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callnanosleep(time uintptr, leftover uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nanosleep)), 2, time, leftover, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callopen64(_p0 uintptr, mode int, perm uint32) (r1 uintptr, e1 Errno) { + r1, _, e1 = 
syscall6(uintptr(unsafe.Pointer(&libc_open64)), 3, _p0, uintptr(mode), uintptr(perm), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callopenat(dirfd int, _p0 uintptr, flags int, mode uint32) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_openat)), 4, uintptr(dirfd), _p0, uintptr(flags), uintptr(mode), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callread(fd int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_read)), 3, uintptr(fd), _p0, uintptr(_lenp0), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callreadlink(_p0 uintptr, _p1 uintptr, _lenp1 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_readlink)), 3, _p0, _p1, uintptr(_lenp1), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callrenameat(olddirfd int, _p0 uintptr, newdirfd int, _p1 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_renameat)), 4, uintptr(olddirfd), _p0, uintptr(newdirfd), _p1, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetdomainname(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_setdomainname)), 2, _p0, uintptr(_lenp0), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsethostname(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sethostname)), 2, _p0, uintptr(_lenp0), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetpgid(pid int, pgid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetsid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setsid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsettimeofday(tv uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_settimeofday)), 1, tv, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetuid(uid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_setuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetgid(uid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_setgid)), 1, uintptr(uid), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetpriority(which int, who int, prio int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_setpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callstatx(dirfd int, _p0 uintptr, flags int, mask int, stat uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_statx)), 5, uintptr(dirfd), _p0, uintptr(flags), uintptr(mask), stat, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT + +func callsync() (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sync)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calltimes(tms uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_times)), 1, tms, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callumask(mask int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_umask)), 1, uintptr(mask), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calluname(buf uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_uname)), 1, buf, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callunlink(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_unlink)), 1, _p0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callunlinkat(dirfd int, _p0 uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_unlinkat)), 3, uintptr(dirfd), _p0, uintptr(flags), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callustat(dev int, ubuf uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_ustat)), 2, uintptr(dev), ubuf, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callwrite(fd int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_write)), 3, uintptr(fd), _p0, uintptr(_lenp0), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calldup2(oldfd int, newfd int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_dup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callposix_fadvise64(fd int, offset int64, length int64, advice int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_posix_fadvise64)), 4, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchown(fd int, uid int, gid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfstat(fd int, stat uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fstat)), 2, uintptr(fd), stat, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfstatat(dirfd int, _p0 uintptr, stat uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fstatat)), 4, uintptr(dirfd), _p0, stat, uintptr(flags), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfstatfs(fd int, buf uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fstatfs)), 2, uintptr(fd), buf, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callftruncate(fd int, length int64) (r1 
uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_ftruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetegid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getegid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgeteuid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_geteuid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetgid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getgid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetuid() (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getuid)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllchown(_p0 uintptr, uid int, gid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lchown)), 3, _p0, uintptr(uid), uintptr(gid), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllisten(s int, n int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_listen)), 2, uintptr(s), uintptr(n), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lstat)), 2, _p0, stat, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpause() (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pause)), 0, 0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpread64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pread64)), 4, uintptr(fd), _p0, uintptr(_lenp0), uintptr(offset), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pwrite64)), 4, uintptr(fd), _p0, uintptr(_lenp0), uintptr(offset), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pselect)), 6, uintptr(nfd), r, w, e, timeout, sigmask) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetregid(rgid int, egid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetreuid(ruid int, euid int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callshutdown(fd int, how int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_shutdown)), 
2, uintptr(fd), uintptr(how), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_splice)), 6, uintptr(rfd), roff, uintptr(wfd), woff, uintptr(len), uintptr(flags)) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, stat, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callstatfs(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_statfs)), 2, _p0, buf, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calltruncate(_p0 uintptr, length int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_truncate)), 2, _p0, uintptr(length), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callbind(s int, addr uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_bind)), 3, uintptr(s), addr, addrlen, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callconnect(s int, addr uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_connect)), 3, uintptr(s), addr, addrlen, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetgroups(n int, list uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getgroups)), 2, uintptr(n), list, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetgroups(n int, list uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setgroups)), 2, uintptr(n), list, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetsockopt(s int, level int, name int, val uintptr, vallen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), val, vallen, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetsockopt(s int, level int, name int, val uintptr, vallen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_setsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), val, vallen, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsocket(domain int, typ int, proto int) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsocketpair(domain int, typ int, proto int, fd uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), fd, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpeername(fd int, rsa uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = 
rawSyscall6(uintptr(unsafe.Pointer(&libc_getpeername)), 3, uintptr(fd), rsa, addrlen, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetsockname(fd int, rsa uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getsockname)), 3, uintptr(fd), rsa, addrlen, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callrecvfrom(fd int, _p0 uintptr, _lenp0 int, flags int, from uintptr, fromlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_recvfrom)), 6, uintptr(fd), _p0, uintptr(_lenp0), uintptr(flags), from, fromlen) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sendto)), 6, uintptr(s), _p0, uintptr(_lenp0), uintptr(flags), to, addrlen) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_recvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sendmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmunmap(addr uintptr, length uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_munmap)), 2, addr, length, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmadvise(_p0 uintptr, _lenp0 int, advice int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_madvise)), 3, _p0, uintptr(_lenp0), uintptr(advice), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmprotect(_p0 uintptr, _lenp0 int, prot int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mprotect)), 3, _p0, uintptr(_lenp0), uintptr(prot), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmlock(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mlock)), 2, _p0, uintptr(_lenp0), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmlockall(flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmsync(_p0 uintptr, _lenp0 int, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_msync)), 3, _p0, uintptr(_lenp0), uintptr(flags), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmunlock(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_munlock)), 2, _p0, uintptr(_lenp0), 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmunlockall() (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_munlockall)), 0, 0, 
0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpipe(p uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_pipe)), 1, p, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpoll(fds uintptr, nfds int, timeout int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_poll)), 3, fds, uintptr(nfds), uintptr(timeout), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgettimeofday(tv uintptr, tzp uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_gettimeofday)), 2, tv, tzp, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calltime(t uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_time)), 1, t, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callutime(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_utime)), 2, _p0, buf, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmmap64(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_mmap64)), 6, addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go new file mode 100644 index 000000000..aef7c0e78 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -0,0 +1,1042 @@ +// mksyscall_aix_ppc64.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build aix,ppc64 +// +build gccgo + +package unix + +/* +#include <stdint.h> +int utimes(uintptr_t, uintptr_t); +int utimensat(int, uintptr_t, uintptr_t, int); +int getcwd(uintptr_t, size_t); +int accept(int, uintptr_t, uintptr_t); +int getdirent(int, uintptr_t, size_t); +int wait4(int, uintptr_t, int, uintptr_t); +int ioctl(int, int, uintptr_t); +int fcntl(uintptr_t, int, uintptr_t); +int acct(uintptr_t); +int chdir(uintptr_t); +int chroot(uintptr_t); +int close(int); +int dup(int); +void exit(int); +int faccessat(int, uintptr_t, unsigned int, int); +int fchdir(int); +int fchmod(int, unsigned int); +int fchmodat(int, uintptr_t, unsigned int, int); +int fchownat(int, uintptr_t, int, int, int); +int fdatasync(int); +int fsync(int); +int getpgid(int); +int getpgrp(); +int getpid(); +int getppid(); +int getpriority(int, int); +int getrusage(int, uintptr_t); +int getsid(int); +int kill(int, int); +int syslog(int, uintptr_t, size_t); +int mkdir(int, uintptr_t, unsigned int); +int mkdirat(int, uintptr_t, unsigned int); +int mkfifo(uintptr_t, unsigned int); +int mknod(uintptr_t, unsigned int, int); +int mknodat(int, uintptr_t, unsigned int, int); +int nanosleep(uintptr_t, uintptr_t); +int open64(uintptr_t, int, unsigned int); +int openat(int, uintptr_t, int, unsigned int); +int read(int, uintptr_t, size_t); +int readlink(uintptr_t, uintptr_t, size_t); +int renameat(int, uintptr_t, int, uintptr_t); +int setdomainname(uintptr_t, size_t); +int sethostname(uintptr_t, size_t); +int setpgid(int, int); +int setsid(); +int settimeofday(uintptr_t); +int setuid(int); +int setgid(int); +int setpriority(int, int, int); +int statx(int, uintptr_t, int, int, uintptr_t); +int sync(); +uintptr_t times(uintptr_t); +int umask(int); +int uname(uintptr_t); +int unlink(uintptr_t); +int unlinkat(int, uintptr_t, int); +int ustat(int, uintptr_t); +int write(int, uintptr_t, size_t); +int dup2(int, int); +int posix_fadvise64(int, long long, long long, int); +int fchown(int, int, int); +int fstat(int, uintptr_t); +int fstatat(int, uintptr_t, uintptr_t, int); +int fstatfs(int, uintptr_t); +int ftruncate(int, long long); +int getegid(); +int geteuid(); +int getgid(); +int getuid(); +int lchown(uintptr_t, int, int); +int listen(int, int); +int lstat(uintptr_t, uintptr_t); +int pause(); +int pread64(int, uintptr_t, size_t, long long); +int pwrite64(int, uintptr_t, size_t, long long); +int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); +int setregid(int, int); +int setreuid(int, int); +int shutdown(int, int); +long long splice(int, uintptr_t, int, uintptr_t, int, int); +int stat(uintptr_t, uintptr_t); +int statfs(uintptr_t, uintptr_t); +int truncate(uintptr_t, long long); +int bind(int, uintptr_t, uintptr_t); +int connect(int, uintptr_t, uintptr_t); +int getgroups(int, uintptr_t); +int setgroups(int, uintptr_t); +int getsockopt(int, int, int, uintptr_t, uintptr_t); +int setsockopt(int, int, int, uintptr_t, uintptr_t); +int socket(int, int, int); +int socketpair(int, int, int, uintptr_t); +int getpeername(int, uintptr_t, uintptr_t); +int getsockname(int, uintptr_t, uintptr_t); +int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); +int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); +int recvmsg(int, uintptr_t, int); +int sendmsg(int, uintptr_t, int); +int munmap(uintptr_t, uintptr_t); +int madvise(uintptr_t, size_t, int); +int mprotect(uintptr_t, size_t, int); +int mlock(uintptr_t, size_t); +int mlockall(int); +int msync(uintptr_t, size_t, int); +int munlock(uintptr_t, size_t); +int 
munlockall(); +int pipe(uintptr_t); +int poll(uintptr_t, int, int); +int gettimeofday(uintptr_t, uintptr_t); +int time(uintptr_t); +int utime(uintptr_t, uintptr_t); +int getrlimit(int, uintptr_t); +int setrlimit(int, uintptr_t); +long long lseek(int, long long, int); +uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); + +*/ +import "C" +import ( + "syscall" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callutimes(_p0 uintptr, times uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.utimes(C.uintptr_t(_p0), C.uintptr_t(times))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callutimensat(dirfd int, _p0 uintptr, times uintptr, flag int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.utimensat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(times), C.int(flag))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetcwd(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getcwd(C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callaccept(s int, rsa uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.accept(C.int(s), C.uintptr_t(rsa), C.uintptr_t(addrlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetdirent(fd int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getdirent(C.int(fd), C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callwait4(pid int, status uintptr, options int, rusage uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.wait4(C.int(pid), C.uintptr_t(status), C.int(options), C.uintptr_t(rusage))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callacct(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.acct(C.uintptr_t(_p0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callchdir(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.chdir(C.uintptr_t(_p0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callchroot(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.chroot(C.uintptr_t(_p0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callclose(fd int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.close(C.int(fd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calldup(oldfd int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.dup(C.int(oldfd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
callexit(code int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.exit(C.int(code))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfaccessat(dirfd int, _p0 uintptr, mode uint32, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.faccessat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchdir(fd int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fchdir(C.int(fd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchmod(fd int, mode uint32) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fchmod(C.int(fd), C.uint(mode))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchmodat(dirfd int, _p0 uintptr, mode uint32, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fchmodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchownat(dirfd int, _p0 uintptr, uid int, gid int, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fchownat(C.int(dirfd), C.uintptr_t(_p0), C.int(uid), C.int(gid), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfdatasync(fd int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fdatasync(C.int(fd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfsync(fd int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fsync(C.int(fd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpgid(pid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getpgid(C.int(pid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpgrp() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getpgrp()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getpid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetppid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getppid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpriority(which int, who int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getpriority(C.int(which), C.int(who))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetrusage(who int, rusage uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getrusage(C.int(who), C.uintptr_t(rusage))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetsid(pid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getsid(C.int(pid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callkill(pid int, sig int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.kill(C.int(pid), C.int(sig))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsyslog(typ int, _p0 uintptr, _lenp0 int) (r1 uintptr, 
e1 Errno) { + r1 = uintptr(C.syslog(C.int(typ), C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmkdir(dirfd int, _p0 uintptr, mode uint32) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mkdir(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmkdirat(dirfd int, _p0 uintptr, mode uint32) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mkdirat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmkfifo(_p0 uintptr, mode uint32) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mkfifo(C.uintptr_t(_p0), C.uint(mode))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmknod(_p0 uintptr, mode uint32, dev int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mknod(C.uintptr_t(_p0), C.uint(mode), C.int(dev))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmknodat(dirfd int, _p0 uintptr, mode uint32, dev int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mknodat(C.int(dirfd), C.uintptr_t(_p0), C.uint(mode), C.int(dev))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callnanosleep(time uintptr, leftover uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.nanosleep(C.uintptr_t(time), C.uintptr_t(leftover))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callopen64(_p0 uintptr, mode int, perm uint32) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.open64(C.uintptr_t(_p0), C.int(mode), C.uint(perm))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callopenat(dirfd int, _p0 uintptr, flags int, mode uint32) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.openat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.uint(mode))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callread(fd int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.read(C.int(fd), C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callreadlink(_p0 uintptr, _p1 uintptr, _lenp1 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.readlink(C.uintptr_t(_p0), C.uintptr_t(_p1), C.size_t(_lenp1))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callrenameat(olddirfd int, _p0 uintptr, newdirfd int, _p1 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.renameat(C.int(olddirfd), C.uintptr_t(_p0), C.int(newdirfd), C.uintptr_t(_p1))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetdomainname(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setdomainname(C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsethostname(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.sethostname(C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetpgid(pid int, pgid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setpgid(C.int(pid), C.int(pgid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetsid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setsid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsettimeofday(tv uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.settimeofday(C.uintptr_t(tv))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetuid(uid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setuid(C.int(uid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetgid(uid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setgid(C.int(uid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetpriority(which int, who int, prio int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setpriority(C.int(which), C.int(who), C.int(prio))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callstatx(dirfd int, _p0 uintptr, flags int, mask int, stat uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.statx(C.int(dirfd), C.uintptr_t(_p0), C.int(flags), C.int(mask), C.uintptr_t(stat))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsync() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.sync()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calltimes(tms uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.times(C.uintptr_t(tms))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callumask(mask int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.umask(C.int(mask))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calluname(buf uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.uname(C.uintptr_t(buf))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callunlink(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.unlink(C.uintptr_t(_p0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callunlinkat(dirfd int, _p0 uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.unlinkat(C.int(dirfd), C.uintptr_t(_p0), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callustat(dev int, ubuf uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.ustat(C.int(dev), C.uintptr_t(ubuf))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callwrite(fd int, _p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.write(C.int(fd), C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calldup2(oldfd int, newfd int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.dup2(C.int(oldfd), C.int(newfd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
+ +func callposix_fadvise64(fd int, offset int64, length int64, advice int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.posix_fadvise64(C.int(fd), C.longlong(offset), C.longlong(length), C.int(advice))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfchown(fd int, uid int, gid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fchown(C.int(fd), C.int(uid), C.int(gid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfstat(fd int, stat uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fstat(C.int(fd), C.uintptr_t(stat))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfstatat(dirfd int, _p0 uintptr, stat uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(stat), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callfstatfs(fd int, buf uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.fstatfs(C.int(fd), C.uintptr_t(buf))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callftruncate(fd int, length int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.ftruncate(C.int(fd), C.longlong(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetegid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getegid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgeteuid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.geteuid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetgid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getgid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetuid() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getuid()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllchown(_p0 uintptr, uid int, gid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.lchown(C.uintptr_t(_p0), C.int(uid), C.int(gid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllisten(s int, n int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.listen(C.int(s), C.int(n))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.lstat(C.uintptr_t(_p0), C.uintptr_t(stat))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpause() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.pause()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpread64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.pread64(C.int(fd), C.uintptr_t(_p0), C.size_t(_lenp0), C.longlong(offset))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.pwrite64(C.int(fd), C.uintptr_t(_p0), 
C.size_t(_lenp0), C.longlong(offset))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.pselect(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout), C.uintptr_t(sigmask))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetregid(rgid int, egid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setregid(C.int(rgid), C.int(egid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetreuid(ruid int, euid int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setreuid(C.int(ruid), C.int(euid))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callshutdown(fd int, how int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.shutdown(C.int(fd), C.int(how))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.splice(C.int(rfd), C.uintptr_t(roff), C.int(wfd), C.uintptr_t(woff), C.int(len), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(stat))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callstatfs(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.statfs(C.uintptr_t(_p0), C.uintptr_t(buf))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calltruncate(_p0 uintptr, length int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.truncate(C.uintptr_t(_p0), C.longlong(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callbind(s int, addr uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.bind(C.int(s), C.uintptr_t(addr), C.uintptr_t(addrlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callconnect(s int, addr uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.connect(C.int(s), C.uintptr_t(addr), C.uintptr_t(addrlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetgroups(n int, list uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getgroups(C.int(n), C.uintptr_t(list))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetgroups(n int, list uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setgroups(C.int(n), C.uintptr_t(list))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetsockopt(s int, level int, name int, val uintptr, vallen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(val), C.uintptr_t(vallen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetsockopt(s int, 
level int, name int, val uintptr, vallen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setsockopt(C.int(s), C.int(level), C.int(name), C.uintptr_t(val), C.uintptr_t(vallen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsocket(domain int, typ int, proto int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.socket(C.int(domain), C.int(typ), C.int(proto))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsocketpair(domain int, typ int, proto int, fd uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.socketpair(C.int(domain), C.int(typ), C.int(proto), C.uintptr_t(fd))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetpeername(fd int, rsa uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getpeername(C.int(fd), C.uintptr_t(rsa), C.uintptr_t(addrlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetsockname(fd int, rsa uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getsockname(C.int(fd), C.uintptr_t(rsa), C.uintptr_t(addrlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callrecvfrom(fd int, _p0 uintptr, _lenp0 int, flags int, from uintptr, fromlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.recvfrom(C.int(fd), C.uintptr_t(_p0), C.size_t(_lenp0), C.int(flags), C.uintptr_t(from), C.uintptr_t(fromlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.sendto(C.int(s), C.uintptr_t(_p0), C.size_t(_lenp0), C.int(flags), C.uintptr_t(to), C.uintptr_t(addrlen))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.recvmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.sendmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmunmap(addr uintptr, length uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.munmap(C.uintptr_t(addr), C.uintptr_t(length))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmadvise(_p0 uintptr, _lenp0 int, advice int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.madvise(C.uintptr_t(_p0), C.size_t(_lenp0), C.int(advice))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmprotect(_p0 uintptr, _lenp0 int, prot int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mprotect(C.uintptr_t(_p0), C.size_t(_lenp0), C.int(prot))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmlock(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mlock(C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func callmlockall(flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mlockall(C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmsync(_p0 uintptr, _lenp0 int, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.msync(C.uintptr_t(_p0), C.size_t(_lenp0), C.int(flags))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmunlock(_p0 uintptr, _lenp0 int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.munlock(C.uintptr_t(_p0), C.size_t(_lenp0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmunlockall() (r1 uintptr, e1 Errno) { + r1 = uintptr(C.munlockall()) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpipe(p uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.pipe(C.uintptr_t(p))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callpoll(fds uintptr, nfds int, timeout int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.poll(C.uintptr_t(fds), C.int(nfds), C.int(timeout))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgettimeofday(tv uintptr, tzp uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.gettimeofday(C.uintptr_t(tv), C.uintptr_t(tzp))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calltime(t uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.time(C.uintptr_t(t))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callutime(_p0 uintptr, buf uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.utime(C.uintptr_t(_p0), C.uintptr_t(buf))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.getrlimit(C.int(resource), C.uintptr_t(rlim))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func callmmap64(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.mmap64(C.uintptr_t(addr), C.uintptr_t(length), C.int(prot), C.int(flags), C.int(fd), C.longlong(offset))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 9ce06df6e..8b7f27309 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// go run mksyscall.go -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go // Code 
generated by the command above; see README.md. DO NOT EDIT. // +build darwin,386 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index de9927049..285646462 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 81c4f0935..37e3c6925 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// go run mksyscall.go -l32 -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,arm diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 338c32d40..67f3b4e5e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,arm64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 91f36e9ec..da9986dd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go +// go run mksyscall.go -dragonfly -tags dragonfly,amd64 syscall_bsd.go syscall_dragonfly.go syscall_dragonfly_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build dragonfly,amd64 @@ -588,6 +588,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -643,6 +658,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -927,6 +957,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -967,6 +1017,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -997,6 +1062,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(fd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1023,6 +1103,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1345,6 +1441,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1408,6 +1524,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index ad77882b8..80903e47b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go +// go run mksyscall.go -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build freebsd,386 @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *stat_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,7 +922,17 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { +func fstat_freebsd12(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -937,7 +947,22 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -947,6 +972,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -967,14 +1002,14 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -984,14 +1019,14 @@ func Getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + r0, _, e1 := 
Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1222,7 +1257,7 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1282,7 +1317,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { +func mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1297,6 +1332,36 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mknodat(fd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1687,7 +1752,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1702,7 +1767,7 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func statfs(path string, stat *statfs_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1717,6 +1782,21 @@ func Statfs(path string, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func statfs_freebsd12(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index d3ba6c46f..cd250ff0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags freebsd,amd64 syscall_bsd.go 
syscall_freebsd.go syscall_freebsd_amd64.go +// go run mksyscall.go -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build freebsd,amd64 @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *stat_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,7 +922,17 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { +func fstat_freebsd12(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -937,7 +947,22 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -947,6 +972,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -967,14 +1002,14 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -984,14 +1019,14 @@ func Getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = 
unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1222,7 +1257,7 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1282,7 +1317,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { +func mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1297,6 +1332,36 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mknodat(fd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1687,7 +1752,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1702,7 +1767,7 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func statfs(path string, stat *statfs_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1717,6 +1782,21 @@ func Statfs(path string, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func statfs_freebsd12(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 9dfd77b62..290a9c2cb 100644 
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go +// go run mksyscall.go -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build freebsd,arm @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *stat_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,7 +922,17 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { +func fstat_freebsd12(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -937,7 +947,22 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -947,6 +972,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -967,14 +1002,14 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -984,14 +1019,14 @@ func Getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, 
buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1222,7 +1257,7 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1282,7 +1317,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { +func mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1297,6 +1332,36 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mknodat(fd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1687,7 +1752,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1702,7 +1767,7 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func statfs(path string, stat *statfs_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1717,6 +1782,21 @@ func Statfs(path string, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func statfs_freebsd12(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err 
= BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 35b155a02..5356a5175 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go +// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build linux,386 @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 11a30786c..5e3abfc10 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go +// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,amd64 @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -2296,3 +2347,18 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 914f25f06..75db4c0c1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go +// go run mksyscall.go -l32 -arm -tags linux,arm syscall_linux.go syscall_linux_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,arm @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 1d6c55628..b890cb03c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go +// go run mksyscall.go -tags linux,arm64 syscall_linux.go syscall_linux_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,arm64 @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 260631d14..cc17b43d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,4 +1,4 @@ -// mksyscall.pl -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,mips @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ff2d84fb9..caf1408ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go +// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,mips64 @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 48d14e607..266be8b4a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go +// go run mksyscall.go -tags linux,mips64le syscall_linux.go syscall_linux_mips64x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,mips64le @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 12c17a92b..b16b3e102 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,mipsle @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 8300814d2..27b6a6bf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,ppc64 @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -2343,3 +2394,18 @@ func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 002b4e175..f7ecc9afd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,ppc64le @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -2343,3 +2394,18 @@ func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 542f3a3a3..e3cd4e53f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go +// go run mksyscall.go -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,riscv64 @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 1a9ba9992..3001d3798 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,s390x syscall_linux.go syscall_linux_s390x.go +// go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build linux,s390x @@ -458,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -606,6 +621,21 @@ func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer if len(dest) > 0 { @@ -807,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -2113,3 +2164,18 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index b26aee957..aafe3660f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go +// go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
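The ppc64le and s390x hunks also add an unexported kexecFileLoad stub. The exported entry point is assumed here to be KexecFileLoad in the per-arch syscall_linux files (not shown in this diff), taking the command line as a Go string and computing the NUL-inclusive length itself; treat the sketch below as an assumption about that wrapper, with hypothetical kernel/initrd paths and CAP_SYS_BOOT required.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	kernelFd, err := unix.Open("/boot/vmlinuz", unix.O_RDONLY, 0) // hypothetical path
	if err != nil {
		fmt.Println("open kernel:", err)
		return
	}
	defer unix.Close(kernelFd)

	initrdFd, err := unix.Open("/boot/initrd.img", unix.O_RDONLY, 0) // hypothetical path
	if err != nil {
		fmt.Println("open initrd:", err)
		return
	}
	defer unix.Close(initrdFd)

	// Assumed exported wrapper around kexec_file_load(2); flags 0 for a plain load.
	if err := unix.KexecFileLoad(kernelFd, initrdFd, "console=ttyS0", 0); err != nil {
		fmt.Println("KexecFileLoad:", err)
	}
}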
// +build linux,sparc64 @@ -417,6 +417,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { @@ -448,6 +458,21 @@ func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags in // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(oldfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) fd = int(r0) @@ -508,21 +533,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -589,6 +599,60 @@ func Fdatasync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -599,6 +663,42 @@ func Flock(fd int, how int) (err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -737,6 +837,27 @@ func Getxattr(path string, attr string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { var _p0 *byte _p0, err = BytePtrFromString(pathname) @@ -919,6 +1040,22 @@ func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1078,6 +1215,26 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { var _p0 *byte _p0, err = BytePtrFromString(keyType) @@ -1489,6 +1646,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, 
mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index f1874d5a1..642db7670 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go +// go run mksyscall.go -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build netbsd,386 @@ -865,6 +865,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1114,6 +1129,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1154,6 +1189,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1169,6 +1219,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1184,6 +1249,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1210,6 +1290,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1299,6 +1395,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1319,6 +1437,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1516,6 +1654,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), 
uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1564,6 +1722,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index eb8028397..59585fee3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go +// go run mksyscall.go -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build netbsd,amd64 @@ -865,6 +865,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1114,6 +1129,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1154,6 +1189,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1169,6 +1219,21 @@ func Mkfifo(path string, 
mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1184,6 +1249,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1210,6 +1290,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1299,6 +1395,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1319,6 +1437,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1516,6 +1654,26 @@ func Symlink(path string, link 
string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1564,6 +1722,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 7b36499d5..6ec31434b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go +// go run mksyscall.go -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
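The NetBSD hunks above add the *at family of wrappers (Fchownat, Linkat, Mkdirat, Mkfifoat, Mknodat, Openat, Readlinkat, Renameat, Symlinkat, Unlinkat). A minimal sketch of directory-fd-relative file manipulation on NetBSD using the signatures shown in this diff; the anchor directory path is hypothetical.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a directory to use as the dirfd anchor (hypothetical path).
	dirfd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("open dir:", err)
		return
	}
	defer unix.Close(dirfd)

	// mkdirat(2): create a subdirectory relative to dirfd.
	if err := unix.Mkdirat(dirfd, "scratch", 0755); err != nil {
		fmt.Println("Mkdirat:", err)
		return
	}

	// openat(2): create a file inside it, again relative to dirfd.
	fd, err := unix.Openat(dirfd, "scratch/data", unix.O_CREAT|unix.O_WRONLY, 0644)
	if err != nil {
		fmt.Println("Openat:", err)
		return
	}
	unix.Close(fd)

	// unlinkat(2): remove the file; passing the platform's AT_REMOVEDIR flag
	// instead of 0 would remove a directory.
	_ = unix.Unlinkat(dirfd, "scratch/data", 0)
}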
// +build netbsd,arm @@ -865,6 +865,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1114,6 +1129,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1154,6 +1189,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1169,6 +1219,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1184,6 +1249,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1210,6 +1290,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1299,6 +1395,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1319,6 +1437,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1516,6 +1654,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1564,6 +1722,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 1942049b0..6a489fac0 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build openbsd,386 @@ -431,6 +431,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -639,6 +650,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -909,6 +935,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -949,6 +995,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -964,6 +1025,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } 
+ return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -979,6 +1055,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1005,6 +1096,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1094,6 +1201,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1114,6 +1243,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1371,6 +1520,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, 
uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1419,6 +1588,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index d351c72cb..30cba4347 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build openbsd,amd64 @@ -431,6 +431,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -639,6 +650,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -909,6 +935,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if 
e1 != 0 { @@ -949,6 +995,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -964,6 +1025,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -979,6 +1055,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1005,6 +1096,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1094,6 +1201,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1114,6 +1243,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1371,6 +1520,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1419,6 +1588,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 617d47f0f..fa1beda33 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
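The OpenBSD hunks likewise gain the *at wrappers plus an unexported ppoll stub. A short sketch of Symlinkat/Readlinkat/Unlinkat against an explicit directory fd, using the signatures added above; the paths are hypothetical.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	dirfd, err := unix.Open("/tmp", unix.O_RDONLY, 0) // hypothetical anchor directory
	if err != nil {
		fmt.Println("open dir:", err)
		return
	}
	defer unix.Close(dirfd)

	// symlinkat(2): create "link" inside dirfd pointing at an arbitrary target.
	if err := unix.Symlinkat("/etc/passwd", dirfd, "link"); err != nil {
		fmt.Println("Symlinkat:", err)
		return
	}

	// readlinkat(2): read the target back; n is the number of bytes written to buf.
	buf := make([]byte, 256)
	n, err := unix.Readlinkat(dirfd, "link", buf)
	if err != nil {
		fmt.Println("Readlinkat:", err)
		return
	}
	fmt.Println("target:", string(buf[:n]))

	_ = unix.Unlinkat(dirfd, "link", 0)
}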
// +build openbsd,arm @@ -431,6 +431,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -639,6 +650,21 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -909,6 +935,26 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -949,6 +995,21 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -964,6 +1025,21 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -979,6 +1055,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + 
var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1005,6 +1096,22 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1094,6 +1201,28 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1114,6 +1243,26 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1371,6 +1520,26 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1419,6 +1588,21 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func 
Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index e2e5fc5e0..97b22a499 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -399,6 +399,8 @@ var ( procrecvfrom syscallFunc ) +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) @@ -408,6 +410,8 @@ func pipe(p *[2]_C_int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { @@ -416,6 +420,8 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -429,6 +435,8 @@ func Getcwd(buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) @@ -438,6 +446,8 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { @@ -446,6 +456,8 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) @@ -455,6 +467,8 @@ func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gethostname(buf []byte) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -468,6 +482,8 @@ func gethostname(buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimes(path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -481,6 +497,8 @@ func utimes(path string, times *[2]Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { var _p0 *byte 
_p0, err = BytePtrFromString(path) @@ -494,6 +512,8 @@ func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) @@ -503,6 +523,8 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { @@ -511,6 +533,8 @@ func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) @@ -520,6 +544,8 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) @@ -529,6 +555,8 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) @@ -538,6 +566,8 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -546,24 +576,32 @@ func acct(path *byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func __makedev(version int, major uint, minor uint) (val uint64) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0) val = uint64(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func __major(version int, dev uint64) (val uint) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) val = uint(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func __minor(version int, dev uint64) (val uint) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) val = uint(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) if e1 != 0 { @@ -572,6 +610,8 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) @@ -581,6 +621,8 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -594,6 +636,8 @@ func Access(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { @@ -602,6 +646,8 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -615,6 +661,8 @@ func Chdir(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chmod(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -628,6 +676,8 @@ func Chmod(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -641,6 +691,8 @@ func Chown(path string, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chroot(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -654,6 +706,8 @@ func Chroot(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -662,6 +716,8 @@ func Close(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Creat(path string, mode uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -676,6 +732,8 @@ func Creat(path string, mode uint32) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) @@ -685,6 +743,8 @@ func Dup(fd int) (nfd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { @@ -693,11 +753,15 @@ func Dup2(oldfd int, newfd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { sysvicall6(uintptr(unsafe.Pointer(&procExit)), 1, uintptr(code), 0, 0, 0, 0, 0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -711,6 +775,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -719,6 +785,8 @@ func Fchdir(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { @@ -727,6 +795,8 @@ func Fchmod(fd int, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -740,6 +810,8 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { @@ -748,6 +820,8 @@ func Fchown(fd int, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -761,6 +835,8 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -769,6 +845,8 @@ func Fdatasync(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { @@ -777,6 +855,8 @@ func Flock(fd int, how int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) @@ -786,6 +866,8 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { @@ -794,6 +876,8 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -807,6 +891,8 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { @@ -815,6 +901,8 @@ func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -828,18 +916,24 @@ func Getdents(fd int, buf 
[]byte, basep *uintptr) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getgid() (gid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetgid)), 0, 0, 0, 0, 0, 0, 0) gid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpid() (pid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) @@ -849,6 +943,8 @@ func Getpgid(pid int) (pgid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpgrp() (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) @@ -858,24 +954,32 @@ func Getpgrp() (pgid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Geteuid() (euid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGeteuid)), 0, 0, 0, 0, 0, 0, 0) euid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getegid() (egid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetegid)), 0, 0, 0, 0, 0, 0, 0) egid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getppid() (ppid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetppid)), 0, 0, 0, 0, 0, 0, 0) ppid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) @@ -885,6 +989,8 @@ func Getpriority(which int, who int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { @@ -893,6 +999,8 @@ func Getrlimit(which int, lim *Rlimit) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { @@ -901,6 +1009,8 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -909,12 +1019,16 @@ func Gettimeofday(tv *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetuid)), 0, 0, 0, 0, 0, 0, 0) uid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Kill(pid int, signum syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { @@ -923,6 +1037,8 @@ func Kill(pid int, signum syscall.Signal) (err error) { return } +// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT + func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -936,6 +1052,8 @@ func Lchown(path string, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Link(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -954,6 +1072,8 @@ func Link(path string, link string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { @@ -962,6 +1082,8 @@ func Listen(s int, backlog int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -975,6 +1097,8 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, advice int) (err error) { var _p0 *byte if len(b) > 0 { @@ -987,6 +1111,8 @@ func Madvise(b []byte, advice int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1000,6 +1126,8 @@ func Mkdir(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1013,6 +1141,8 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1026,6 +1156,8 @@ func Mkfifo(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifoat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1039,6 +1171,8 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1052,6 +1186,8 @@ func Mknod(path string, mode uint32, dev int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1065,6 +1201,8 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { @@ -1077,6 +1215,8 @@ func Mlock(b []byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mlockall(flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1085,6 +1225,8 @@ func Mlockall(flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mprotect(b []byte, prot int) (err error) { var _p0 *byte if len(b) > 0 { @@ -1097,6 +1239,8 @@ func 
Mprotect(b []byte, prot int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Msync(b []byte, flags int) (err error) { var _p0 *byte if len(b) > 0 { @@ -1109,6 +1253,8 @@ func Msync(b []byte, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Munlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { @@ -1121,6 +1267,8 @@ func Munlock(b []byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -1129,6 +1277,8 @@ func Munlockall() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { @@ -1137,6 +1287,8 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1151,6 +1303,8 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1165,6 +1319,8 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1179,6 +1335,8 @@ func Pathconf(path string, name int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -1187,6 +1345,8 @@ func Pause() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1200,6 +1360,8 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1213,6 +1375,8 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1226,6 +1390,8 @@ func read(fd int, p []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Readlink(path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1244,6 +1410,8 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1262,6 +1430,8 @@ func Rename(from string, to string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -1280,6 +1450,8 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1293,6 +1465,8 @@ func Rmdir(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) @@ -1302,6 +1476,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { @@ -1310,6 +1486,8 @@ func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1318,6 +1496,8 @@ func Setegid(egid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1326,6 +1506,8 @@ func Seteuid(euid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1334,6 +1516,8 @@ func Setgid(gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sethostname(p []byte) (err error) { var _p0 *byte if len(p) > 0 { @@ -1346,6 +1530,8 @@ func Sethostname(p []byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { @@ -1354,6 +1540,8 @@ func Setpgid(pid int, pgid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { @@ -1362,6 +1550,8 @@ func Setpriority(which int, who int, prio int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { @@ -1370,6 +1560,8 @@ func Setregid(rgid int, egid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setreuid(ruid int, euid int) (err error) { _, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { @@ -1378,6 +1570,8 @@ func Setreuid(ruid int, euid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { @@ -1386,6 +1580,8 @@ func Setrlimit(which int, lim *Rlimit) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) @@ -1395,6 +1591,8 @@ func Setsid() (pid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1403,6 +1601,8 @@ func Setuid(uid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { @@ -1411,6 +1611,8 @@ func Shutdown(s int, how int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1424,6 +1626,8 @@ func Stat(path string, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Statvfs(path string, vfsstat *Statvfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1437,6 +1641,8 @@ func Statvfs(path string, vfsstat *Statvfs_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1455,6 +1661,8 @@ func Symlink(path string, link string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -1463,6 +1671,8 @@ func Sync() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) @@ -1472,6 +1682,8 @@ func Times(tms *Tms) (ticks uintptr, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1485,6 +1697,8 @@ func Truncate(path string, length int64) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1493,6 +1707,8 @@ func Fsync(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { @@ -1501,12 +1717,16 @@ func Ftruncate(fd int, 
length int64) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Umask(mask int) (oldmask int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0) oldmask = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Uname(buf *Utsname) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1515,6 +1735,8 @@ func Uname(buf *Utsname) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(target string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(target) @@ -1528,6 +1750,8 @@ func Unmount(target string, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unlink(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1541,6 +1765,8 @@ func Unlink(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1554,6 +1780,8 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { @@ -1562,6 +1790,8 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1575,6 +1805,8 @@ func Utime(path string, buf *Utimbuf) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { @@ -1583,6 +1815,8 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { @@ -1591,6 +1825,8 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) @@ -1600,6 +1836,8 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { @@ -1608,6 +1846,8 @@ func munmap(addr uintptr, length uintptr) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(outfd int, infd int, 
offset *int64, count int) (written int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1617,6 +1857,8 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var _p0 *byte if len(buf) > 0 { @@ -1629,6 +1871,8 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func socket(domain int, typ int, proto int) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) @@ -1638,6 +1882,8 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { @@ -1646,6 +1892,8 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1659,6 +1907,8 @@ func write(fd int, p []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { @@ -1667,6 +1917,8 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { @@ -1675,6 +1927,8 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { @@ -1683,6 +1937,8 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { var _p0 *byte if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 90c95c2c7..d014451c9 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -254,4 +254,17 @@ var sysctlMib = []mibentry{ {"net.mpls.ttl", []_C_int{4, 33, 2}}, 
{"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index a1db143f8..6e281d6b3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -360,4 +360,5 @@ const ( SYS_PKEY_FREE = 396 SYS_STATX = 397 SYS_RSEQ = 398 + SYS_IO_PGETEVENTS = 399 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 2e4cee70d..f9157e192 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -284,4 +284,5 @@ const ( SYS_PKEY_FREE = 290 SYS_STATX = 291 SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 41e4fd1d3..a5d991915 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -283,4 +283,5 @@ const ( SYS_PKEY_FREE = 290 SYS_STATX = 291 SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 07787301f..f93f391d2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// Code generated by the command above; DO NOT EDIT. +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build 386,openbsd @@ -12,6 +12,7 @@ const ( SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \ SYS_OPEN = 5 // { int sys_open(const char *path, \ SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \ SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } SYS_UNLINK = 10 // { int sys_unlink(const char *path); } @@ -37,11 +38,10 @@ const ( SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \ SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \ SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \ - SYS_ACCESS = 33 // { int sys_access(const char *path, int flags); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } SYS_SYNC = 36 // { void sys_sync(void); } - SYS_KILL = 37 // { int sys_kill(int pid, int signum); } SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } SYS_GETPPID = 39 // { pid_t sys_getppid(void); } SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } @@ -53,7 +53,6 @@ const ( SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ SYS_GETGID = 47 // { gid_t sys_getgid(void); } SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } - SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } SYS_ACCT = 51 // { int sys_acct(const char *path); } SYS_SIGPENDING = 52 // { int sys_sigpending(void); } @@ -62,7 +61,7 @@ const ( SYS_REBOOT = 55 // { int sys_reboot(int opt); } SYS_REVOKE = 56 // { int sys_revoke(const char *path); } SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \ - SYS_READLINK = 58 // { int sys_readlink(const char *path, char *buf, \ + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, \ SYS_EXECVE = 59 // { int sys_execve(const char *path, \ SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } SYS_CHROOT = 61 // { int sys_chroot(const char *path); } @@ -86,15 +85,18 @@ const ( SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \ SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ SYS_GETPGRP = 81 // { int sys_getpgrp(void); } - SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, int pgid); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, \ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, \ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \ SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, \ SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \ SYS_FSYNC = 95 // { int sys_fsync(int fd); } SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } @@ -102,16 +104,24 @@ const ( SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \ SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \ SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, \ + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, \ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, \ + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, \ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } SYS_READV = 120 // { ssize_t sys_readv(int fd, \ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } @@ -125,6 +135,7 @@ const ( SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } SYS_SETSID = 147 // { int sys_setsid(void); } SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } @@ -144,7 +155,7 @@ const ( SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } - SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, \ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 7a1693acb..be1198d91 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// Code generated by the command above; DO NOT EDIT. +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm,openbsd @@ -53,7 +53,6 @@ const ( SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ SYS_GETGID = 47 // { gid_t sys_getgid(void); } SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } - SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } SYS_ACCT = 51 // { int sys_acct(const char *path); } SYS_SIGPENDING = 52 // { int sys_sigpending(void); } @@ -87,9 +86,10 @@ const ( SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ SYS_GETPGRP = 81 // { int sys_getpgrp(void); } SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } - SYS_SENDSYSLOG = 83 // { int sys_sendsyslog(const void *buf, size_t nbyte); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, \ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, \ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ @@ -111,10 +111,14 @@ const ( SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, \ + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, \ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, \ + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, \ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } SYS_READV = 120 // { ssize_t sys_readv(int fd, \ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ SYS_KILL = 122 // { int sys_kill(int pid, int signum); } @@ -131,6 +135,7 @@ const ( SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } SYS_SETSID = 147 // { int sys_setsid(void); } SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } @@ -150,7 +155,7 @@ const ( SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } - SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, \ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index f1cfe7db1..cedc9b0f2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - 
sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x3ff ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 95581a3bc..f46482d27 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x3ff ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 327af5fba..2aeb52a88 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 116e6e075..0d0d9f2cc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 2750ad760..04e344b78 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -7,11 +7,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 8cead0996..9fec185c1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index c01ae6701..7b34e2e2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 8006c5638..c146c1ad3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -6,11 +6,11 @@ package unix const ( - 
sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( @@ -56,28 +56,84 @@ type Rlimit struct { type _Gid_t uint32 +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + type Stat_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtimespec Timespec - Pad_cgo_0 [8]byte + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim_ext int32 + Atim Timespec + Mtim_ext int32 + Mtim Timespec + Ctim_ext int32 + Ctim Timespec + Btim_ext int32 + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type stat_freebsd11_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec + _ [8]byte } type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [1024]int8 + Mntonname [1024]int8 +} + +type statfs_freebsd11_t struct { Version uint32 Type uint32 Flags uint64 @@ -112,6 +168,17 @@ type Flock_t struct { } type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type dirent_freebsd11 struct { Fileno uint32 Reclen uint16 Type uint8 @@ -272,7 +339,7 @@ type Kevent_t struct { } type FdSet struct { - X__fds_bits [32]uint32 + Bits [32]uint32 } const ( @@ -288,53 +355,53 @@ const ( ) type ifMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data ifData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data ifData } type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type ifData struct { - Type uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Vhid uint8 - Datalen uint16 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Oqdrops uint64 - Noproto uint64 - Hwassist uint64 - X__ifi_epoch [8]byte - X__ifi_lastchange [16]byte + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + 
_ [16]byte } type IfData struct { @@ -366,24 +433,24 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfAnnounceMsghdr struct { @@ -396,19 +463,19 @@ type IfAnnounceMsghdr struct { } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Fmask int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -465,18 +532,18 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type BpfZbufHeader struct { Kernel_gen uint32 Kernel_len uint32 User_gen uint32 - X_bzh_pad [5]uint32 + _ [5]uint32 } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 716774ded..ac33a8dd4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( @@ -56,27 +56,79 @@ type Rlimit struct { type _Gid_t uint32 +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + type Stat_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtimespec Timespec + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type stat_freebsd11_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec } type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [1024]int8 + Mntonname [1024]int8 +} + +type statfs_freebsd11_t struct { Version uint32 Type uint32 Flags uint64 @@ -102,16 +154,27 @@ type Statfs_t struct { } type Flock_t struct { - Start int64 - Len int64 - Pid int32 - 
Type int16 - Whence int16 - Sysid int32 - Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte } type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type dirent_freebsd11 struct { Fileno uint32 Reclen uint16 Type uint8 @@ -212,10 +275,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -274,7 +337,7 @@ type Kevent_t struct { } type FdSet struct { - X__fds_bits [16]uint64 + Bits [16]uint64 } const ( @@ -290,53 +353,53 @@ const ( ) type ifMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data ifData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data ifData } type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type ifData struct { - Type uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Vhid uint8 - Datalen uint16 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Oqdrops uint64 - Noproto uint64 - Hwassist uint64 - X__ifi_epoch [8]byte - X__ifi_lastchange [16]byte + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + _ [16]byte } type IfData struct { @@ -368,24 +431,24 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfAnnounceMsghdr struct { @@ -398,19 +461,19 @@ type IfAnnounceMsghdr struct { } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Fmask int32 - Inits uint64 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics } type RtMetrics struct { @@ -455,9 +518,9 @@ type BpfZbuf struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -468,18 +531,18 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [6]byte + Tstamp 
Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte } type BpfZbufHeader struct { Kernel_gen uint32 Kernel_len uint32 User_gen uint32 - X_bzh_pad [5]uint32 + _ [5]uint32 } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 92e07b00f..e27511a64 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( @@ -21,15 +21,15 @@ type ( ) type Timespec struct { - Sec int64 - Nsec int32 - Pad_cgo_0 [4]byte + Sec int64 + Nsec int32 + _ [4]byte } type Timeval struct { - Sec int64 - Usec int32 - Pad_cgo_0 [4]byte + Sec int64 + Usec int32 + _ [4]byte } type Rusage struct { @@ -58,27 +58,79 @@ type Rlimit struct { type _Gid_t uint32 +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + type Stat_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtimespec Timespec + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type stat_freebsd11_t struct { + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec } type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [1024]int8 + Mntonname [1024]int8 +} + +type statfs_freebsd11_t struct { Version uint32 Type uint32 Flags uint64 @@ -104,16 +156,27 @@ type Statfs_t struct { } type Flock_t struct { - Start int64 - Len int64 - Pid int32 - Type int16 - Whence int16 - Sysid int32 - Pad_cgo_0 [4]byte + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte } type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type dirent_freebsd11 struct { Fileno uint32 Reclen uint16 Type uint8 @@ -274,7 +337,7 @@ type Kevent_t struct { } type FdSet struct { - X__fds_bits [32]uint32 + Bits [32]uint32 } const ( @@ -290,53 +353,53 @@ const ( ) type ifMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data ifData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data ifData } type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags 
int32 + Index uint16 + _ [2]byte + Data IfData } type ifData struct { - Type uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Vhid uint8 - Datalen uint16 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Oqdrops uint64 - Noproto uint64 - Hwassist uint64 - X__ifi_epoch [8]byte - X__ifi_lastchange [16]byte + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + _ [16]byte } type IfData struct { @@ -363,30 +426,30 @@ type IfData struct { Iqdrops uint32 Noproto uint32 Hwassist uint32 - Pad_cgo_0 [4]byte + _ [4]byte Epoch int64 Lastchange Timeval } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfAnnounceMsghdr struct { @@ -399,19 +462,19 @@ type IfAnnounceMsghdr struct { } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Fmask int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -468,18 +531,18 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [6]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte } type BpfZbufHeader struct { Kernel_gen uint32 Kernel_len uint32 User_gen uint32 - X_bzh_pad [5]uint32 + _ [5]uint32 } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 793b3fdd2..f56e164b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -286,6 +286,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -421,6 +423,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -494,7 +497,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1937,3 +1940,52 @@ type 
XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index c3548848a..ac5f636a6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -288,6 +288,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -425,6 +427,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -498,7 +501,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1959,3 +1962,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 
+ SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index c7b699432..eb7562da7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -289,6 +289,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -424,6 +426,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -497,7 +500,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1927,3 +1930,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 2339b2129..3c4fb88d7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) 
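
The hunks above (repeated for each linux GOARCH) export the former `sizeof*` constants as `SizeofPtr`, `SizeofShort`, `SizeofInt`, `SizeofLong` and `SizeofLongLong`, and add the `NCSI_*` and `SOF_TIMESTAMPING_*` constant blocks. As a rough, illustrative sketch only — not part of this diff; the UDP socket setup, the particular flag combination, and the error handling are assumptions made for the example — a Linux caller vendoring this updated `golang.org/x/sys/unix` might combine the new timestamping flags like this:

```go
// Illustrative sketch (not part of the vendored diff): request software
// TX/RX timestamps on a Linux socket using the SOF_TIMESTAMPING_* constants
// added above. Flag choice and socket type are example assumptions.
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// enableSoftwareTimestamps asks the kernel to generate and report software
// timestamps for packets sent and received on fd.
func enableSoftwareTimestamps(fd int) error {
	flags := unix.SOF_TIMESTAMPING_SOFTWARE |
		unix.SOF_TIMESTAMPING_TX_SOFTWARE |
		unix.SOF_TIMESTAMPING_RX_SOFTWARE
	return unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_TIMESTAMPING, flags)
}

func main() {
	// A throwaway UDP socket, created only to demonstrate the setsockopt call.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	if err := enableSoftwareTimestamps(fd); err != nil {
		log.Fatalf("SO_TIMESTAMPING: %v", err)
	}
}
```
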
@@ -289,6 +289,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +428,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -499,7 +502,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1938,3 +1941,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 013462ba5..647e40a77 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -287,6 +287,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -422,6 +424,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -495,7 +498,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1932,3 +1935,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + 
NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 86f0ab552..e0159b01d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -289,6 +289,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +428,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -499,7 +502,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1940,3 +1943,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + 
SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 007537b4d..c1a024dfd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -289,6 +289,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +428,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -499,7 +502,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1940,3 +1943,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index fc4a15912..7e525eb7f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -287,6 +287,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -422,6 +424,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -495,7 +498,7 @@ const ( IFLA_EVENT 
= 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1932,3 +1935,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 377e9efdf..85ae2954d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -290,6 +290,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -427,6 +429,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -500,7 +503,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1948,3 +1951,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + 
NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 595ba6303..d0c930a13 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -290,6 +290,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -427,6 +429,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -500,7 +503,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1948,3 +1951,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 0ccf5bc3e..c1a20bcd3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -6,11 +6,11 @@ package unix const 
( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -289,6 +289,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -426,6 +428,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -499,7 +502,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1965,3 +1968,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 06b07852d..3c26ea82b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) @@ -288,6 +288,8 @@ type RawSockaddrXDP struct { Shared_umem_fd uint32 } +type RawSockaddrPPPoX [0x1e]byte + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -425,6 +427,7 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -498,7 +501,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x31 + IFLA_MAX = 0x33 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -1965,3 +1968,52 @@ type XDPDesc struct { Len uint32 Options uint32 } + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + 
NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 8e7384b89..1fc7f7dea 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -5,11 +5,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9e9088de1..2dae0c17a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( @@ -402,6 +402,13 @@ type Winsize struct { Ypixel uint16 } +type Ptmget struct { + Cfd int32 + Sfd int32 + Cn [1024]byte + Sn [1024]byte +} + const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x200 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index ed3f17366..1f0e76c0c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( @@ -409,6 +409,13 @@ type Winsize struct { Ypixel uint16 } +type Ptmget struct { + Cfd int32 + Sfd int32 + Cn [1024]byte + Sn [1024]byte +} + const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x200 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index d263b6147..53f2159c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - 
sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( @@ -407,6 +407,13 @@ type Winsize struct { Ypixel uint16 } +type Ptmget struct { + Cfd int32 + Sfd int32 + Cn [1024]byte + Sn [1024]byte +} + const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x200 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 231f4e8ef..8b37d8399 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( @@ -458,6 +458,8 @@ const ( POLLWRNORM = 0x4 ) +type Sigset_t uint32 + type Utsname struct { Sysname [256]byte Nodename [256]byte @@ -465,3 +467,94 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Anonpages int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Nanonneeded int32 + Nfreeanon int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Obsolete_swapins int32 + Obsolete_swapouts int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Zeroaborts int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Pdreanon int32 + Pdrevnode int32 + Pdrevtext int32 + Fpswtch int32 + Kmapent int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index bb2c44886..6efea4635 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( @@ -458,6 +458,8 @@ const ( POLLWRNORM = 0x4 ) +type Sigset_t uint32 + type Utsname struct { Sysname [256]byte Nodename [256]byte @@ -465,3 +467,94 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired 
int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Anonpages int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Nanonneeded int32 + Nfreeanon int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Obsolete_swapins int32 + Obsolete_swapouts int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Zeroaborts int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Pdreanon int32 + Pdrevnode int32 + Pdrevtext int32 + Fpswtch int32 + Kmapent int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 941367cab..87a637e3f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( @@ -451,6 +451,8 @@ const ( POLLWRNORM = 0x4 ) +type Sigset_t uint32 + type Utsname struct { Sysname [256]byte Nodename [256]byte @@ -458,3 +460,94 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Anonpages int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Nanonneeded int32 + Nfreeanon int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Obsolete_swapins int32 + Obsolete_swapouts int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Zeroaborts int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + 
Pdreanon int32 + Pdrevnode int32 + Pdrevtext int32 + Fpswtch int32 + Kmapent int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0543e1a49..8531a190f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x400 MaxHostNameLen = 0x100 ) diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 7228d97e9..ae93e2471 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -6,6 +6,7 @@ package rate import ( + "context" "fmt" "math" "sync" @@ -212,19 +213,8 @@ func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { return &r } -// contextContext is a temporary(?) copy of the context.Context type -// to support both Go 1.6 using golang.org/x/net/context and Go 1.7+ -// with the built-in context package. If people ever stop using Go 1.6 -// we can remove this. -type contextContext interface { - Deadline() (deadline time.Time, ok bool) - Done() <-chan struct{} - Err() error - Value(key interface{}) interface{} -} - // Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) wait(ctx contextContext) (err error) { +func (lim *Limiter) Wait(ctx context.Context) (err error) { return lim.WaitN(ctx, 1) } @@ -232,7 +222,7 @@ func (lim *Limiter) wait(ctx contextContext) (err error) { // It returns an error if n exceeds the Limiter's burst size, the Context is // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. -func (lim *Limiter) waitN(ctx contextContext, n int) (err error) { +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { if n > lim.burst && lim.limit != Inf { return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) } diff --git a/vendor/golang.org/x/time/rate/rate_go16.go b/vendor/golang.org/x/time/rate/rate_go16.go deleted file mode 100644 index 6bab1850f..000000000 --- a/vendor/golang.org/x/time/rate/rate_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package rate - -import "golang.org/x/net/context" - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.waitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - return lim.waitN(ctx, n) -} diff --git a/vendor/golang.org/x/time/rate/rate_go17.go b/vendor/golang.org/x/time/rate/rate_go17.go deleted file mode 100644 index f90d85f51..000000000 --- a/vendor/golang.org/x/time/rate/rate_go17.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package rate - -import "context" - -// Wait is shorthand for WaitN(ctx, 1). 
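
The `golang.org/x/time/rate` hunk above removes the Go 1.6 `contextContext` shim (and, below, deletes the build-tagged `rate_go16.go`/`rate_go17.go` files), so `Wait` and `WaitN` are now defined once in `rate.go` against the standard `context.Context`. A minimal usage sketch of the updated API follows — illustrative only; the limit, burst, and timeout values are made up for the example:

```go
// Illustrative sketch (not part of the vendored diff): using the updated
// golang.org/x/time/rate API in which Wait/WaitN take a context.Context.
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Allow 2 events per second with a burst of 1 (values chosen for the example).
	lim := rate.NewLimiter(rate.Limit(2), 1)

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	for i := 0; i < 5; i++ {
		// Wait blocks until the limiter permits one event, or returns an error
		// if the context is canceled or its deadline would be exceeded.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("wait aborted:", err)
			return
		}
		fmt.Println("event", i, "at", time.Now().Format(time.RFC3339Nano))
	}
}
```
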
-func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.waitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - return lim.waitN(ctx, n) -} diff --git a/vendor/google.golang.org/api/.gitignore b/vendor/google.golang.org/api/.gitignore new file mode 100644 index 000000000..50d2a9bd6 --- /dev/null +++ b/vendor/google.golang.org/api/.gitignore @@ -0,0 +1,12 @@ +_obj/ +*_testmain.go +clientid.dat +clientsecret.dat +/google-api-go-generator/google-api-go-generator + +*.6 +*.8 +*~ +*.out +*.test +*.exe diff --git a/vendor/google.golang.org/api/.hgtags b/vendor/google.golang.org/api/.hgtags new file mode 100644 index 000000000..f1a735783 --- /dev/null +++ b/vendor/google.golang.org/api/.hgtags @@ -0,0 +1 @@ +b571b553f8c057cb6952ce817dfb09b6e34a8c0b release diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS new file mode 100644 index 000000000..f73b72574 --- /dev/null +++ b/vendor/google.golang.org/api/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTING.md b/vendor/google.golang.org/api/CONTRIBUTING.md new file mode 100644 index 000000000..e4d36da35 --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTING.md @@ -0,0 +1,484 @@ +# Contributing to the Google API Go Client + +## Master git repo + +Our master git repo is https://code.googlesource.com/google-api-go-client + +## Pull Requests + +We do **NOT** use Github pull requests. We use Gerrit instead +with the same workflow as Go. See below. + +## The source tree + +Most of this project is auto-generated. + +The notable directories which are not auto-generated: + +``` + google-api-go-generator/ -- the generator itself + googleapi/ -- shared common code, used by auto-generated code + examples/ -- sample code +``` + +# Contribution Guidelines + +## Introduction + +This document explains how to contribute changes to the google-api-go-client project. + +## Testing redux + +You've written and tested your code, but +before sending code out for review, run all the tests for the whole +tree to make sure the changes don't break other packages or programs: + +``` +$ make cached +$ go test ./... +... +ok google.golang.org/api/google-api-go-generator 0.226s +ok google.golang.org/api/googleapi 0.015s +... +``` + +Ideally, you will add unit tests to one of the above directories to +demonstrate the changes you are making and include the tests with your +code review. + +## Code review + +Changes to google-api-go-client must be reviewed before they are submitted, +no matter who makes the change. +A custom git command called `git-codereview`, +discussed below, helps manage the code review process through a Google-hosted +[instance](https://code-review.googlesource.com/) of the code review +system called [Gerrit](https://code.google.com/p/gerrit/). + +### Set up authentication for code review + +The Git code hosting server and Gerrit code review server both use a Google +Account to authenticate. 
You therefore need a Google Account to proceed. +(If you can use the account to +[sign in at google.com](https://www.google.com/accounts/Login), +you can use it to sign in to the code review server.) +The email address you use with the code review system +needs to be added to the [`CONTRIBUTORS`](/CONTRIBUTORS) file +with your first code review. +You can [create a Google Account](https://www.google.com/accounts/NewAccount) +associated with any address where you receive email. + +Visit the site [code.googlesource.com](https://code.googlesource.com) +and log in using your Google Account. +Click on the "Generate Password" link that appears at the top of the page. + +Click the radio button that says "Only `code.googlesource.com`" +to use this authentication token only for the google-api-go-client project. + +Further down the page is a box containing commands to install +the authentication cookie in file called `.gitcookies` in your home +directory. +Copy the text for the commands into a Unix shell window to execute it. +That will install the authentication token. + +(If you are on a Windows computer, you should instead follow the instructions +in the yellow box to run the command.) + +### Register with Gerrit + +Now that you have a Google account and the authentication token, +you need to register your account with Gerrit, the code review system. +To do this, visit [golang.org/cl](https://golang.org/cl) +and log in using the same Google Account you used above. +That is all that is required. + +### Install the git-codereview command + +Now install the `git-codereview` command by running, + +``` +go get -u golang.org/x/review/git-codereview +``` + +Make sure `git-codereview` is installed in your shell path, so that the +`git` command can find it. Check that + +``` +$ git codereview help +``` + +prints help text, not an error. + +Note to Git aficionados: The `git-codereview` command is not required to +upload and manage Gerrit code reviews. For those who prefer plain Git, the text +below gives the Git equivalent of each git-codereview command. If you do use plain +Git, note that you still need the commit hooks that the git-codereview command +configures; those hooks add a Gerrit `Change-Id` line to the commit +message and check that all Go source files have been formatted with gofmt. Even +if you intend to use plain Git for daily work, install the hooks in a new Git +checkout by running `git-codereview hooks`. + +### Set up git aliases + +The `git-codereview` command can be run directly from the shell +by typing, for instance, + +``` +$ git codereview sync +``` + +but it is more convenient to set up aliases for `git-codereview`'s own +subcommands, so that the above becomes, + +``` +$ git sync +``` + +The `git-codereview` subcommands have been chosen to be distinct from +Git's own, so it's safe to do so. + +The aliases are optional, but in the rest of this document we will assume +they are installed. +To install them, copy this text into your Git configuration file +(usually `.gitconfig` in your home directory): + +``` +[alias] + change = codereview change + gofmt = codereview gofmt + mail = codereview mail + pending = codereview pending + submit = codereview submit + sync = codereview sync +``` + +### Understanding the git-codereview command + +After installing the `git-codereview` command, you can run + +``` +$ git codereview help +``` + +to learn more about its commands. +You can also read the [command documentation](https://godoc.org/golang.org/x/review/git-codereview). 
+ +### Switch to the master branch + +New changes should +only be made based on the master branch. +Before making a change, make sure you start on the master branch: + +``` +$ git checkout master +$ git sync +```` + +(In Git terms, `git sync` runs +`git pull -r`.) + +### Make a change + +The entire checked-out tree is writable. +Once you have edited files, you must tell Git that they have been modified. +You must also tell Git about any files that are added, removed, or renamed files. +These operations are done with the usual Git commands, +`git add`, +`git rm`, +and +`git mv`. + +If you wish to checkpoint your work, or are ready to send the code out for review, run + +``` +$ git change +``` + +from any directory in your google-api-go-client repository to commit the changes so far. +The name `` is an arbitrary one you choose to identify the +local branch containing your changes. + +(In Git terms, `git change ` +runs `git checkout -b branch`, +then `git branch --set-upstream-to origin/master`, +then `git commit`.) + +Git will open a change description file in your editor. +(It uses the editor named by the `$EDITOR` environment variable, +`vi` by default.) +The file will look like: + +``` +# Please enter the commit message for your changes. Lines starting +# with '#' will be ignored, and an empty message aborts the commit. +# On branch foo +# Changes not staged for commit: +# modified: editedfile.go +# +``` + +At the beginning of this file is a blank line; replace it +with a thorough description of your change. +The first line of the change description is conventionally a one-line +summary of the change, prefixed by `google-api-go-client:`, +and is used as the subject for code review mail. +The rest of the +description elaborates and should provide context for the +change and explain what it does. +If there is a helpful reference, mention it here. + +After editing, the template might now read: + +``` +math: improved Sin, Cos and Tan precision for very large arguments + +The existing implementation has poor numerical properties for +large arguments, so use the McGillicutty algorithm to improve +accuracy above 1e10. + +The algorithm is described at http://wikipedia.org/wiki/McGillicutty_Algorithm + +Fixes #54 + +# Please enter the commit message for your changes. Lines starting +# with '#' will be ignored, and an empty message aborts the commit. +# On branch foo +# Changes not staged for commit: +# modified: editedfile.go +# +``` + +The commented section of the file lists all the modified files in your client. +It is best to keep unrelated changes in different change lists, +so if you see a file listed that should not be included, abort +the command and move that file to a different branch. + +The special notation "Fixes #54" associates the change with issue 54 in the +[google-api-go-client issue tracker](https://github.com/google/google-api-go-client/issues/54). +When this change is eventually submitted, the issue +tracker will automatically mark the issue as fixed. +(There are several such conventions, described in detail in the +[GitHub Issue Tracker documentation](https://help.github.com/articles/closing-issues-via-commit-messages/).) + +Once you have finished writing the commit message, +save the file and exit the editor. + +If you wish to do more editing, re-stage your changes using +`git add`, and then run + +``` +$ git change +``` + +to update the change description and incorporate the staged changes. 
The +change description contains a `Change-Id` line near the bottom, +added by a Git commit hook during the initial +`git change`. +That line is used by Gerrit to match successive uploads of the same change. +Do not edit or delete it. + +(In Git terms, `git change` with no branch name +runs `git commit --amend`.) + +### Mail the change for review + +Once the change is ready, mail it out for review: + +``` +$ git mail +``` + +You can specify a reviewer or CC interested parties +using the `-r` or `-cc` options. +Both accept a comma-separated list of email addresses: + +``` +$ git mail -r joe@golang.org -cc mabel@example.com,math-nuts@swtch.com +``` + +Unless explicitly told otherwise, such as in the discussion leading +up to sending in the change list, please specify +`bradfitz@golang.org`, `gmlewis@google.com`, or +`mcgreevy@golang.org` as a reviewer. + +(In Git terms, `git mail` pushes the local committed +changes to Gerrit using `git push origin HEAD:refs/for/master`.) + +If your change relates to an open issue, please add a comment to the issue +announcing your proposed fix, including a link to your CL. + +The code review server assigns your change an issue number and URL, +which `git mail` will print, something like: + +``` +remote: New Changes: +remote: https://code-review.googlesource.com/99999 math: improved Sin, Cos and Tan precision for very large arguments +``` + +### Reviewing code + +Running `git mail` will send an email to you and the +reviewers asking them to visit the issue's URL and make comments on the change. +When done, the reviewer adds comments through the Gerrit user interface +and clicks "Reply" to send comments back. +You will receive a mail notification when this happens. +You must reply through the web interface. + +### Revise and upload + +You must respond to review comments through the web interface. + +When you have revised the code and are ready for another round of review, +stage those changes and use `git change` to update the +commit. +To send the update change list for another round of review, +run `git mail` again. + +The reviewer can comment on the new copy, and the process repeats. +The reviewer approves the change by giving it a positive score +(+1 or +2) and replying `LGTM`: looks good to me. + +You can see a list of your pending changes by running +`git pending`, and switch between change branches with +`git change `. + +### Synchronize your client + +While you were working, others might have submitted changes to the repository. +To update your local branch, run + +``` +$ git sync +``` + +(In git terms, `git sync` runs +`git pull -r`.) + +If files you were editing have changed, Git does its best to merge the +remote changes into your local changes. +It may leave some files to merge by hand. + +For example, suppose you have edited `sin.go` but +someone else has committed an independent change. +When you run `git sync`, +you will get the (scary-looking) output: + +``` +$ git sync +Failed to merge in the changes. +Patch failed at 0023 math: improved Sin, Cos and Tan precision for very large arguments +The copy of the patch that failed is found in: + /home/you/repo/.git/rebase-apply/patch + +When you have resolved this problem, run "git rebase --continue". +If you prefer to skip this patch, run "git rebase --skip" instead. +To check out the original branch and stop rebasing, run "git rebase --abort". +``` + + +If this happens, run + +``` +$ git status +``` + +to see which files failed to merge. 
+The output will look something like this: + +``` +rebase in progress; onto a24c3eb +You are currently rebasing branch 'mcgillicutty' on 'a24c3eb'. + (fix conflicts and then run "git rebase --continue") + (use "git rebase --skip" to skip this patch) + (use "git rebase --abort" to check out the original branch) + +Unmerged paths: + (use "git reset HEAD ..." to unstage) + (use "git add ..." to mark resolution) + + _both modified: sin.go_ +``` + + +The only important part in that transcript is the italicized "both modified" +line: Git failed to merge your changes with the conflicting change. +When this happens, Git leaves both sets of edits in the file, +with conflicts marked by `<<<<<<<` and +`>>>>>>>`. +It is now your job to edit the file to combine them. +Continuing the example, searching for those strings in `sin.go` +might turn up: + +``` + arg = scale(arg) +<<<<<<< HEAD + if arg > 1e9 { +======= + if arg > 1e10 { +>>>>>>> mcgillicutty + largeReduce(arg) +``` + +Git doesn't show it, but suppose the original text that both edits +started with was 1e8; you changed it to 1e10 and the other change to 1e9, +so the correct answer might now be 1e10. First, edit the section +to remove the markers and leave the correct code: + +``` + arg = scale(arg) + if arg > 1e10 { + largeReduce(arg) +``` + +Then tell Git that the conflict is resolved by running + +``` +$ git add sin.go +``` + +If you had been editing the file, say for debugging, but do not +care to preserve your changes, you can run +`git reset HEAD sin.go` +to abandon your changes. +Then run `git rebase --continue` to +restore the change commit. + +### Reviewing code by others + +You can import a change proposed by someone else into your local Git repository. +On the Gerrit review page, click the "Download ▼" link in the upper right +corner, copy the "Checkout" command and run it from your local Git repo. +It should look something like this: + +``` +$ git fetch https://code.googlesource.com/review refs/changes/21/1221/1 && git checkout FETCH_HEAD +``` + +To revert, change back to the branch you were working in. + +### Submit the change after the review + +After the code has been `LGTM`'ed, an approver may +submit it to the master branch using the Gerrit UI. +There is a "Submit" button on the web page for the change +that appears once the change is approved (marked +2). + +This checks the change into the repository. +The change description will include a link to the code review, +and the code review will be updated with a link to the change +in the repository. +Since the method used to integrate the changes is "Cherry Pick", +the commit hashes in the repository will be changed by +the submit operation. + +### More information + +In addition to the information here, the Go community maintains a [CodeReview](https://golang.org/wiki/CodeReview) wiki page. +Feel free to contribute to this page as you learn the review process. + +## Contributors + +Files in the google-api-go-client repository don't list author names, +both to avoid clutter and to avoid having to keep the lists up to date. +Instead, please add your name to the [`CONTRIBUTORS`](/CONTRIBUTORS) +file as your first code review, keeping the names in sorted order. 
diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 000000000..fe55ebff0 --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,55 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Alain Vongsouvanhalainv +Andrew Gerrand +Brad Fitzpatrick +Eric Koleda +Francesc Campoy +Garrick Evans +Glenn Lewis +Ivan Krasin +Jason Hall +Johan Euphrosine +Kostik Shtoyk +Kunpei Sakai +Matthew Whisenhunt +Michael McGreevy +Nick Craig-Wood +Robbie Trencheny +Ross Light +Sarah Adams +Scott Van Woudenberg +Takashi Matsuo diff --git a/vendor/google.golang.org/api/GettingStarted.md b/vendor/google.golang.org/api/GettingStarted.md new file mode 100644 index 000000000..72855cda3 --- /dev/null +++ b/vendor/google.golang.org/api/GettingStarted.md @@ -0,0 +1,146 @@ +# Getting Started with the Google APIs for Go + +## Getting Started + +This is a quick walk-through of how to get started with the Google APIs for Go. + +## Background + +The first thing to understand is that the Google API libraries are auto-generated for +each language, including Go, so they may not feel like 100% natural for any language. +The Go versions are pretty natural, but please forgive any small non-idiomatic things. +(Suggestions welcome, though!) + +## Installing + +Pick an API and a version of that API to install. +You can find the complete list by looking at the +[directories here](https://github.com/google/google-api-go-client/tree/master/). + +For example, let's install the +[urlshortener's version 1 API](https://godoc.org/google.golang.org/api/urlshortener/v1): + +``` +$ go get -u google.golang.org/api/urlshortener/v1 +``` + +Now it's ready for use in your code. + +## Using + +Once you've installed a library, you import it like this: + +```go +package main + +import ( + "context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/urlshortener/v1" +) +``` + +The package name, if you don't override it on your import line, is the name of the +API without the version number. In the case above, just `urlshortener`. + +## Instantiating + +Each API has a `New` function taking an `*http.Client` and returning an API-specific `*Service`. 
+ +You create the service like: + +```go + svc, err := urlshortener.New(httpClient) +``` + +## OAuth HTTP Client + +The HTTP client you pass in to the service must be one that automatically adds +Google-supported Authorization information to the requests. + +There are several ways to do authentication. They will all involve the package +[golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) in some way. + +### 3-legged OAuth + +For 3-legged OAuth (your application redirecting a user through a website to get a +token giving your application access to that user's resources), you will need to +create an oauth2.Config, + + +```go + var config = &oauth2.Config{ + ClientID: "", // from https://console.developers.google.com/project//apiui/credential + ClientSecret: "", // from https://console.developers.google.com/project//apiui/credential + Endpoint: google.Endpoint, + Scopes: []string{urlshortener.UrlshortenerScope}, + } +``` + +... and then use the AuthCodeURL, Exchange, and Client methods on it. +For an example, see: https://godoc.org/golang.org/x/oauth2#example-Config + +For the redirect URL, see +https://developers.google.com/identity/protocols/OAuth2InstalledApp#choosingredirecturi + +### Service Accounts + +To use a Google service account, or the GCE metadata service, see +the [golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) package. +In particular, see [google.DefaultClient](https://godoc.org/golang.org/x/oauth2/google#DefaultClient). + +### Using API Keys + +Some APIs require passing API keys from your application. +To do this, you can use +[transport.APIKey](https://godoc.org/google.golang.org/api/googleapi/transport#APIKey): + +```go + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{ + Transport: &transport.APIKey{Key: developerKey}, + }) + oauthConfig := &oauth2.Config{ .... } + var token *oauth2.Token = .... // via cache, or oauthConfig.Exchange + httpClient := oauthConfig.Client(ctx, token) + svc, err := urlshortener.New(httpClient) + ... +``` + +## Using the Service + +Each service contains zero or more methods and zero or more sub-services. +The sub-services related to a specific type of "Resource". + +Those sub-services then contain their own methods. + +For instance, the urlshortener API has just the "Url" sub-service: + +```go + url, err := svc.Url.Get(shortURL).Do() + if err != nil { + ... + } + fmt.Printf("The URL %s goes to %s\n", shortURL, url.LongUrl) +``` + +For a more complete example, see +[urlshortener.go](https://github.com/google/google-api-go-client/tree/master/examples/urlshortener.go) +in the [examples directory](https://github.com/google/google-api-go-client/tree/master/examples/). +(the examples use some functions in `main.go` in the same directory) + +## Error Handling + +Most errors returned by the `Do` methods of these clients will be of type +[`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). +Use a type assertion to obtain the HTTP status code and other properties of the +error: + +```go + url, err := svc.Url.Get(shortURL).Do() + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + ... + } + } +``` diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE new file mode 100644 index 000000000..263aa7a0c --- /dev/null +++ b/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/NOTES b/vendor/google.golang.org/api/NOTES new file mode 100644 index 000000000..571f6ec79 --- /dev/null +++ b/vendor/google.golang.org/api/NOTES @@ -0,0 +1,13 @@ +Discovery Service: +https://developers.google.com/discovery/ +https://developers.google.com/discovery/v1/reference/ + +The "type" key: +http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1 + +The "format" key: +http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.23 +https://developers.google.com/discovery/v1/type-format + +Google JSON format docs: +http://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml diff --git a/vendor/google.golang.org/api/README.md b/vendor/google.golang.org/api/README.md new file mode 100644 index 000000000..28c712768 --- /dev/null +++ b/vendor/google.golang.org/api/README.md @@ -0,0 +1,103 @@ +# Google APIs Client Library for Go + +## Getting Started + +``` +$ go get google.golang.org/api/tasks/v1 +$ go get google.golang.org/api/moderator/v1 +$ go get google.golang.org/api/urlshortener/v1 +... etc ... +``` + +and using: + +```go +package main + +import ( + "net/http" + + "google.golang.org/api/urlshortener/v1" +) + +func main() { + svc, err := urlshortener.New(http.DefaultClient) + // ... +} +``` + +* For a longer tutorial, see the [Getting Started guide](https://github.com/google/google-api-go-client/blob/master/GettingStarted.md). +* For examples, see the [examples directory](https://github.com/google/google-api-go-client/tree/master/examples). +* For support, use the [golang-nuts](https://groups.google.com/group/golang-nuts) mailing list. + +## Status +[![GoDoc](https://godoc.org/google.golang.org/api?status.svg)](https://godoc.org/google.golang.org/api) + +These are auto-generated Go libraries from the Google Discovery Service's JSON description files of the available "new style" Google APIs. + +Due to the auto-generated nature of this collection of libraries, complete APIs or specific versions can appear or go away without notice. 
+As a result, you should always locally vendor any API(s) that your code relies upon. + +These client libraries are officially supported by Google. However, the libraries are considered complete and are in maintenance mode. This means that we will address critical bugs and security issues but will not add any new features. + +If you're working with Google Cloud Platform APIs such as Datastore or Pub/Sub, +consider using the +[Cloud Client Libraries for Go](https://github.com/GoogleCloudPlatform/google-cloud-go) +instead. These are the new and +idiomatic Go libraries targeted specifically at Google Cloud Platform Services. + +The generator itself and the code it produces are beta. Some APIs are +alpha/beta, and indicated as such in the import path (e.g., +"google.golang.org/api/someapi/v1alpha"). + +## Application Default Credentials Example + +Application Default Credentials provide a simplified way to obtain credentials +for authenticating with Google APIs. + +The Application Default Credentials authenticate as the application itself, +which make them great for working with Google Cloud APIs like Storage or +Datastore. They are the recommended form of authentication when building +applications that run on Google Compute Engine or Google App Engine. + +Default credentials are provided by the `golang.org/x/oauth2/google` package. To use them, add the following import: + +```go +import "golang.org/x/oauth2/google" +``` + +Some credentials types require you to specify scopes, and service entry points may not inject them. If you encounter this situation you may need to specify scopes as follows: + +```go +import ( + "context" + "golang.org/x/oauth2/google" + "google.golang.org/api/compute/v1" +) + +func main() { + // Use oauth2.NoContext if there isn't a good context to pass in. + ctx := context.Background() + + client, err := google.DefaultClient(ctx, compute.ComputeScope) + if err != nil { + //... + } + computeService, err := compute.New(client) + if err != nil { + //... + } +} +``` + +If you need a `oauth2.TokenSource`, use the `DefaultTokenSource` function: + +```go +ts, err := google.DefaultTokenSource(ctx, scope1, scope2, ...) +if err != nil { + //... +} +client := oauth2.NewClient(ctx, ts) +``` + +See also: [golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) package documentation. 
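A minimal end-to-end sketch tying together the two vendored documents above: the Application Default Credentials flow from README.md and the urlshortener service from GettingStarted.md. This is illustrative only, not part of the vendored files; the short URL passed to `Get` is a placeholder and error handling is abbreviated.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/urlshortener/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials: picks up GOOGLE_APPLICATION_CREDENTIALS,
	// gcloud user credentials, or the GCE/GAE metadata service.
	client, err := google.DefaultClient(ctx, urlshortener.UrlshortenerScope)
	if err != nil {
		log.Fatal(err)
	}

	// Each generated package exposes New(*http.Client) (*Service, error).
	svc, err := urlshortener.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "https://goo.gl/example" is a placeholder short URL.
	url, err := svc.Url.Get("https://goo.gl/example").Do()
	if err != nil {
		// Errors from Do are usually *googleapi.Error; inspect the HTTP status.
		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
			log.Fatalf("short URL not found: %v", e)
		}
		log.Fatal(err)
	}
	fmt.Printf("%s goes to %s\n", url.Id, url.LongUrl)
}
```

The same pattern applies to every service vendored under google.golang.org/api in this change: build an authenticated `*http.Client`, hand it to the package's `New` function, and type-assert errors returned by `Do` to `*googleapi.Error` when the HTTP status matters.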
diff --git a/vendor/google.golang.org/api/TODO b/vendor/google.golang.org/api/TODO new file mode 100644 index 000000000..de2467c39 --- /dev/null +++ b/vendor/google.golang.org/api/TODO @@ -0,0 +1,2 @@ +Moved to: +https://github.com/google/google-api-go-client/issues diff --git a/vendor/google.golang.org/api/api-list.json b/vendor/google.golang.org/api/api-list.json new file mode 100644 index 000000000..54fca74c6 --- /dev/null +++ b/vendor/google.golang.org/api/api-list.json @@ -0,0 +1,3363 @@ +{ + "kind": "discovery#directoryList", + "discoveryVersion": "v1", + "items": [ + { + "kind": "discovery#directoryItem", + "id": "abusiveexperiencereport:v1", + "name": "abusiveexperiencereport", + "version": "v1", + "title": "Abusive Experience Report API", + "description": "Views Abusive Experience Report data, and gets a list of sites that have a significant number of abusive experiences.", + "discoveryRestUrl": "https://abusiveexperiencereport.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/abusive-experience-report/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "acceleratedmobilepageurl:v1", + "name": "acceleratedmobilepageurl", + "version": "v1", + "title": "Accelerated Mobile Pages (AMP) URL API", + "description": "This API contains a single method, batchGet. Call this method to retrieve the AMP URL (and equivalent AMP Cache URL) for given public URL(s).", + "discoveryRestUrl": "https://acceleratedmobilepageurl.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/amp/cache/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "accesscontextmanager:v1beta", + "name": "accesscontextmanager", + "version": "v1beta", + "title": "Access Context Manager API", + "description": "An API for setting attribute based access control to requests to GCP services.", + "discoveryRestUrl": "https://accesscontextmanager.googleapis.com/$discovery/rest?version=v1beta", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/access-context-manager/docs/reference/rest/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "adexchangebuyer:v1.2", + "name": "adexchangebuyer", + "version": "v1.2", + "title": "Ad Exchange Buyer API", + "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/adexchangebuyer/v1.2/rest", + "discoveryLink": "./apis/adexchangebuyer/v1.2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", + "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" + }, + "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "adexchangebuyer:v1.3", + "name": "adexchangebuyer", + "version": "v1.3", + "title": 
"Ad Exchange Buyer API", + "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/adexchangebuyer/v1.3/rest", + "discoveryLink": "./apis/adexchangebuyer/v1.3/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", + "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" + }, + "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "adexchangebuyer:v1.4", + "name": "adexchangebuyer", + "version": "v1.4", + "title": "Ad Exchange Buyer API", + "description": "Accesses your bidding-account information, submits creatives for validation, finds available direct deals, and retrieves performance reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/adexchangebuyer/v1.4/rest", + "discoveryLink": "./apis/adexchangebuyer/v1.4/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", + "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" + }, + "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "adexchangebuyer2:v2beta1", + "name": "adexchangebuyer2", + "version": "v2beta1", + "title": "Ad Exchange Buyer API II", + "description": "Accesses the latest features for managing Ad Exchange accounts, Real-Time Bidding configurations and auction metrics, and Marketplace programmatic deals.", + "discoveryRestUrl": "https://adexchangebuyer.googleapis.com/$discovery/rest?version=v2beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/ad-exchange/buyer-rest/reference/rest/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "adexperiencereport:v1", + "name": "adexperiencereport", + "version": "v1", + "title": "Ad Experience Report API", + "description": "Views Ad Experience Report data, and gets a list of sites that have a significant number of annoying ads.", + "discoveryRestUrl": "https://adexperiencereport.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/ad-experience-report/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "admin:datatransfer_v1", + "name": "admin", + "version": "datatransfer_v1", + "title": "Admin Data Transfer API", + "description": "Transfers user data from one user to another.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/admin/datatransfer_v1/rest", + "discoveryLink": "./apis/admin/datatransfer_v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/admin-sdk/data-transfer/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "admin:directory_v1", + "name": "admin", + "version": "directory_v1", + "title": 
"Admin Directory API", + "description": "Manages enterprise resources such as users and groups, administrative notifications, security features, and more.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/admin/directory_v1/rest", + "discoveryLink": "./apis/admin/directory_v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/admin-sdk/directory/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "admin:reports_v1", + "name": "admin", + "version": "reports_v1", + "title": "Admin Reports API", + "description": "Fetches reports for the administrators of G Suite customers about the usage, collaboration, security, and risk for their users.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/admin/reports_v1/rest", + "discoveryLink": "./apis/admin/reports_v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/admin-sdk/reports/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "adsense:v1.4", + "name": "adsense", + "version": "v1.4", + "title": "AdSense Management API", + "description": "Accesses AdSense publishers' inventory and generates performance reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/adsense/v1.4/rest", + "discoveryLink": "./apis/adsense/v1.4/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/adsense-16.png", + "x32": "https://www.google.com/images/icons/product/adsense-32.png" + }, + "documentationLink": "https://developers.google.com/adsense/management/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "adsensehost:v4.1", + "name": "adsensehost", + "version": "v4.1", + "title": "AdSense Host API", + "description": "Generates performance reports, generates ad codes, and provides publisher management capabilities for AdSense Hosts.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/adsensehost/v4.1/rest", + "discoveryLink": "./apis/adsensehost/v4.1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/adsense-16.png", + "x32": "https://www.google.com/images/icons/product/adsense-32.png" + }, + "documentationLink": "https://developers.google.com/adsense/host/", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "alertcenter:v1beta1", + "name": "alertcenter", + "version": "v1beta1", + "title": "G Suite Alert Center API", + "description": "Manages alerts on issues affecting your domain.", + "discoveryRestUrl": "https://alertcenter.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/admin-sdk/alertcenter/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "analytics:v2.4", + "name": "analytics", + "version": "v2.4", + "title": "Google Analytics API", + "description": "Views and manages your Google Analytics data.", + "discoveryRestUrl": 
"https://www.googleapis.com/discovery/v1/apis/analytics/v2.4/rest", + "discoveryLink": "./apis/analytics/v2.4/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/analytics-16.png", + "x32": "https://www.google.com/images/icons/product/analytics-32.png" + }, + "documentationLink": "https://developers.google.com/analytics/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "analytics:v3", + "name": "analytics", + "version": "v3", + "title": "Google Analytics API", + "description": "Views and manages your Google Analytics data.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/analytics/v3/rest", + "discoveryLink": "./apis/analytics/v3/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/analytics-16.png", + "x32": "https://www.google.com/images/icons/product/analytics-32.png" + }, + "documentationLink": "https://developers.google.com/analytics/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "analyticsreporting:v4", + "name": "analyticsreporting", + "version": "v4", + "title": "Google Analytics Reporting API", + "description": "Accesses Analytics report data.", + "discoveryRestUrl": "https://analyticsreporting.googleapis.com/$discovery/rest?version=v4", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/analytics/devguides/reporting/core/v4/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "androiddeviceprovisioning:v1", + "name": "androiddeviceprovisioning", + "version": "v1", + "title": "Android Device Provisioning Partner API", + "description": "Automates Android zero-touch enrollment for device resellers, customers, and EMMs.", + "discoveryRestUrl": "https://androiddeviceprovisioning.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/zero-touch/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "androidenterprise:v1", + "name": "androidenterprise", + "version": "v1", + "title": "Google Play EMM API", + "description": "Manages the deployment of apps to Android for Work users.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/androidenterprise/v1/rest", + "discoveryLink": "./apis/androidenterprise/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/android-16.png", + "x32": "https://www.google.com/images/icons/product/android-32.png" + }, + "documentationLink": "https://developers.google.com/android/work/play/emm-api", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "androidmanagement:v1", + "name": "androidmanagement", + "version": "v1", + "title": "Android Management API", + "description": "The Android Management API provides remote enterprise management of Android devices and apps.", + "discoveryRestUrl": "https://androidmanagement.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/android/management", + "preferred": true + }, + { + 
"kind": "discovery#directoryItem", + "id": "androidpublisher:v1", + "name": "androidpublisher", + "version": "v1", + "title": "Google Play Developer API", + "description": "Accesses Android application developers' Google Play accounts.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/androidpublisher/v1/rest", + "discoveryLink": "./apis/androidpublisher/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/android-16.png", + "x32": "https://www.google.com/images/icons/product/android-32.png" + }, + "documentationLink": "https://developers.google.com/android-publisher", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "androidpublisher:v1.1", + "name": "androidpublisher", + "version": "v1.1", + "title": "Google Play Developer API", + "description": "Accesses Android application developers' Google Play accounts.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/androidpublisher/v1.1/rest", + "discoveryLink": "./apis/androidpublisher/v1.1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/android-16.png", + "x32": "https://www.google.com/images/icons/product/android-32.png" + }, + "documentationLink": "https://developers.google.com/android-publisher", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "androidpublisher:v2", + "name": "androidpublisher", + "version": "v2", + "title": "Google Play Developer API", + "description": "Accesses Android application developers' Google Play accounts.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/androidpublisher/v2/rest", + "discoveryLink": "./apis/androidpublisher/v2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/android-16.png", + "x32": "https://www.google.com/images/icons/product/android-32.png" + }, + "documentationLink": "https://developers.google.com/android-publisher", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "androidpublisher:v3", + "name": "androidpublisher", + "version": "v3", + "title": "Google Play Developer API", + "description": "Accesses Android application developers' Google Play accounts.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/androidpublisher/v3/rest", + "discoveryLink": "./apis/androidpublisher/v3/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/android-16.png", + "x32": "https://www.google.com/images/icons/product/android-32.png" + }, + "documentationLink": "https://developers.google.com/android-publisher", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1alpha", + "name": "appengine", + "version": "v1alpha", + "title": "App Engine Admin API", + "description": "Provisions and manages developers' App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1alpha", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1beta", + "name": "appengine", + "version": "v1beta", + "title": "App Engine Admin API", + "description": "Provisions and manages developers' App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1beta", + "icons": { + "x16": 
"https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1", + "name": "appengine", + "version": "v1", + "title": "App Engine Admin API", + "description": "Provisions and manages developers' App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1beta4", + "name": "appengine", + "version": "v1beta4", + "title": "App Engine Admin API", + "description": "Provisions and manages developers' App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1beta4", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "appengine:v1beta5", + "name": "appengine", + "version": "v1beta5", + "title": "App Engine Admin API", + "description": "Provisions and manages developers' App Engine applications.", + "discoveryRestUrl": "https://appengine.googleapis.com/$discovery/rest?version=v1beta5", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/appengine/docs/admin-api/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "appsactivity:v1", + "name": "appsactivity", + "version": "v1", + "title": "Drive Activity API", + "description": "Provides a historical view of activity.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/appsactivity/v1/rest", + "discoveryLink": "./apis/appsactivity/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/google-apps/activity/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "appstate:v1", + "name": "appstate", + "version": "v1", + "title": "Google App State API", + "description": "The Google App State API.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/appstate/v1/rest", + "discoveryLink": "./apis/appstate/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/games/services/web/api/states", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "bigquery:v2", + "name": "bigquery", + "version": "v2", + "title": "BigQuery API", + "description": "A data platform for customers to create, manage, share and query data.", + "discoveryRestUrl": 
"https://www.googleapis.com/discovery/v1/apis/bigquery/v2/rest", + "discoveryLink": "./apis/bigquery/v2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/search-16.gif", + "x32": "https://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/bigquery/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "bigquerydatatransfer:v1", + "name": "bigquerydatatransfer", + "version": "v1", + "title": "BigQuery Data Transfer API", + "description": "Transfers data from partner SaaS applications to Google BigQuery on a scheduled, managed basis.", + "discoveryRestUrl": "https://bigquerydatatransfer.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/bigquery/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "binaryauthorization:v1beta1", + "name": "binaryauthorization", + "version": "v1beta1", + "title": "Binary Authorization API", + "description": "The management interface for Binary Authorization, a system providing policy control for images deployed to Kubernetes Engine clusters.", + "discoveryRestUrl": "https://binaryauthorization.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/binary-authorization/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "blogger:v2", + "name": "blogger", + "version": "v2", + "title": "Blogger API", + "description": "API for access to the data within Blogger.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/blogger/v2/rest", + "discoveryLink": "./apis/blogger/v2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/blogger-16.png", + "x32": "https://www.google.com/images/icons/product/blogger-32.png" + }, + "documentationLink": "https://developers.google.com/blogger/docs/2.0/json/getting_started", + "labels": [ + "limited_availability" + ], + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "blogger:v3", + "name": "blogger", + "version": "v3", + "title": "Blogger API", + "description": "API for access to the data within Blogger.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/blogger/v3/rest", + "discoveryLink": "./apis/blogger/v3/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/blogger-16.png", + "x32": "https://www.google.com/images/icons/product/blogger-32.png" + }, + "documentationLink": "https://developers.google.com/blogger/docs/3.0/getting_started", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "books:v1", + "name": "books", + "version": "v1", + "title": "Books API", + "description": "Searches for books and manages your Google Books library.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/books/v1/rest", + "discoveryLink": "./apis/books/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/ebooks-16.png", + "x32": "https://www.google.com/images/icons/product/ebooks-32.png" + }, + "documentationLink": 
"https://developers.google.com/books/docs/v1/getting_started", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "calendar:v3", + "name": "calendar", + "version": "v3", + "title": "Calendar API", + "description": "Manipulates events and other calendar data.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/calendar/v3/rest", + "discoveryLink": "./apis/calendar/v3/rest", + "icons": { + "x16": "http://www.google.com/images/icons/product/calendar-16.png", + "x32": "http://www.google.com/images/icons/product/calendar-32.png" + }, + "documentationLink": "https://developers.google.com/google-apps/calendar/firstapp", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "chat:v1", + "name": "chat", + "version": "v1", + "title": "Hangouts Chat API", + "description": "Create bots and extend the new Hangouts Chat.", + "discoveryRestUrl": "https://chat.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/hangouts/chat", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "civicinfo:v2", + "name": "civicinfo", + "version": "v2", + "title": "Google Civic Information API", + "description": "Provides polling places, early vote locations, contest data, election officials, and government representatives for U.S. residential addresses.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/civicinfo/v2/rest", + "discoveryLink": "./apis/civicinfo/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/civic-information", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "classroom:v1", + "name": "classroom", + "version": "v1", + "title": "Google Classroom API", + "description": "Manages classes, rosters, and invitations in Google Classroom.", + "discoveryRestUrl": "https://classroom.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/classroom", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudasset:v1beta1", + "name": "cloudasset", + "version": "v1beta1", + "title": "Cloud Asset API", + "description": "The cloud asset API manages the history and inventory of cloud resources.", + "discoveryRestUrl": "https://cloudasset.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://console.cloud.google.com/apis/api/cloudasset.googleapis.com/overview", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudbilling:v1", + "name": "cloudbilling", + "version": "v1", + "title": "Cloud Billing API", + "description": "Allows developers to manage billing for their Google Cloud Platform projects programmatically.", + "discoveryRestUrl": "https://cloudbilling.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": 
"https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/billing/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudbuild:v1alpha1", + "name": "cloudbuild", + "version": "v1alpha1", + "title": "Cloud Build API", + "description": "Creates and manages builds on Google Cloud Platform.", + "discoveryRestUrl": "https://cloudbuild.googleapis.com/$discovery/rest?version=v1alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/cloud-build/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudbuild:v1", + "name": "cloudbuild", + "version": "v1", + "title": "Cloud Build API", + "description": "Creates and manages builds on Google Cloud Platform.", + "discoveryRestUrl": "https://cloudbuild.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/cloud-build/docs/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "clouddebugger:v2", + "name": "clouddebugger", + "version": "v2", + "title": "Stackdriver Debugger API", + "description": "Examines the call stack and variables of a running application without stopping or slowing it down.", + "discoveryRestUrl": "https://clouddebugger.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/debugger", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "clouderrorreporting:v1beta1", + "name": "clouderrorreporting", + "version": "v1beta1", + "title": "Stackdriver Error Reporting API", + "description": "Groups and counts similar errors from cloud services and applications, reports new errors, and provides access to error groups and their associated errors.", + "discoveryRestUrl": "https://clouderrorreporting.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/error-reporting/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudfunctions:v1", + "name": "cloudfunctions", + "version": "v1", + "title": "Cloud Functions API", + "description": "Manages lightweight user-provided functions executed in response to events.", + "discoveryRestUrl": "https://cloudfunctions.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/functions", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudfunctions:v1beta2", + "name": "cloudfunctions", + "version": "v1beta2", + "title": "Cloud Functions API", + "description": "Manages lightweight 
user-provided functions executed in response to events.", + "discoveryRestUrl": "https://cloudfunctions.googleapis.com/$discovery/rest?version=v1beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/functions", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudiot:v1", + "name": "cloudiot", + "version": "v1", + "title": "Cloud IoT API", + "description": "Registers and manages IoT (Internet of Things) devices that connect to the Google Cloud Platform.", + "discoveryRestUrl": "https://cloudiot.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/iot", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudkms:v1", + "name": "cloudkms", + "version": "v1", + "title": "Cloud Key Management Service (KMS) API", + "description": "Manages keys and performs cryptographic operations in a central cloud service, for direct use by other cloud resources and applications.", + "discoveryRestUrl": "https://cloudkms.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/kms/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudprofiler:v2", + "name": "cloudprofiler", + "version": "v2", + "title": "Stackdriver Profiler API", + "description": "Manages continuous profiling information.", + "discoveryRestUrl": "https://cloudprofiler.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/profiler/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudresourcemanager:v1", + "name": "cloudresourcemanager", + "version": "v1", + "title": "Cloud Resource Manager API", + "description": "Creates, reads, and updates metadata for Google Cloud Platform resource containers.", + "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/resource-manager", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudresourcemanager:v1beta1", + "name": "cloudresourcemanager", + "version": "v1beta1", + "title": "Cloud Resource Manager API", + "description": "Creates, reads, and updates metadata for Google Cloud Platform resource containers.", + "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/resource-manager", + "preferred": false + }, + { + "kind": 
"discovery#directoryItem", + "id": "cloudresourcemanager:v2", + "name": "cloudresourcemanager", + "version": "v2", + "title": "Cloud Resource Manager API", + "description": "Creates, reads, and updates metadata for Google Cloud Platform resource containers.", + "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/resource-manager", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudresourcemanager:v2beta1", + "name": "cloudresourcemanager", + "version": "v2beta1", + "title": "Cloud Resource Manager API", + "description": "Creates, reads, and updates metadata for Google Cloud Platform resource containers.", + "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v2beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/resource-manager", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudscheduler:v1beta1", + "name": "cloudscheduler", + "version": "v1beta1", + "title": "Cloud Scheduler API", + "description": "Creates and manages jobs run on a regular recurring schedule.", + "discoveryRestUrl": "https://cloudscheduler.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/scheduler/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudsearch:v1", + "name": "cloudsearch", + "version": "v1", + "title": "Cloud Search API", + "description": "Cloud Search provides cloud-based search capabilities over G Suite data. 
The Cloud Search API allows indexing of non-G Suite data into Cloud Search.", + "discoveryRestUrl": "https://cloudsearch.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://gsuite.google.com/products/cloud-search/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudshell:v1alpha1", + "name": "cloudshell", + "version": "v1alpha1", + "title": "Cloud Shell API", + "description": "Allows users to start, configure, and connect to interactive shell sessions running in the cloud.", + "discoveryRestUrl": "https://cloudshell.googleapis.com/$discovery/rest?version=v1alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/shell/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudshell:v1", + "name": "cloudshell", + "version": "v1", + "title": "Cloud Shell API", + "description": "Allows users to start, configure, and connect to interactive shell sessions running in the cloud.", + "discoveryRestUrl": "https://cloudshell.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/shell/docs/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudtasks:v2beta2", + "name": "cloudtasks", + "version": "v2beta2", + "title": "Cloud Tasks API", + "description": "Manages the execution of large numbers of distributed requests.", + "discoveryRestUrl": "https://cloudtasks.googleapis.com/$discovery/rest?version=v2beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/tasks/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudtasks:v2beta3", + "name": "cloudtasks", + "version": "v2beta3", + "title": "Cloud Tasks API", + "description": "Manages the execution of large numbers of distributed requests.", + "discoveryRestUrl": "https://cloudtasks.googleapis.com/$discovery/rest?version=v2beta3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/tasks/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "cloudtrace:v2alpha1", + "name": "cloudtrace", + "version": "v2alpha1", + "title": "Stackdriver Trace API", + "description": "Sends application trace data to Stackdriver Trace for viewing. Trace data is collected for all App Engine applications by default. Trace data from other applications can be provided using this API. This library is used to interact with the Trace API directly. 
If you are looking to instrument your application for Stackdriver Trace, we recommend using OpenCensus.", + "discoveryRestUrl": "https://cloudtrace.googleapis.com/$discovery/rest?version=v2alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/trace", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudtrace:v1", + "name": "cloudtrace", + "version": "v1", + "title": "Stackdriver Trace API", + "description": "Sends application trace data to Stackdriver Trace for viewing. Trace data is collected for all App Engine applications by default. Trace data from other applications can be provided using this API. This library is used to interact with the Trace API directly. If you are looking to instrument your application for Stackdriver Trace, we recommend using OpenCensus.", + "discoveryRestUrl": "https://cloudtrace.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/trace", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "cloudtrace:v2", + "name": "cloudtrace", + "version": "v2", + "title": "Stackdriver Trace API", + "description": "Sends application trace data to Stackdriver Trace for viewing. Trace data is collected for all App Engine applications by default. Trace data from other applications can be provided using this API. This library is used to interact with the Trace API directly. If you are looking to instrument your application for Stackdriver Trace, we recommend using OpenCensus.", + "discoveryRestUrl": "https://cloudtrace.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/trace", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "composer:v1", + "name": "composer", + "version": "v1", + "title": "Cloud Composer API", + "description": "Manages Apache Airflow environments on Google Cloud Platform.", + "discoveryRestUrl": "https://composer.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/composer/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "composer:v1beta1", + "name": "composer", + "version": "v1beta1", + "title": "Cloud Composer API", + "description": "Manages Apache Airflow environments on Google Cloud Platform.", + "discoveryRestUrl": "https://composer.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/composer/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "compute:alpha", + "name": "compute", + "version": "alpha", + "title": "Compute Engine API", + "description": "Creates and runs virtual machines on 
Google Cloud Platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/compute/alpha/rest", + "discoveryLink": "./apis/compute/alpha/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", + "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "compute:beta", + "name": "compute", + "version": "beta", + "title": "Compute Engine API", + "description": "Creates and runs virtual machines on Google Cloud Platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discoveryLink": "./apis/compute/beta/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", + "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "compute:v1", + "name": "compute", + "version": "v1", + "title": "Compute Engine API", + "description": "Creates and runs virtual machines on Google Cloud Platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discoveryLink": "./apis/compute/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", + "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "container:v1", + "name": "container", + "version": "v1", + "title": "Kubernetes Engine API", + "description": "Builds and manages container-based applications, powered by the open source Kubernetes technology.", + "discoveryRestUrl": "https://container.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/container-engine/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "container:v1beta1", + "name": "container", + "version": "v1beta1", + "title": "Kubernetes Engine API", + "description": "Builds and manages container-based applications, powered by the open source Kubernetes technology.", + "discoveryRestUrl": "https://container.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/container-engine/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "content:v2", + "name": "content", + "version": "v2", + "title": "Content API for Shopping", + "description": "Manages product items, inventory, and Merchant Center accounts for Google Shopping.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/content/v2/rest", + "discoveryLink": "./apis/content/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": 
"https://developers.google.com/shopping-content", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "customsearch:v1", + "name": "customsearch", + "version": "v1", + "title": "CustomSearch API", + "description": "Searches over a website or collection of websites", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/customsearch/v1/rest", + "discoveryLink": "./apis/customsearch/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/custom-search/v1/using_rest", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dataflow:v1b3", + "name": "dataflow", + "version": "v1b3", + "title": "Dataflow API", + "description": "Manages Google Cloud Dataflow projects on Google Cloud Platform.", + "discoveryRestUrl": "https://dataflow.googleapis.com/$discovery/rest?version=v1b3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataflow", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dataproc:v1", + "name": "dataproc", + "version": "v1", + "title": "Cloud Dataproc API", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "discoveryRestUrl": "https://dataproc.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataproc/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dataproc:v1beta2", + "name": "dataproc", + "version": "v1beta2", + "title": "Cloud Dataproc API", + "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", + "discoveryRestUrl": "https://dataproc.googleapis.com/$discovery/rest?version=v1beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dataproc/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "datastore:v1", + "name": "datastore", + "version": "v1", + "title": "Cloud Datastore API", + "description": "Accesses the schemaless NoSQL database to provide fully managed, robust, scalable storage for your application.", + "discoveryRestUrl": "https://datastore.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/datastore/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "datastore:v1beta1", + "name": "datastore", + "version": "v1beta1", + "title": "Cloud Datastore API", + "description": "Accesses the schemaless NoSQL database to provide fully managed, robust, scalable storage for your application.", + "discoveryRestUrl": "https://datastore.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": 
"https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/datastore/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "datastore:v1beta3", + "name": "datastore", + "version": "v1beta3", + "title": "Cloud Datastore API", + "description": "Accesses the schemaless NoSQL database to provide fully managed, robust, scalable storage for your application.", + "discoveryRestUrl": "https://datastore.googleapis.com/$discovery/rest?version=v1beta3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/datastore/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "deploymentmanager:alpha", + "name": "deploymentmanager", + "version": "alpha", + "title": "Google Cloud Deployment Manager Alpha API", + "description": "The Deployment Manager API allows users to declaratively configure, deploy and run complex solutions on the Google Cloud Platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/deploymentmanager/alpha/rest", + "discoveryLink": "./apis/deploymentmanager/alpha/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/deployment-manager/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "deploymentmanager:v2beta", + "name": "deploymentmanager", + "version": "v2beta", + "title": "Google Cloud Deployment Manager API V2Beta Methods", + "description": "The Deployment Manager API allows users to declaratively configure, deploy and run complex solutions on the Google Cloud Platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/deploymentmanager/v2beta/rest", + "discoveryLink": "./apis/deploymentmanager/v2beta/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/deployment-manager/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "deploymentmanager:v2", + "name": "deploymentmanager", + "version": "v2", + "title": "Google Cloud Deployment Manager API", + "description": "Declares, configures, and deploys complex solutions on Google Cloud Platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/deploymentmanager/v2/rest", + "discoveryLink": "./apis/deploymentmanager/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/deployment-manager/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dfareporting:v3.0", + "name": "dfareporting", + "version": "v3.0", + "title": "DCM/DFA Reporting And Trafficking API", + "description": "Manages your DoubleClick Campaign Manager ad campaigns and reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dfareporting/v3.0/rest", + "discoveryLink": "./apis/dfareporting/v3.0/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", + "x32": 
"https://www.google.com/images/icons/product/doubleclick-32.gif" + }, + "documentationLink": "https://developers.google.com/doubleclick-advertisers/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "dfareporting:v3.1", + "name": "dfareporting", + "version": "v3.1", + "title": "DCM/DFA Reporting And Trafficking API", + "description": "Manages your DoubleClick Campaign Manager ad campaigns and reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dfareporting/v3.1/rest", + "discoveryLink": "./apis/dfareporting/v3.1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", + "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" + }, + "documentationLink": "https://developers.google.com/doubleclick-advertisers/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "dfareporting:v3.2", + "name": "dfareporting", + "version": "v3.2", + "title": "DCM/DFA Reporting And Trafficking API", + "description": "Manages your DoubleClick Campaign Manager ad campaigns and reports.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dfareporting/v3.2/rest", + "discoveryLink": "./apis/dfareporting/v3.2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/doubleclick-16.gif", + "x32": "https://www.google.com/images/icons/product/doubleclick-32.gif" + }, + "documentationLink": "https://developers.google.com/doubleclick-advertisers/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dialogflow:v2", + "name": "dialogflow", + "version": "v2", + "title": "Dialogflow API", + "description": "Builds conversational interfaces (for example, chatbots, and voice-powered apps and devices).", + "discoveryRestUrl": "https://dialogflow.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dialogflow-enterprise/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dialogflow:v2beta1", + "name": "dialogflow", + "version": "v2beta1", + "title": "Dialogflow API", + "description": "Builds conversational interfaces (for example, chatbots, and voice-powered apps and devices).", + "discoveryRestUrl": "https://dialogflow.googleapis.com/$discovery/rest?version=v2beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dialogflow-enterprise/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "digitalassetlinks:v1", + "name": "digitalassetlinks", + "version": "v1", + "title": "Digital Asset Links API", + "description": "Discovers relationships between online assets such as websites or mobile apps.", + "discoveryRestUrl": "https://digitalassetlinks.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/digital-asset-links/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "discovery:v1", + "name": "discovery", + "version": "v1", + "title": "APIs Discovery Service", + 
"description": "Provides information about other Google APIs, such as what APIs are available, the resource, and method details for each API.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/discovery/v1/rest", + "discoveryLink": "./apis/discovery/v1/rest", + "icons": { + "x16": "http://www.google.com/images/icons/feature/filing_cabinet_search-g16.png", + "x32": "http://www.google.com/images/icons/feature/filing_cabinet_search-g32.png" + }, + "documentationLink": "https://developers.google.com/discovery/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dlp:v2", + "name": "dlp", + "version": "v2", + "title": "Cloud Data Loss Prevention (DLP) API", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", + "discoveryRestUrl": "https://dlp.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dlp/docs/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dns:v1", + "name": "dns", + "version": "v1", + "title": "Google Cloud DNS API", + "description": "Configures and serves authoritative DNS records.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dns/v1/rest", + "discoveryLink": "./apis/dns/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/cloud-dns", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "dns:v1beta2", + "name": "dns", + "version": "v1beta2", + "title": "Google Cloud DNS API", + "description": "Configures and serves authoritative DNS records.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dns/v1beta2/rest", + "discoveryLink": "./apis/dns/v1beta2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/cloud-dns", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "dns:v2beta1", + "name": "dns", + "version": "v2beta1", + "title": "Google Cloud DNS API", + "description": "Configures and serves authoritative DNS records.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/dns/v2beta1/rest", + "discoveryLink": "./apis/dns/v2beta1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/cloud-dns", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "doubleclickbidmanager:v1", + "name": "doubleclickbidmanager", + "version": "v1", + "title": "DoubleClick Bid Manager API", + "description": "API for viewing and managing your reports in DoubleClick Bid Manager.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/doubleclickbidmanager/v1/rest", + "discoveryLink": "./apis/doubleclickbidmanager/v1/rest", + "icons": { + "x16": 
"https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/bid-manager/", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "doubleclicksearch:v2", + "name": "doubleclicksearch", + "version": "v2", + "title": "DoubleClick Search API", + "description": "Reports and modifies your advertising data in DoubleClick Search (for example, campaigns, ad groups, keywords, and conversions).", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/doubleclicksearch/v2/rest", + "discoveryLink": "./apis/doubleclicksearch/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/doubleclick-search/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "drive:v2", + "name": "drive", + "version": "v2", + "title": "Drive API", + "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/drive/v2/rest", + "discoveryLink": "./apis/drive/v2/rest", + "icons": { + "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", + "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" + }, + "documentationLink": "https://developers.google.com/drive/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "drive:v3", + "name": "drive", + "version": "v3", + "title": "Drive API", + "description": "Manages files in Drive including uploading, downloading, searching, detecting changes, and updating sharing permissions.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/drive/v3/rest", + "discoveryLink": "./apis/drive/v3/rest", + "icons": { + "x16": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_16.png", + "x32": "https://ssl.gstatic.com/docs/doclist/images/drive_icon_32.png" + }, + "documentationLink": "https://developers.google.com/drive/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "driveactivity:v2", + "name": "driveactivity", + "version": "v2", + "title": "Drive Activity API", + "description": "Provides a historical view of activity in Google Drive.", + "discoveryRestUrl": "https://driveactivity.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/drive/activity/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "file:v1beta1", + "name": "file", + "version": "v1beta1", + "title": "Cloud Filestore API", + "description": "The Cloud Filestore API is used for creating and managing cloud file servers.", + "discoveryRestUrl": "https://file.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/filestore/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": 
"firebasedynamiclinks:v1", + "name": "firebasedynamiclinks", + "version": "v1", + "title": "Firebase Dynamic Links API", + "description": "Programmatically creates and manages Firebase Dynamic Links.", + "discoveryRestUrl": "https://firebasedynamiclinks.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://firebase.google.com/docs/dynamic-links/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "firebasehosting:v1beta1", + "name": "firebasehosting", + "version": "v1beta1", + "title": "Firebase Hosting API", + "description": "", + "discoveryRestUrl": "https://firebasehosting.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://firebase.google.com/docs/hosting/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "firebaserules:v1", + "name": "firebaserules", + "version": "v1", + "title": "Firebase Rules API", + "description": "Creates and manages rules that determine when a Firebase Rules-enabled service should permit a request.", + "discoveryRestUrl": "https://firebaserules.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://firebase.google.com/docs/storage/security", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "firestore:v1", + "name": "firestore", + "version": "v1", + "title": "Cloud Firestore API", + "description": "Accesses the NoSQL document database built for automatic scaling, high performance, and ease of application development.", + "discoveryRestUrl": "https://firestore.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/firestore", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "firestore:v1beta1", + "name": "firestore", + "version": "v1beta1", + "title": "Cloud Firestore API", + "description": "Accesses the NoSQL document database built for automatic scaling, high performance, and ease of application development.", + "discoveryRestUrl": "https://firestore.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/firestore", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "firestore:v1beta2", + "name": "firestore", + "version": "v1beta2", + "title": "Cloud Firestore API", + "description": "Accesses the NoSQL document database built for automatic scaling, high performance, and ease of application development.", + "discoveryRestUrl": "https://firestore.googleapis.com/$discovery/rest?version=v1beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": 
"https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/firestore", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "fitness:v1", + "name": "fitness", + "version": "v1", + "title": "Fitness", + "description": "Stores and accesses user data in the fitness store from apps on any platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/fitness/v1/rest", + "discoveryLink": "./apis/fitness/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/fit/rest/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "fusiontables:v1", + "name": "fusiontables", + "version": "v1", + "title": "Fusion Tables API", + "description": "API for working with Fusion Tables data.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/fusiontables/v1/rest", + "discoveryLink": "./apis/fusiontables/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/fusiontables", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "fusiontables:v2", + "name": "fusiontables", + "version": "v2", + "title": "Fusion Tables API", + "description": "API for working with Fusion Tables data.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/fusiontables/v2/rest", + "discoveryLink": "./apis/fusiontables/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/fusiontables", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "games:v1", + "name": "games", + "version": "v1", + "title": "Google Play Game Services API", + "description": "The API for Google Play Game Services.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/games/v1/rest", + "discoveryLink": "./apis/games/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/games/services/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "gamesConfiguration:v1configuration", + "name": "gamesConfiguration", + "version": "v1configuration", + "title": "Google Play Game Services Publishing API", + "description": "The Publishing API for Google Play Game Services.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/gamesConfiguration/v1configuration/rest", + "discoveryLink": "./apis/gamesConfiguration/v1configuration/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/games/services", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "gamesManagement:v1management", + "name": "gamesManagement", + "version": "v1management", + "title": "Google Play Game Services Management API", + 
"description": "The Management API for Google Play Game Services.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/gamesManagement/v1management/rest", + "discoveryLink": "./apis/gamesManagement/v1management/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/games/services", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "genomics:v1alpha2", + "name": "genomics", + "version": "v1alpha2", + "title": "Genomics API", + "description": "Uploads, processes, queries, and searches Genomics data in the cloud.", + "discoveryRestUrl": "https://genomics.googleapis.com/$discovery/rest?version=v1alpha2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/genomics", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "genomics:v2alpha1", + "name": "genomics", + "version": "v2alpha1", + "title": "Genomics API", + "description": "Uploads, processes, queries, and searches Genomics data in the cloud.", + "discoveryRestUrl": "https://genomics.googleapis.com/$discovery/rest?version=v2alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/genomics", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "genomics:v1", + "name": "genomics", + "version": "v1", + "title": "Genomics API", + "description": "Uploads, processes, queries, and searches Genomics data in the cloud.", + "discoveryRestUrl": "https://genomics.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/genomics", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "gmail:v1", + "name": "gmail", + "version": "v1", + "title": "Gmail API", + "description": "Access Gmail mailboxes including sending user email.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/gmail/v1/rest", + "discoveryLink": "./apis/gmail/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/googlemail-16.png", + "x32": "https://www.google.com/images/icons/product/googlemail-32.png" + }, + "documentationLink": "https://developers.google.com/gmail/api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "groupsmigration:v1", + "name": "groupsmigration", + "version": "v1", + "title": "Groups Migration API", + "description": "Groups Migration Api.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/groupsmigration/v1/rest", + "discoveryLink": "./apis/groupsmigration/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/discussions-16.gif", + "x32": "https://www.google.com/images/icons/product/discussions-32.gif" + }, + "documentationLink": "https://developers.google.com/google-apps/groups-migration/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "groupssettings:v1", + "name": "groupssettings", + 
"version": "v1", + "title": "Groups Settings API", + "description": "Lets you manage permission levels and related settings of a group.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/groupssettings/v1/rest", + "discoveryLink": "./apis/groupssettings/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/google-apps/groups-settings/get_started", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "iam:v1", + "name": "iam", + "version": "v1", + "title": "Identity and Access Management (IAM) API", + "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", + "discoveryRestUrl": "https://iam.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/iam/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "iamcredentials:v1", + "name": "iamcredentials", + "version": "v1", + "title": "IAM Service Account Credentials API", + "description": "Creates short-lived, limited-privilege credentials for IAM service accounts.", + "discoveryRestUrl": "https://iamcredentials.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "iap:v1beta1", + "name": "iap", + "version": "v1beta1", + "title": "Cloud Identity-Aware Proxy API", + "description": "Controls access to cloud applications running on Google Cloud Platform.", + "discoveryRestUrl": "https://iap.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/iap", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "identitytoolkit:v3", + "name": "identitytoolkit", + "version": "v3", + "title": "Google Identity Toolkit API", + "description": "Help the third party sites to implement federated login.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/identitytoolkit/v3/rest", + "discoveryLink": "./apis/identitytoolkit/v3/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/identity-toolkit/v3/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "indexing:v3", + "name": "indexing", + "version": "v3", + "title": "Indexing API", + "description": "Notifies Google when your web pages change.", + "discoveryRestUrl": "https://indexing.googleapis.com/$discovery/rest?version=v3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", 
+ "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/search/apis/indexing-api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "jobs:v3p1beta1", + "name": "jobs", + "version": "v3p1beta1", + "title": "Cloud Talent Solution API", + "description": "Cloud Talent Solution provides the capability to create, read, update, and delete job postings, as well as search jobs based on keywords and filters.", + "discoveryRestUrl": "https://jobs.googleapis.com/$discovery/rest?version=v3p1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/talent-solution/job-search/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "jobs:v2", + "name": "jobs", + "version": "v2", + "title": "Cloud Talent Solution API", + "description": "Cloud Talent Solution provides the capability to create, read, update, and delete job postings, as well as search jobs based on keywords and filters.", + "discoveryRestUrl": "https://jobs.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/talent-solution/job-search/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "jobs:v3", + "name": "jobs", + "version": "v3", + "title": "Cloud Talent Solution API", + "description": "Cloud Talent Solution provides the capability to create, read, update, and delete job postings, as well as search jobs based on keywords and filters.", + "discoveryRestUrl": "https://jobs.googleapis.com/$discovery/rest?version=v3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/talent-solution/job-search/docs/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "kgsearch:v1", + "name": "kgsearch", + "version": "v1", + "title": "Knowledge Graph Search API", + "description": "Searches the Google Knowledge Graph for entities.", + "discoveryRestUrl": "https://kgsearch.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/knowledge-graph/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "language:v1", + "name": "language", + "version": "v1", + "title": "Cloud Natural Language API", + "description": "Provides natural language understanding technologies, such as sentiment analysis, entity recognition, entity sentiment analysis, and other text annotations, to developers.", + "discoveryRestUrl": "https://language.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/natural-language/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": 
"language:v1beta1", + "name": "language", + "version": "v1beta1", + "title": "Cloud Natural Language API", + "description": "Provides natural language understanding technologies, such as sentiment analysis, entity recognition, entity sentiment analysis, and other text annotations, to developers.", + "discoveryRestUrl": "https://language.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/natural-language/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "language:v1beta2", + "name": "language", + "version": "v1beta2", + "title": "Cloud Natural Language API", + "description": "Provides natural language understanding technologies, such as sentiment analysis, entity recognition, entity sentiment analysis, and other text annotations, to developers.", + "discoveryRestUrl": "https://language.googleapis.com/$discovery/rest?version=v1beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/natural-language/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "licensing:v1", + "name": "licensing", + "version": "v1", + "title": "Enterprise License Manager API", + "description": "Views and manages licenses for your domain.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/licensing/v1/rest", + "discoveryLink": "./apis/licensing/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/google-apps/licensing/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "logging:v2", + "name": "logging", + "version": "v2", + "title": "Stackdriver Logging API", + "description": "Writes log entries and manages your Logging configuration.", + "discoveryRestUrl": "https://logging.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/logging/docs/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "logging:v2beta1", + "name": "logging", + "version": "v2beta1", + "title": "Stackdriver Logging API", + "description": "Writes log entries and manages your Logging configuration.", + "discoveryRestUrl": "https://logging.googleapis.com/$discovery/rest?version=v2beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/logging/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "manufacturers:v1", + "name": "manufacturers", + "version": "v1", + "title": "Manufacturer Center API", + "description": "Public API for managing Manufacturer Center related data.", + "discoveryRestUrl": "https://manufacturers.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": 
"https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/manufacturers/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "mirror:v1", + "name": "mirror", + "version": "v1", + "title": "Google Mirror API", + "description": "Interacts with Glass users via the timeline.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/mirror/v1/rest", + "discoveryLink": "./apis/mirror/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/glass", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "ml:v1", + "name": "ml", + "version": "v1", + "title": "Cloud Machine Learning Engine", + "description": "An API to enable creating and using machine learning models.", + "discoveryRestUrl": "https://ml.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/ml/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "monitoring:v3", + "name": "monitoring", + "version": "v3", + "title": "Stackdriver Monitoring API", + "description": "Manages your Stackdriver Monitoring data and configurations. Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages.", + "discoveryRestUrl": "https://monitoring.googleapis.com/$discovery/rest?version=v3", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/monitoring/api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "oauth2:v1", + "name": "oauth2", + "version": "v1", + "title": "Google OAuth2 API", + "description": "Obtains end-user authorization grants for use with other Google APIs.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/oauth2/v1/rest", + "discoveryLink": "./apis/oauth2/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/accounts/docs/OAuth2", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "oauth2:v2", + "name": "oauth2", + "version": "v2", + "title": "Google OAuth2 API", + "description": "Obtains end-user authorization grants for use with other Google APIs.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/oauth2/v2/rest", + "discoveryLink": "./apis/oauth2/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/accounts/docs/OAuth2", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "oslogin:v1alpha", + "name": "oslogin", + "version": "v1alpha", + "title": "Cloud OS Login 
API", + "description": "Manages OS login configuration for Google account users.", + "discoveryRestUrl": "https://oslogin.googleapis.com/$discovery/rest?version=v1alpha", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/compute/docs/oslogin/rest/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "oslogin:v1beta", + "name": "oslogin", + "version": "v1beta", + "title": "Cloud OS Login API", + "description": "Manages OS login configuration for Google account users.", + "discoveryRestUrl": "https://oslogin.googleapis.com/$discovery/rest?version=v1beta", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/compute/docs/oslogin/rest/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "oslogin:v1", + "name": "oslogin", + "version": "v1", + "title": "Cloud OS Login API", + "description": "Manages OS login configuration for Google account users.", + "discoveryRestUrl": "https://oslogin.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/compute/docs/oslogin/rest/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "pagespeedonline:v1", + "name": "pagespeedonline", + "version": "v1", + "title": "PageSpeed Insights API", + "description": "Analyzes the performance of a web page and provides tailored suggestions to make that page faster.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/pagespeedonline/v1/rest", + "discoveryLink": "./apis/pagespeedonline/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/pagespeed-16.png", + "x32": "https://www.google.com/images/icons/product/pagespeed-32.png" + }, + "documentationLink": "https://developers.google.com/speed/docs/insights/v1/getting_started", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "pagespeedonline:v2", + "name": "pagespeedonline", + "version": "v2", + "title": "PageSpeed Insights API", + "description": "Analyzes the performance of a web page and provides tailored suggestions to make that page faster.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/pagespeedonline/v2/rest", + "discoveryLink": "./apis/pagespeedonline/v2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/pagespeed-16.png", + "x32": "https://www.google.com/images/icons/product/pagespeed-32.png" + }, + "documentationLink": "https://developers.google.com/speed/docs/insights/v2/getting-started", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "pagespeedonline:v4", + "name": "pagespeedonline", + "version": "v4", + "title": "PageSpeed Insights API", + "description": "Analyzes the performance of a web page and provides tailored suggestions to make that page faster.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/pagespeedonline/v4/rest", + "discoveryLink": "./apis/pagespeedonline/v4/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/pagespeed-16.png", + "x32": 
"https://www.google.com/images/icons/product/pagespeed-32.png" + }, + "documentationLink": "https://developers.google.com/speed/docs/insights/v4/getting-started", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "pagespeedonline:v5", + "name": "pagespeedonline", + "version": "v5", + "title": "PageSpeed Insights API", + "description": "Analyzes the performance of a web page and provides tailored suggestions to make that page faster.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/pagespeedonline/v5/rest", + "discoveryLink": "./apis/pagespeedonline/v5/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/pagespeed-16.png", + "x32": "https://www.google.com/images/icons/product/pagespeed-32.png" + }, + "documentationLink": "https://developers.google.com/speed/docs/insights/v5/get-started", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "partners:v2", + "name": "partners", + "version": "v2", + "title": "Google Partners API", + "description": "Searches certified companies and creates contact leads with them, and also audits the usage of clients.", + "discoveryRestUrl": "https://partners.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/partners/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "people:v1", + "name": "people", + "version": "v1", + "title": "People API", + "description": "Provides access to information about profiles and contacts.", + "discoveryRestUrl": "https://people.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/people/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "playcustomapp:v1", + "name": "playcustomapp", + "version": "v1", + "title": "Google Play Custom App Publishing API", + "description": "An API to publish custom Android apps.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/playcustomapp/v1/rest", + "discoveryLink": "./apis/playcustomapp/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/android/work/play/custom-app-api", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "plus:v1", + "name": "plus", + "version": "v1", + "title": "Google+ API", + "description": "Builds on top of the Google+ platform.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/plus/v1/rest", + "discoveryLink": "./apis/plus/v1/rest", + "icons": { + "x16": "http://www.google.com/images/icons/product/gplus-16.png", + "x32": "http://www.google.com/images/icons/product/gplus-32.png" + }, + "documentationLink": "https://developers.google.com/+/api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "plusDomains:v1", + "name": "plusDomains", + "version": "v1", + "title": "Google+ Domains API", + "description": "Builds on top of the Google+ platform for Google Apps Domains.", + "discoveryRestUrl": 
"https://www.googleapis.com/discovery/v1/apis/plusDomains/v1/rest", + "discoveryLink": "./apis/plusDomains/v1/rest", + "icons": { + "x16": "http://www.google.com/images/icons/product/gplus-16.png", + "x32": "http://www.google.com/images/icons/product/gplus-32.png" + }, + "documentationLink": "https://developers.google.com/+/domains/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "poly:v1", + "name": "poly", + "version": "v1", + "title": "Poly API", + "description": "The Poly API provides read access to assets hosted on poly.google.com to all, and upload access to poly.google.com for whitelisted accounts.", + "discoveryRestUrl": "https://poly.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/poly/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "proximitybeacon:v1beta1", + "name": "proximitybeacon", + "version": "v1beta1", + "title": "Proximity Beacon API", + "description": "Registers, manages, indexes, and searches beacons.", + "discoveryRestUrl": "https://proximitybeacon.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/beacons/proximity/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "pubsub:v1beta1a", + "name": "pubsub", + "version": "v1beta1a", + "title": "Cloud Pub/Sub API", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications.", + "discoveryRestUrl": "https://pubsub.googleapis.com/$discovery/rest?version=v1beta1a", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/pubsub/docs", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "pubsub:v1", + "name": "pubsub", + "version": "v1", + "title": "Cloud Pub/Sub API", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications.", + "discoveryRestUrl": "https://pubsub.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/pubsub/docs", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "pubsub:v1beta2", + "name": "pubsub", + "version": "v1beta2", + "title": "Cloud Pub/Sub API", + "description": "Provides reliable, many-to-many, asynchronous messaging between applications.", + "discoveryRestUrl": "https://pubsub.googleapis.com/$discovery/rest?version=v1beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/pubsub/docs", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "redis:v1", + "name": "redis", + "version": "v1", + "title": "Google Cloud Memorystore for Redis API", + "description": "The Google Cloud 
Memorystore for Redis API is used for creating and managing Redis instances on the Google Cloud Platform.", + "discoveryRestUrl": "https://redis.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/memorystore/docs/redis/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "redis:v1beta1", + "name": "redis", + "version": "v1beta1", + "title": "Google Cloud Memorystore for Redis API", + "description": "The Google Cloud Memorystore for Redis API is used for creating and managing Redis instances on the Google Cloud Platform.", + "discoveryRestUrl": "https://redis.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/memorystore/docs/redis/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "replicapool:v1beta1", + "name": "replicapool", + "version": "v1beta1", + "title": "Replica Pool API", + "description": "The Replica Pool API allows users to declaratively provision and manage groups of Google Compute Engine instances based on a common template.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/replicapool/v1beta1/rest", + "discoveryLink": "./apis/replicapool/v1beta1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/replica-pool/", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "replicapoolupdater:v1beta1", + "name": "replicapoolupdater", + "version": "v1beta1", + "title": "Google Compute Engine Instance Group Updater API", + "description": "[Deprecated. Please use compute.instanceGroupManagers.update method. 
replicapoolupdater API will be disabled after December 30th, 2016] Updates groups of Compute Engine instances.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/replicapoolupdater/v1beta1/rest", + "discoveryLink": "./apis/replicapoolupdater/v1beta1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/compute/docs/instance-groups/manager/#applying_rolling_updates_using_the_updater_service", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "reseller:v1", + "name": "reseller", + "version": "v1", + "title": "Enterprise Apps Reseller API", + "description": "Creates and manages your customers and their subscriptions.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/reseller/v1/rest", + "discoveryLink": "./apis/reseller/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/google-apps/reseller/", + "labels": [ + "limited_availability" + ], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "runtimeconfig:v1", + "name": "runtimeconfig", + "version": "v1", + "title": "Cloud Runtime Configuration API", + "description": "The Runtime Configurator allows you to dynamically configure and expose variables through Google Cloud Platform. In addition, you can also set Watchers and Waiters that will watch for changes to your data and return based on certain conditions.", + "discoveryRestUrl": "https://runtimeconfig.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "runtimeconfig:v1beta1", + "name": "runtimeconfig", + "version": "v1beta1", + "title": "Cloud Runtime Configuration API", + "description": "The Runtime Configurator allows you to dynamically configure and expose variables through Google Cloud Platform. 
In addition, you can also set Watchers and Waiters that will watch for changes to your data and return based on certain conditions.", + "discoveryRestUrl": "https://runtimeconfig.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/deployment-manager/runtime-configurator/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "safebrowsing:v4", + "name": "safebrowsing", + "version": "v4", + "title": "Safe Browsing API", + "description": "Enables client applications to check web resources (most commonly URLs) against Google-generated lists of unsafe web resources.", + "discoveryRestUrl": "https://safebrowsing.googleapis.com/$discovery/rest?version=v4", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/safe-browsing/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "script:v1", + "name": "script", + "version": "v1", + "title": "Apps Script API", + "description": "Manages and executes Google Apps Script projects.", + "discoveryRestUrl": "https://script.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/apps-script/api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "searchconsole:v1", + "name": "searchconsole", + "version": "v1", + "title": "Google Search Console URL Testing Tools API", + "description": "Provides tools for running validation tests against single URLs", + "discoveryRestUrl": "https://searchconsole.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/webmaster-tools/search-console-api/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "servicebroker:v1alpha1", + "name": "servicebroker", + "version": "v1alpha1", + "title": "Service Broker API", + "description": "The Google Cloud Platform Service Broker API provides Google hosted implementation of the Open Service Broker API (https://www.openservicebrokerapi.org/).", + "discoveryRestUrl": "https://servicebroker.googleapis.com/$discovery/rest?version=v1alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/kubernetes-engine/docs/concepts/add-on/service-broker", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "servicebroker:v1", + "name": "servicebroker", + "version": "v1", + "title": "Service Broker API", + "description": "The Google Cloud Platform Service Broker API provides Google hosted implementation of the Open Service Broker API (https://www.openservicebrokerapi.org/).", + "discoveryRestUrl": "https://servicebroker.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": 
"https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/kubernetes-engine/docs/concepts/add-on/service-broker", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "servicebroker:v1beta1", + "name": "servicebroker", + "version": "v1beta1", + "title": "Service Broker API", + "description": "The Google Cloud Platform Service Broker API provides Google hosted implementation of the Open Service Broker API (https://www.openservicebrokerapi.org/).", + "discoveryRestUrl": "https://servicebroker.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/kubernetes-engine/docs/concepts/add-on/service-broker", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "serviceconsumermanagement:v1", + "name": "serviceconsumermanagement", + "version": "v1", + "title": "Service Consumer Management API", + "description": "Manages the service consumers of a Service Infrastructure service.", + "discoveryRestUrl": "https://serviceconsumermanagement.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-consumer-management/docs/overview", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "servicecontrol:v1", + "name": "servicecontrol", + "version": "v1", + "title": "Service Control API", + "description": "Provides control plane functionality to managed services, such as logging, monitoring, and status checks.", + "discoveryRestUrl": "https://servicecontrol.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-control/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "servicemanagement:v1", + "name": "servicemanagement", + "version": "v1", + "title": "Service Management API", + "description": "Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.", + "discoveryRestUrl": "https://servicemanagement.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-management/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "servicenetworking:v1beta", + "name": "servicenetworking", + "version": "v1beta", + "title": "Service Networking API", + "description": "Provides automatic management of network configurations necessary for certain services.", + "discoveryRestUrl": "https://servicenetworking.googleapis.com/$discovery/rest?version=v1beta", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": 
"https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "serviceusage:v1", + "name": "serviceusage", + "version": "v1", + "title": "Service Usage API", + "description": "Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.", + "discoveryRestUrl": "https://serviceusage.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-usage/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "serviceusage:v1beta1", + "name": "serviceusage", + "version": "v1beta1", + "title": "Service Usage API", + "description": "Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.", + "discoveryRestUrl": "https://serviceusage.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/service-usage/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "sheets:v4", + "name": "sheets", + "version": "v4", + "title": "Google Sheets API", + "description": "Reads and writes Google Sheets.", + "discoveryRestUrl": "https://sheets.googleapis.com/$discovery/rest?version=v4", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/sheets/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "siteVerification:v1", + "name": "siteVerification", + "version": "v1", + "title": "Google Site Verification API", + "description": "Verifies ownership of websites or domains with Google.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/siteVerification/v1/rest", + "discoveryLink": "./apis/siteVerification/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/site-verification/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "slides:v1", + "name": "slides", + "version": "v1", + "title": "Google Slides API", + "description": "Reads and writes Google Slides presentations.", + "discoveryRestUrl": "https://slides.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/slides/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "sourcerepo:v1", + "name": "sourcerepo", + "version": "v1", + "title": "Cloud Source Repositories API", + "description": "Accesses 
source code repositories hosted by Google.", + "discoveryRestUrl": "https://sourcerepo.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/source-repositories/docs/apis", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "spanner:v1", + "name": "spanner", + "version": "v1", + "title": "Cloud Spanner API", + "description": "Cloud Spanner is a managed, mission-critical, globally consistent and scalable relational database service.", + "discoveryRestUrl": "https://spanner.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/spanner/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "speech:v1p1beta1", + "name": "speech", + "version": "v1p1beta1", + "title": "Cloud Speech API", + "description": "Converts audio to text by applying powerful neural network models.", + "discoveryRestUrl": "https://speech.googleapis.com/$discovery/rest?version=v1p1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/speech-to-text/docs/quickstart-protocol", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "speech:v1", + "name": "speech", + "version": "v1", + "title": "Cloud Speech API", + "description": "Converts audio to text by applying powerful neural network models.", + "discoveryRestUrl": "https://speech.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/speech-to-text/docs/quickstart-protocol", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "sqladmin:v1beta4", + "name": "sqladmin", + "version": "v1beta4", + "title": "Cloud SQL Admin API", + "description": "Creates and manages Cloud SQL instances, which provide fully managed MySQL or PostgreSQL databases.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/sqladmin/v1beta4/rest", + "discoveryLink": "./apis/sqladmin/v1beta4/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/sql/docs/reference/latest", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "storage:v1", + "name": "storage", + "version": "v1", + "title": "Cloud Storage JSON API", + "description": "Stores and retrieves potentially large, immutable data objects.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/storage/v1/rest", + "discoveryLink": "./apis/storage/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + 
], + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "storage:v1beta1", + "name": "storage", + "version": "v1beta1", + "title": "Cloud Storage JSON API", + "description": "Lets you store and retrieve potentially-large, immutable data objects.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/storage/v1beta1/rest", + "discoveryLink": "./apis/storage/v1beta1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + ], + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "storage:v1beta2", + "name": "storage", + "version": "v1beta2", + "title": "Cloud Storage JSON API", + "description": "Lets you store and retrieve potentially-large, immutable data objects.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/storage/v1beta2/rest", + "discoveryLink": "./apis/storage/v1beta2/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + ], + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "storagetransfer:v1", + "name": "storagetransfer", + "version": "v1", + "title": "Storage Transfer API", + "description": "Transfers data from external data sources to a Google Cloud Storage bucket or between Google Cloud Storage buckets.", + "discoveryRestUrl": "https://storagetransfer.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/storage/transfer", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "streetviewpublish:v1", + "name": "streetviewpublish", + "version": "v1", + "title": "Street View Publish API", + "description": "Publishes 360 photos to Google Maps, along with position, orientation, and connectivity metadata. 
Apps can offer an interface for positioning, connecting, and uploading user-generated Street View images.", + "discoveryRestUrl": "https://streetviewpublish.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/streetview/publish/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "surveys:v2", + "name": "surveys", + "version": "v2", + "title": "Surveys API", + "description": "Creates and conducts surveys, lists the surveys that an authenticated user owns, and retrieves survey results and information about specified surveys.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/surveys/v2/rest", + "discoveryLink": "./apis/surveys/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "tagmanager:v1", + "name": "tagmanager", + "version": "v1", + "title": "Tag Manager API", + "description": "Accesses Tag Manager accounts and containers.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/tagmanager/v1/rest", + "discoveryLink": "./apis/tagmanager/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/tag-manager/api/v1/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "tagmanager:v2", + "name": "tagmanager", + "version": "v2", + "title": "Tag Manager API", + "description": "Accesses Tag Manager accounts and containers.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/tagmanager/v2/rest", + "discoveryLink": "./apis/tagmanager/v2/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/tag-manager/api/v2/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "tasks:v1", + "name": "tasks", + "version": "v1", + "title": "Tasks API", + "description": "Lets you manage your tasks and task lists.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/tasks/v1/rest", + "discoveryLink": "./apis/tasks/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/tasks-16.png", + "x32": "https://www.google.com/images/icons/product/tasks-32.png" + }, + "documentationLink": "https://developers.google.com/google-apps/tasks/firstapp", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "testing:v1", + "name": "testing", + "version": "v1", + "title": "Cloud Testing API", + "description": "Allows developers to run automated tests for their mobile applications on Google infrastructure.", + "discoveryRestUrl": "https://testing.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/cloud-test-lab/", + "preferred": true + }, + { + "kind": 
"discovery#directoryItem", + "id": "texttospeech:v1", + "name": "texttospeech", + "version": "v1", + "title": "Cloud Text-to-Speech API", + "description": "Synthesizes natural-sounding speech by applying powerful neural network models.", + "discoveryRestUrl": "https://texttospeech.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/text-to-speech/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "texttospeech:v1beta1", + "name": "texttospeech", + "version": "v1beta1", + "title": "Cloud Text-to-Speech API", + "description": "Synthesizes natural-sounding speech by applying powerful neural network models.", + "discoveryRestUrl": "https://texttospeech.googleapis.com/$discovery/rest?version=v1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/text-to-speech/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "toolresults:v1beta3", + "name": "toolresults", + "version": "v1beta3", + "title": "Cloud Tool Results API", + "description": "Reads and publishes results from Firebase Test Lab.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/toolresults/v1beta3/rest", + "discoveryLink": "./apis/toolresults/v1beta3/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://firebase.google.com/docs/test-lab/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "tpu:v1alpha1", + "name": "tpu", + "version": "v1alpha1", + "title": "Cloud TPU API", + "description": "TPU API provides customers with access to Google TPU technology.", + "discoveryRestUrl": "https://tpu.googleapis.com/$discovery/rest?version=v1alpha1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/tpu/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "tpu:v1", + "name": "tpu", + "version": "v1", + "title": "Cloud TPU API", + "description": "TPU API provides customers with access to Google TPU technology.", + "discoveryRestUrl": "https://tpu.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/tpu/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "translate:v2", + "name": "translate", + "version": "v2", + "title": "Cloud Translation API", + "description": "Integrates text translation into your website or application.", + "discoveryRestUrl": "https://translation.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": 
"https://code.google.com/apis/language/translate/v2/getting_started.html", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "urlshortener:v1", + "name": "urlshortener", + "version": "v1", + "title": "URL Shortener API", + "description": "Lets you create, inspect, and manage goo.gl short URLs", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/urlshortener/v1/rest", + "discoveryLink": "./apis/urlshortener/v1/rest", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/url-shortener/v1/getting_started", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "vault:v1", + "name": "vault", + "version": "v1", + "title": "G Suite Vault API", + "description": "Archiving and eDiscovery for G Suite.", + "discoveryRestUrl": "https://vault.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/vault", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "videointelligence:v1p1beta1", + "name": "videointelligence", + "version": "v1p1beta1", + "title": "Cloud Video Intelligence API", + "description": "Cloud Video Intelligence API.", + "discoveryRestUrl": "https://videointelligence.googleapis.com/$discovery/rest?version=v1p1beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/video-intelligence/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "videointelligence:v1", + "name": "videointelligence", + "version": "v1", + "title": "Cloud Video Intelligence API", + "description": "Cloud Video Intelligence API.", + "discoveryRestUrl": "https://videointelligence.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/video-intelligence/docs/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "videointelligence:v1beta2", + "name": "videointelligence", + "version": "v1beta2", + "title": "Cloud Video Intelligence API", + "description": "Cloud Video Intelligence API.", + "discoveryRestUrl": "https://videointelligence.googleapis.com/$discovery/rest?version=v1beta2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/video-intelligence/docs/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "vision:v1p1beta1", + "name": "vision", + "version": "v1p1beta1", + "title": "Cloud Vision API", + "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", + "discoveryRestUrl": "https://vision.googleapis.com/$discovery/rest?version=v1p1beta1", + "icons": { + "x16": 
"https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/vision/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "vision:v1p2beta1", + "name": "vision", + "version": "v1p2beta1", + "title": "Cloud Vision API", + "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", + "discoveryRestUrl": "https://vision.googleapis.com/$discovery/rest?version=v1p2beta1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/vision/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "vision:v1", + "name": "vision", + "version": "v1", + "title": "Cloud Vision API", + "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", + "discoveryRestUrl": "https://vision.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/vision/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "webfonts:v1", + "name": "webfonts", + "version": "v1", + "title": "Google Fonts Developer API", + "description": "Accesses the metadata for all families served by Google Fonts, providing a list of families currently available (including available styles and a list of supported script subsets).", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/webfonts/v1/rest", + "discoveryLink": "./apis/webfonts/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/feature/font_api-16.png", + "x32": "https://www.google.com/images/icons/feature/font_api-32.gif" + }, + "documentationLink": "https://developers.google.com/fonts/docs/developer_api", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "webmasters:v3", + "name": "webmasters", + "version": "v3", + "title": "Search Console API", + "description": "View Google Search Console data for your verified sites.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/webmasters/v3/rest", + "discoveryLink": "./apis/webmasters/v3/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/webmaster_tools-16.png", + "x32": "https://www.google.com/images/icons/product/webmaster_tools-32.png" + }, + "documentationLink": "https://developers.google.com/webmaster-tools/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "websecurityscanner:v1alpha", + "name": "websecurityscanner", + "version": "v1alpha", + "title": "Web Security Scanner API", + "description": "Scans your Compute and App Engine apps for common web vulnerabilities.", + "discoveryRestUrl": "https://websecurityscanner.googleapis.com/$discovery/rest?version=v1alpha", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + 
"documentationLink": "https://cloud.google.com/security-scanner/", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "websecurityscanner:v1beta", + "name": "websecurityscanner", + "version": "v1beta", + "title": "Web Security Scanner API", + "description": "Scans your Compute and App Engine apps for common web vulnerabilities.", + "discoveryRestUrl": "https://websecurityscanner.googleapis.com/$discovery/rest?version=v1beta", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/security-scanner/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "youtube:v3", + "name": "youtube", + "version": "v3", + "title": "YouTube Data API", + "description": "Supports core YouTube features, such as uploading videos, creating and managing playlists, searching for content, and much more.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/youtube/v3/rest", + "discoveryLink": "./apis/youtube/v3/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/youtube-16.png", + "x32": "https://www.google.com/images/icons/product/youtube-32.png" + }, + "documentationLink": "https://developers.google.com/youtube/v3", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "youtubeAnalytics:v1", + "name": "youtubeAnalytics", + "version": "v1", + "title": "YouTube Analytics API", + "description": "Retrieves your YouTube Analytics data.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/youtubeAnalytics/v1/rest", + "discoveryLink": "./apis/youtubeAnalytics/v1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/youtube-16.png", + "x32": "https://www.google.com/images/icons/product/youtube-32.png" + }, + "documentationLink": "http://developers.google.com/youtube/analytics/", + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "youtubeAnalytics:v1beta1", + "name": "youtubeAnalytics", + "version": "v1beta1", + "title": "YouTube Analytics API", + "description": "Retrieves your YouTube Analytics data.", + "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/youtubeAnalytics/v1beta1/rest", + "discoveryLink": "./apis/youtubeAnalytics/v1beta1/rest", + "icons": { + "x16": "https://www.google.com/images/icons/product/youtube-16.png", + "x32": "https://www.google.com/images/icons/product/youtube-32.png" + }, + "documentationLink": "http://developers.google.com/youtube/analytics/", + "labels": [ + "deprecated" + ], + "preferred": false + }, + { + "kind": "discovery#directoryItem", + "id": "youtubeAnalytics:v2", + "name": "youtubeAnalytics", + "version": "v2", + "title": "YouTube Analytics API", + "description": "Retrieves your YouTube Analytics data.", + "discoveryRestUrl": "https://youtubeanalytics.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/youtube/analytics", + "preferred": true + }, + { + "kind": "discovery#directoryItem", + "id": "youtubereporting:v1", + "name": "youtubereporting", + "version": "v1", + "title": "YouTube Reporting API", + "description": "Schedules reporting jobs containing your YouTube Analytics data and downloads the resulting bulk data 
reports in the form of CSV files.", + "discoveryRestUrl": "https://youtubereporting.googleapis.com/$discovery/rest?version=v1", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://developers.google.com/youtube/reporting/v1/reports/", + "preferred": true + } + ] +} diff --git a/vendor/google.golang.org/api/key.json.enc b/vendor/google.golang.org/api/key.json.enc new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go new file mode 100644 index 000000000..c55327119 --- /dev/null +++ b/vendor/google.golang.org/api/support/bundler/bundler.go @@ -0,0 +1,349 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bundler supports bundling (batching) of items. Bundling amortizes an +// action with fixed costs over multiple items. For example, if an API provides +// an RPC that accepts a list of items as input, but clients would prefer +// adding items one at a time, then a Bundler can accept individual items from +// the client and bundle many of them into a single RPC. +// +// This package is experimental and subject to change without notice. +package bundler + +import ( + "context" + "errors" + "math" + "reflect" + "sync" + "time" + + "golang.org/x/sync/semaphore" +) + +const ( + DefaultDelayThreshold = time.Second + DefaultBundleCountThreshold = 10 + DefaultBundleByteThreshold = 1e6 // 1M + DefaultBufferedByteLimit = 1e9 // 1G +) + +var ( + // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. + ErrOverflow = errors.New("bundler reached buffered byte limit") + + // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. + ErrOversizedItem = errors.New("item size exceeds bundle byte limit") +) + +// A Bundler collects items added to it into a bundle until the bundle +// exceeds a given size, then calls a user-provided function to handle the bundle. +type Bundler struct { + // Starting from the time that the first message is added to a bundle, once + // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. + DelayThreshold time.Duration + + // Once a bundle has this many items, handle the bundle. Since only one + // item at a time is added to a bundle, no bundle will exceed this + // threshold, so it also serves as a limit. The default is + // DefaultBundleCountThreshold. + BundleCountThreshold int + + // Once the number of bytes in current bundle reaches this threshold, handle + // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, + // but does not cap the total size of a bundle. + BundleByteThreshold int + + // The maximum size of a bundle, in bytes. Zero means unlimited. 
+ BundleByteLimit int + + // The maximum number of bytes that the Bundler will keep in memory before + // returning ErrOverflow. The default is DefaultBufferedByteLimit. + BufferedByteLimit int + + // The maximum number of handler invocations that can be running at once. + // The default is 1. + HandlerLimit int + + handler func(interface{}) // called to handle a bundle + itemSliceZero reflect.Value // nil (zero value) for slice of items + flushTimer *time.Timer // implements DelayThreshold + + mu sync.Mutex + sem *semaphore.Weighted // enforces BufferedByteLimit + semOnce sync.Once + curBundle bundle // incoming items added to this bundle + + // Each bundle is assigned a unique ticket that determines the order in which the + // handler is called. The ticket is assigned with mu locked, but waiting for tickets + // to be handled is done via mu2 and cond, below. + nextTicket uint64 // next ticket to be assigned + + mu2 sync.Mutex + cond *sync.Cond + nextHandled uint64 // next ticket to be handled + + // In this implementation, active uses space proportional to HandlerLimit, and + // waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire + // or release occurs, so large values of HandlerLimit max may cause performance + // issues. + active map[uint64]bool // tickets of bundles actively being handled +} + +type bundle struct { + items reflect.Value // slice of item type + size int // size in bytes of all items +} + +// NewBundler creates a new Bundler. +// +// itemExample is a value of the type that will be bundled. For example, if you +// want to create bundles of *Entry, you could pass &Entry{} for itemExample. +// +// handler is a function that will be called on each bundle. If itemExample is +// of type T, the argument to handler is of type []T. handler is always called +// sequentially for each bundle, and never in parallel. +// +// Configure the Bundler by setting its thresholds and limits before calling +// any of its methods. +func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { + b := &Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultBundleCountThreshold, + BundleByteThreshold: DefaultBundleByteThreshold, + BufferedByteLimit: DefaultBufferedByteLimit, + HandlerLimit: 1, + + handler: handler, + itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), + active: map[uint64]bool{}, + } + b.curBundle.items = b.itemSliceZero + b.cond = sync.NewCond(&b.mu2) + return b +} + +func (b *Bundler) initSemaphores() { + // Create the semaphores lazily, because the user may set limits + // after NewBundler. + b.semOnce.Do(func() { + b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) + }) +} + +// Add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. Add returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed +// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for +// memory, Add returns ErrOverflow. +// +// Add never blocks. +func (b *Bundler) Add(item interface{}, size int) error { + // If this item exceeds the maximum size of a bundle, + // we can never send it. 
+ if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory + // footprint, we can't accept it. + // (TryAcquire also returns false if anything is waiting on the semaphore, + // so calls to Add and AddWait shouldn't be mixed.) + b.initSemaphores() + if !b.sem.TryAcquire(int64(size)) { + return ErrOverflow + } + b.add(item, size) + return nil +} + +// add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +func (b *Bundler) add(item interface{}, size int) { + b.mu.Lock() + defer b.mu.Unlock() + + // If adding this item to the current bundle would cause it to exceed the + // maximum bundle size, close the current bundle and start a new one. + if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit { + b.startFlushLocked() + } + // Add the item. + b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item)) + b.curBundle.size += size + + // Start a timer to flush the item if one isn't already running. + // startFlushLocked clears the timer and closes the bundle at the same time, + // so we only allocate a new timer for the first item in each bundle. + // (We could try to call Reset on the timer instead, but that would add a lot + // of complexity to the code just to save one small allocation.) + if b.flushTimer == nil { + b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush) + } + + // If the current bundle equals the count threshold, close it. + if b.curBundle.items.Len() == b.BundleCountThreshold { + b.startFlushLocked() + } + // If the current bundle equals or exceeds the byte threshold, close it. + if b.curBundle.size >= b.BundleByteThreshold { + b.startFlushLocked() + } +} + +// AddWait adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. AddWait returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), +// AddWait blocks until space is available or ctx is done. +// +// Calls to Add and AddWait should not be mixed on the same Bundler. +func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory footprint, block + // until space is available. The semaphore is FIFO, so there will be no + // starvation. + b.initSemaphores() + if err := b.sem.Acquire(ctx, int64(size)); err != nil { + return err + } + // Here, we've reserved space for item. Other goroutines can call AddWait + // and even acquire space, but no one can take away our reservation + // (assuming sem.Release is used correctly). So there is no race condition + // resulting from locking the mutex after sem.Acquire returns. + b.add(item, size) + return nil +} + +// Flush invokes the handler for all remaining items in the Bundler and waits +// for it to return. +func (b *Bundler) Flush() { + b.mu.Lock() + b.startFlushLocked() + // Here, all bundles with tickets < b.nextTicket are + // either finished or active. Those are the ones + // we want to wait for. 
+ t := b.nextTicket + b.mu.Unlock() + b.initSemaphores() + b.waitUntilAllHandled(t) +} + +func (b *Bundler) startFlushLocked() { + if b.flushTimer != nil { + b.flushTimer.Stop() + b.flushTimer = nil + } + if b.curBundle.items.Len() == 0 { + return + } + // Here, both semaphores must have been initialized. + bun := b.curBundle + b.curBundle = bundle{items: b.itemSliceZero} + ticket := b.nextTicket + b.nextTicket++ + go func() { + defer func() { + b.sem.Release(int64(bun.size)) + b.release(ticket) + }() + b.acquire(ticket) + b.handler(bun.items.Interface()) + }() +} + +// acquire blocks until ticket is the next to be served, then returns. In order for N +// acquire calls to return, the tickets must be in the range [0, N). A ticket must +// not be presented to acquire more than once. +func (b *Bundler) acquire(ticket uint64) { + b.mu2.Lock() + defer b.mu2.Unlock() + if ticket < b.nextHandled { + panic("bundler: acquire: arg too small") + } + for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) { + b.cond.Wait() + } + // Here, + // ticket == b.nextHandled: the caller is the next one to be handled; + // and len(b.active) < b.HandlerLimit: there is space available. + b.active[ticket] = true + b.nextHandled++ + // Broadcast, not Signal: although at most one acquire waiter can make progress, + // there might be waiters in waitUntilAllHandled. + b.cond.Broadcast() +} + +// If a ticket is used for a call to acquire, it must later be passed to release. A +// ticket must not be presented to release more than once. +func (b *Bundler) release(ticket uint64) { + b.mu2.Lock() + defer b.mu2.Unlock() + if !b.active[ticket] { + panic("bundler: release: not an active ticket") + } + delete(b.active, ticket) + b.cond.Broadcast() +} + +// waitUntilAllHandled blocks until all tickets < n have called release, meaning +// all bundles with tickets < n have been handled. +func (b *Bundler) waitUntilAllHandled(n uint64) { + // Proof of correctness of this function. + // "N is acquired" means acquire(N) has returned. + // "N is released" means release(N) has returned. + // 1. If N is acquired, N-1 is acquired. + // Follows from the loop test in acquire, and the fact + // that nextHandled is incremented by 1. + // 2. If nextHandled >= N, then N-1 is acquired. + // Because we only increment nextHandled to N after N-1 is acquired. + // 3. If nextHandled >= N, then all n < N is acquired. + // Follows from #1 and #2. + // 4. If N is acquired and N is not in active, then N is released. + // Because we put N in active before acquire returns, and only + // remove it when it is released. + // Let min(active) be the smallest member of active, or infinity if active is empty. + // 5. If nextHandled >= N and N <= min(active), then all n < N is released. + // From nextHandled >= N and #3, all n < N is acquired. + // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active. + // So from #4, all n < N is released. + // The loop test below is the antecedent of #5. + b.mu2.Lock() + defer b.mu2.Unlock() + for !(b.nextHandled >= n && n <= min(b.active)) { + b.cond.Wait() + } +} + +// min returns the minimum value of the set s, or the largest uint64 if +// s is empty. 
+func min(s map[uint64]bool) uint64 { + var m uint64 = math.MaxUint64 + for n := range s { + if n < m { + m = n + } + } + return m +} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml index 7ef8b6c7f..70ffe89d5 100644 --- a/vendor/google.golang.org/appengine/.travis.yml +++ b/vendor/google.golang.org/appengine/.travis.yml @@ -1,24 +1,20 @@ language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - go_import_path: google.golang.org/appengine install: - - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) - - mkdir /tmp/sdk - - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip" - - unzip -q /tmp/sdk.zip -d /tmp/sdk - - export PATH="$PATH:/tmp/sdk/go_appengine" - - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py + - ./travis_install.sh script: - - goapp version - - go version - - go test -v google.golang.org/appengine/... - - go test -v -race google.golang.org/appengine/... - - goapp test -v google.golang.org/appengine/... + - ./travis_test.sh + +matrix: + include: + - go: 1.8.x + env: GOAPP=true + - go: 1.9.x + env: GOAPP=true + - go: 1.10.x + env: GOAPP=false + - go: 1.11.x + env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 76dedc81d..795254549 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -60,6 +60,24 @@ func IsDevAppServer() bool { return internal.IsDevAppServer() } +// IsStandard reports whether the App Engine app is running in the standard +// environment. This includes both the first generation runtimes (<= Go 1.9) +// and the second generation runtimes (>= Go 1.11). +func IsStandard() bool { + return internal.IsStandard() +} + +// IsFlex reports whether the App Engine app is running in the flexible environment. +func IsFlex() bool { + return internal.IsFlex() +} + +// IsAppEngine reports whether the App Engine app is running on App Engine, in either +// the standard or flexible environment. +func IsAppEngine() bool { + return internal.IsAppEngine() +} + // NewContext returns a context for an in-flight HTTP request. // This function is cheap. func NewContext(req *http.Request) context.Context { diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum index 5e644c2e9..1a221c089 100644 --- a/vendor/google.golang.org/appengine/go.sum +++ b/vendor/google.golang.org/appengine/go.sum @@ -1,3 +1,6 @@ +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 16f87c5d3..c95149525 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
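For reference, a minimal usage sketch of the Bundler API introduced a few hunks above (NewBundler, the thresholds, Add, Flush). The import path and the Entry type are assumptions for illustration; only the exported names and signatures come from the vendored code.

```go
// Sketch only: exercises the Bundler API shown in the diff above.
// The import path is assumed to match the vendored package; Entry is a
// made-up item type for the example.
package main

import (
	"fmt"
	"time"

	"google.golang.org/api/support/bundler"
)

type Entry struct{ Msg string }

func main() {
	// Because &Entry{} is the itemExample, the handler receives a []*Entry.
	b := bundler.NewBundler(&Entry{}, func(items interface{}) {
		entries := items.([]*Entry)
		fmt.Printf("handling a bundle of %d entries\n", len(entries))
	})
	// Configure thresholds before calling Add, as the doc comment requires.
	b.DelayThreshold = 100 * time.Millisecond
	b.BundleCountThreshold = 10

	for i := 0; i < 25; i++ {
		// The second argument is the item's size in bytes, counted against
		// BundleByteThreshold / BundleByteLimit / BufferedByteLimit.
		if err := b.Add(&Entry{Msg: "hello"}, 16); err != nil {
			fmt.Println("Add:", err) // ErrOversizedItem or ErrOverflow
		}
	}
	// Flush hands off any remaining items and waits for the handler to finish.
	b.Flush()
}
```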
// +build !appengine -// +build go1.7 package internal @@ -130,7 +129,13 @@ func handleHTTP(w http.ResponseWriter, r *http.Request) { flushes++ } c.pendingLogs.Unlock() - go c.flushLog(false) + flushed := make(chan struct{}) + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) // Avoid nil Write call if c.Write is never called. @@ -140,6 +145,9 @@ func handleHTTP(w http.ResponseWriter, r *http.Request) { if c.outBody != nil { w.Write(c.outBody) } + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed } func executeRequestSafely(c *context, r *http.Request) { diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go deleted file mode 100644 index 028b4f056..000000000 --- a/vendor/google.golang.org/appengine/internal/api_pre17.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine -// +build !go1.7 - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. 
- const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - ticket := DefaultTicket() - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return req, func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) 
- return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. - if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
- if ticket == "" { - ticket = DefaultTicket() - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. - const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index d538701ab..6d89d6372 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -4,11 +4,46 @@ package internal -import netcontext "golang.org/x/net/context" +import ( + "os" -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. + netcontext "golang.org/x/net/context" +) +var ( + // This is set to true in identity_classic.go, which is behind the appengine build tag. + // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not + // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a + // first-gen runtime. See IsStandard below for the second-gen check. + appengineStandard bool + + // This is set to true in identity_flex.go, which is behind the appenginevm build tag. + appengineFlex bool +) + +// AppID is the implementation of the wrapper function of the same name in +// ../identity.go. See that file for commentary. func AppID(c netcontext.Context) string { return appID(FullyQualifiedAppID(c)) } + +// IsStandard is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. +func IsStandard() bool { + // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not + // second-gen (>= Go 1.11). Second-gen runtimes set $GAE_ENV so we use that + // to check if we're on a second-gen runtime. + return appengineStandard || os.Getenv("GAE_ENV") == "standard" +} + +// IsFlex is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. +func IsFlex() bool { + return appengineFlex +} + +// IsAppEngine is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. 
+func IsAppEngine() bool { + return IsStandard() || IsFlex() +} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index b59603f13..4e979f45e 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -12,6 +12,10 @@ import ( netcontext "golang.org/x/net/context" ) +func init() { + appengineStandard = true +} + func DefaultVersionHostname(ctx netcontext.Context) string { c := fromContext(ctx) if c == nil { diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go new file mode 100644 index 000000000..d5e2e7b5e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -0,0 +1,11 @@ +// Copyright 2018 Google LLC. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appenginevm + +package internal + +func init() { + appengineFlex = true +} diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh new file mode 100755 index 000000000..785b62f46 --- /dev/null +++ b/vendor/google.golang.org/appengine/travis_install.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -e + +if [[ $GO111MODULE == "on" ]]; then + go get . +else + go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) +fi + +if [[ $GOAPP == "true" ]]; then + mkdir /tmp/sdk + curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" + unzip -q /tmp/sdk.zip -d /tmp/sdk + # NOTE: Set the following env vars in the test script: + # export PATH="$PATH:/tmp/sdk/go_appengine" + # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py +fi + diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh new file mode 100755 index 000000000..d4390f045 --- /dev/null +++ b/vendor/google.golang.org/appengine/travis_test.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +go version +go test -v google.golang.org/appengine/... +go test -v -race google.golang.org/appengine/... +if [[ $GOAPP == "true" ]]; then + export PATH="$PATH:/tmp/sdk/go_appengine" + export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py + goapp version + goapp test -v google.golang.org/appengine/... 
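A brief sketch of how application code might consume the new environment predicates (IsStandard, IsFlex, IsAppEngine) wired up in the appengine and appengine/internal changes above; the surrounding program structure is illustrative, not part of the vendored change.

```go
// Sketch only: the predicates come from the appengine package changes above;
// everything else here is illustrative.
package main

import (
	"log"

	"google.golang.org/appengine"
)

func describeEnv() string {
	if !appengine.IsAppEngine() {
		return "not on App Engine"
	}
	if appengine.IsFlex() {
		// Set via the appenginevm build tag (identity_flex.go above).
		return "App Engine flexible"
	}
	// IsStandard covers first-gen runtimes (appengine build tag) and
	// second-gen runtimes detected through $GAE_ENV == "standard".
	return "App Engine standard"
}

func main() {
	log.Printf("running on: %s", describeEnv())
}
```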
+fi diff --git a/vendor/google.golang.org/genproto/.travis.yml b/vendor/google.golang.org/genproto/.travis.yml index bbe8fb300..df73d9d5c 100644 --- a/vendor/google.golang.org/genproto/.travis.yml +++ b/vendor/google.golang.org/genproto/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.6 - - 1.7 - - 1.x + - 1.9.x + - 1.10.x + - 1.11.x go_import_path: google.golang.org/genproto script: diff --git a/vendor/google.golang.org/genproto/go.mod b/vendor/google.golang.org/genproto/go.mod new file mode 100644 index 000000000..d81a7c327 --- /dev/null +++ b/vendor/google.golang.org/genproto/go.mod @@ -0,0 +1,7 @@ +module google.golang.org/genproto + +require ( + github.com/golang/protobuf v1.2.0 + golang.org/x/net v0.0.0-20181106065722-10aee1819953 + google.golang.org/grpc v1.16.0 +) diff --git a/vendor/google.golang.org/genproto/go.sum b/vendor/google.golang.org/genproto/go.sum new file mode 100644 index 000000000..623d8f8f2 --- /dev/null +++ b/vendor/google.golang.org/genproto/go.sum @@ -0,0 +1,26 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953 h1:LuZIitY8waaxUfNIdtajyE/YzA/zyf0YxXG27VpLrkg= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.16.0 h1:dz5IJGuC2BB7qXR5AyHNwAUBhZscK2xVez7mznh72sY= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index fa501a990..a77558572 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -26,7 +26,8 @@ before_install: - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi install: - - if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi + - try3() { eval "$*" || eval "$*" || eval "$*"; } + - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index eb2d2a7cf..41a754f97 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -17,10 +17,10 @@ proto: go generate google.golang.org/grpc/... test: testdeps - go test -cpu 1,4 -timeout 5m google.golang.org/grpc/... + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 5m google.golang.org/grpc/... + goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... testappenginedeps: goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 789adfd65..29ffb00d3 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -43,3 +43,25 @@ Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` - `protoc --go_out=plugins=grpc:. *.proto` + +#### How to turn on logging + +The default logger is controlled by the environment variables. Turn everything +on by setting: + +``` +GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +#### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 069feb1e7..ee1703f03 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -28,6 +28,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -88,7 +89,12 @@ type SubConn interface { } // NewSubConnOptions contains options to create new SubConn. -type NewSubConnOptions struct{} +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + CredsBundle credentials.Bundle +} // ClientConn represents a gRPC ClientConn. 
// @@ -125,6 +131,8 @@ type BuildOptions struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle // Dialer is the custom dialer the Balancer implementation can use to dial // to a remote load balancer server. The Balancer implementations // can ignore this if it doesn't need to talk to remote balancer. @@ -147,12 +155,17 @@ type PickOptions struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string + // Header contains the metadata from the RPC's client header. The metadata + // should not be modified; make a copy first if needed. + Header metadata.MD } // DoneInfo contains additional information for done. type DoneInfo struct { // Err is the rpc error the RPC finished with. It could be nil. Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD // BytesSent indicates if any bytes have been sent to the server. BytesSent bool // BytesReceived indicates if any byte has been received from the server. diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index c23f81706..1ab95fde2 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -197,7 +197,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer if ccb.subConns == nil { return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") } - ac, err := ccb.cc.newAddrConn(addrs) + ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { return nil, err } @@ -257,6 +257,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { } if !acbw.ac.tryUpdateAddrs(addrs) { cc := acbw.ac.cc + opts := acbw.ac.scopts acbw.ac.mu.Lock() // Set old ac.acbw to nil so the Shutdown state update will be ignored // by balancer. @@ -272,7 +273,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { return } - ac, err := cc.newAddrConn(addrs) + ac, err := cc.newAddrConn(addrs, opts) if err != nil { grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 318ac4073..f49ac3f9b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -30,7 +30,6 @@ import ( "time" "golang.org/x/net/context" - "golang.org/x/net/trace" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. "google.golang.org/grpc/codes" @@ -41,6 +40,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. @@ -80,6 +80,9 @@ var ( // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. 
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") // errTransportCredentialsMissing indicates that users want to transmit security // information (e.g., oauth2 token) which requires secure connection on an insecure // connection. @@ -137,17 +140,33 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtINFO, + }, + }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + }) } + cc.csMgr.channelzID = cc.channelzID } if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } } else { - if cc.dopts.copts.TransportCredentials != nil { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { return nil, errCredentialsConflict } for _, cd := range cc.dopts.copts.PerRPCCredentials { @@ -260,6 +279,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.balancerBuildOpts = balancer.BuildOptions{ DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, ChannelzParentID: cc.channelzID, } @@ -308,6 +328,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} + channelzID int64 } // updateState updates the connectivity.State of ClientConn. @@ -323,6 +344,12 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + if channelz.IsOn() { + channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel Connectivity change to %v", state), + Severity: channelz.CtINFO, + }) + } if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -500,10 +527,26 @@ func (cc *ClientConn) switchBalancer(name string) { } builder := balancer.Get(name) + // TODO(yuxuanli): If user send a service config that does not contain a valid balancer name, should + // we reuse previous one? 
+ if channelz.IsOn() { + if builder == nil { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), + Severity: channelz.CtWarning, + }) + } else { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), + Severity: channelz.CtINFO, + }) + } + } if builder == nil { grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() } + cc.preBalancerName = cc.curBalancerName cc.curBalancerName = builder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) @@ -524,13 +567,15 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ - cc: cc, - addrs: addrs, - dopts: cc.dopts, - czData: new(channelzData), - resetBackoff: make(chan struct{}), + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + successfulHandshake: true, // make the first nextAddr() call _not_ move addrIdx up by 1 + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -541,6 +586,14 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) } cc.conns[ac] = struct{}{} cc.mu.Unlock() @@ -590,11 +643,9 @@ func (cc *ClientConn) incrCallsFailed() { atomic.AddInt64(&cc.czData.callsFailed, 1) } -// connect starts to creating transport and also starts the transport monitor -// goroutine for this ac. +// connect starts creating a transport. // It does nothing if the ac is not IDLE. // TODO(bar) Move this to the addrConn section. -// This was part of resetAddrConn, keep it here to make the diff look clean. func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -605,22 +656,12 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.state = connectivity.Connecting + ac.updateConnectivityState(connectivity.Connecting) ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.mu.Unlock() // Start a goroutine connecting to the server asynchronously. - go func() { - if err := ac.resetTransport(); err != nil { - grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. 
- ac.tearDown(err) - } - return - } - ac.transportMonitor() - }() + go ac.resetTransport(false) return nil } @@ -649,7 +690,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs - ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list. + ac.addrIdx = 0 // Start reconnecting from beginning in the new list. } return curAddrFound @@ -675,8 +716,10 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + hdr, _ := metadata.FromOutgoingContext(ctx) t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ FullMethodName: method, + Header: hdr, }) if err != nil { return nil, nil, toRPCErr(err) @@ -690,11 +733,29 @@ func (cc *ClientConn) handleServiceConfig(js string) error { if cc.dopts.disableServiceConfig { return nil } + if cc.scRaw == js { + return nil + } + if channelz.IsOn() { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + // The special formatting of \"%s\" instead of %q is to provide nice printing of service config + // for human consumption. + Desc: fmt.Sprintf("Channel has a new service config \"%s\"", js), + Severity: channelz.CtINFO, + }) + } sc, err := parseServiceConfig(js) if err != nil { return err } cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } cc.scRaw = js cc.sc = sc @@ -788,6 +849,19 @@ func (cc *ClientConn) Close() error { ac.tearDown(ErrClientConnClosing) } if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtINFO, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtINFO, + } + } + channelz.AddTraceEvent(cc.channelzID, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity beng deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(cc.channelzID) } return nil @@ -799,26 +873,25 @@ type addrConn struct { cancel context.CancelFunc cc *ClientConn - addrs []resolver.Address dopts dialOptions - events trace.EventLog acbw balancer.SubConn + scopts balancer.NewSubConnOptions - mu sync.Mutex - curAddr resolver.Address - reconnectIdx int // The index in addrs list to start reconnecting from. - state connectivity.State - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. - ready chan struct{} - transport transport.ClientTransport + transport transport.ClientTransport // The current transport. - // The reason this addrConn is torn down. - tearDownErr error + mu sync.Mutex + addrIdx int // The index in addrs list to start reconnecting from. + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. - connectRetryNum int + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + tearDownErr error // The reason this addrConn is torn down. 
+ + backoffIdx int // backoffDeadline is the time until which resetTransport needs to - // wait before increasing connectRetryNum count. + // wait before increasing backoffIdx count. backoffDeadline time.Time // connectDeadline is the time by which all connection // negotiations must complete. @@ -828,6 +901,19 @@ type addrConn struct { channelzID int64 // channelz unique identification number czData *channelzData + + successfulHandshake bool +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State) { + ac.state = s + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel Connectivity change to %v", s), + Severity: channelz.CtINFO, + }) + } } // adjustParams updates parameters used to create transports upon @@ -844,301 +930,328 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -// printf records an event in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) printf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Printf(format, a...) - } -} - -// resetTransport recreates a transport to the address for ac. The old -// transport will close itself on error or when the clientconn is closed. -// The created transport must receive initial settings frame from the server. -// In case that doesn't happen, transportMonitor will kill the newly created -// transport after connectDeadline has expired. -// In case there was an error on the transport before the settings frame was -// received, resetTransport resumes connecting to backends after the one that -// was previously connected to. In case end of the list is reached, resetTransport -// backs off until the original deadline. -// If the DialOption WithWaitForHandshake was set, resetTrasport returns -// successfully only after server settings are received. +// resetTransport makes sure that a healthy ac.transport exists. // -// TODO(bar) make sure all state transitions are valid. -func (ac *addrConn) resetTransport() error { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return errConnClosing - } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - ac.transport = nil - ridx := ac.reconnectIdx - ac.mu.Unlock() - ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp - ac.cc.mu.RUnlock() - var backoffDeadline, connectDeadline time.Time - var resetBackoff chan struct{} - for connectRetryNum := 0; ; connectRetryNum++ { - ac.mu.Lock() - if ac.backoffDeadline.IsZero() { - // This means either a successful HTTP2 connection was established - // or this is the first time this addrConn is trying to establish a - // connection. - backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration. - resetBackoff = ac.resetBackoff - // This will be the duration that dial gets to finish. - dialDuration := getMinConnectTimeout() - if backoffFor > dialDuration { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - start := time.Now() - backoffDeadline = start.Add(backoffFor) - connectDeadline = start.Add(dialDuration) - ridx = 0 // Start connecting from the beginning. - } else { - // Continue trying to connect with the same deadlines. 
- connectRetryNum = ac.connectRetryNum - backoffDeadline = ac.backoffDeadline - connectDeadline = ac.connectDeadline - ac.backoffDeadline = time.Time{} - ac.connectDeadline = time.Time{} - ac.connectRetryNum = 0 +// The transport will close itself when it encounters an error, or on GOAWAY, or on deadline waiting for handshake, or +// when the clientconn is closed. Each iteration creating a new transport will try a different address that the balancer +// assigned to the addrConn, until it has tried all addresses. Once it has tried all addresses, it will re-resolve to +// get a new address list. If an error is received, the list is re-resolved and the next reset attempt will try from the +// beginning. This method has backoff built in. The backoff amount starts at 0 and increases each time resolution occurs +// (addresses are exhausted). The backoff amount is reset to 0 each time a handshake is received. +// +// If the DialOption WithWaitForHandshake was set, resetTransport returns successfully only after handshake is received. +func (ac *addrConn) resetTransport(resolveNow bool) { + for { + // If this is the first in a line of resets, we want to resolve immediately. The only other time we + // want to reset is if we have tried all the addresses handed to us. + if resolveNow { + ac.mu.Lock() + ac.cc.resolveNow(resolver.ResolveNowOption{}) + ac.mu.Unlock() } + + ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return errConnClosing + return } - ac.printf("connecting") - if ac.state != connectivity.Connecting { - ac.state = connectivity.Connecting + + // If the connection is READY, a failure must have occurred. + // Otherwise, we'll consider this is a transient failure when: + // We've exhausted all addresses + // We're in CONNECTING + // And it's not the very first addr to try TODO(deklerk) find a better way to do this than checking ac.successfulHandshake + if ac.state == connectivity.Ready || (ac.addrIdx == len(ac.addrs)-1 && ac.state == connectivity.Connecting && !ac.successfulHandshake) { + ac.updateConnectivityState(connectivity.TransientFailure) ac.cc.handleSubConnStateChange(ac.acbw, ac.state) } - // copy ac.addrs in case of race - addrsIter := make([]resolver.Address, len(ac.addrs)) - copy(addrsIter, ac.addrs) - copts := ac.dopts.copts + ac.transport = nil ac.mu.Unlock() - connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts, resetBackoff) - if err != nil { - return err + + if err := ac.nextAddr(); err != nil { + return } - if connected { - return nil + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return } + + backoffIdx := ac.backoffIdx + backoffFor := ac.dopts.bs.Backoff(backoffIdx) + + // This will be the duration that dial gets to finish. + dialDuration := getMinConnectTimeout() + if backoffFor > dialDuration { + // Give dial more time as we keep failing to connect. 
+ dialDuration = backoffFor + } + start := time.Now() + connectDeadline := start.Add(dialDuration) + ac.backoffDeadline = start.Add(backoffFor) + ac.connectDeadline = connectDeadline + + ac.mu.Unlock() + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + ac.mu.Lock() + + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + if ac.state != connectivity.Connecting { + ac.updateConnectivityState(connectivity.Connecting) + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + } + + addr := ac.addrs[ac.addrIdx] + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), + Severity: channelz.CtINFO, + }) + } + + if err := ac.createTransport(backoffIdx, addr, copts, connectDeadline); err != nil { + continue + } + + return } } // createTransport creates a connection to one of the backends in addrs. -// It returns true if a connection was established. -func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions, resetBackoff chan struct{}) (bool, error) { - for i := ridx; i < len(addrs); i++ { - addr := addrs[i] - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: ac.cc.authority, +func (ac *addrConn) createTransport(backoffNum int, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + oneReset := sync.Once{} + skipReset := make(chan struct{}) + allowedToReset := make(chan struct{}) + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + + var prefaceMu sync.Mutex + var serverPrefaceReceived bool + var clientPrefaceWrote bool + + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + select { + case <-skipReset: // The outer resetTransport loop will handle reconnection. + return + case <-allowedToReset: // We're in the clear to reset. + go oneReset.Do(func() { ac.resetTransport(false) }) } - done := make(chan struct{}) - onPrefaceReceipt := func() { - ac.mu.Lock() - close(done) - if !ac.backoffDeadline.IsZero() { - // If we haven't already started reconnecting to - // other backends. - // Note, this can happen when writer notices an error - // and triggers resetTransport while at the same time - // reader receives the preface and invokes this closure. - ac.backoffDeadline = time.Time{} - ac.connectDeadline = time.Time{} - ac.connectRetryNum = 0 - } - ac.mu.Unlock() + } + + prefaceTimer := time.NewTimer(connectDeadline.Sub(time.Now())) + + onClose := func() { + close(onCloseCalled) + prefaceTimer.Stop() + + select { + case <-skipReset: // The outer resetTransport loop will handle reconnection. + return + case <-allowedToReset: // We're in the clear to reset. + oneReset.Do(func() { ac.resetTransport(false) }) } - // Do not cancel in the success path because of - // this issue in Go1.6: https://github.com/golang/go/issues/15078. 
- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID + } + + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + prefaceTimer.Stop() + + // TODO(deklerk): optimization; does anyone else actually use this lock? maybe we can just remove it for this scope + ac.mu.Lock() + + prefaceMu.Lock() + serverPrefaceReceived = true + if clientPrefaceWrote { + ac.successfulHandshake = true + ac.backoffDeadline = time.Time{} + ac.connectDeadline = time.Time{} + ac.addrIdx = 0 + ac.backoffIdx = 0 } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt) - if err != nil { - cancel() - ac.cc.blockingpicker.updateConnectionError(err) - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - return false, errConnClosing - } - ac.mu.Unlock() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) - continue + prefaceMu.Unlock() + + ac.mu.Unlock() + } + + // Do not cancel in the success path because of this issue in Go1.6: https://github.com/golang/go/issues/15078. + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + + if err == nil { + prefaceMu.Lock() + clientPrefaceWrote = true + if serverPrefaceReceived { + ac.successfulHandshake = true } + prefaceMu.Unlock() + if ac.dopts.waitForHandshake { select { - case <-done: - case <-connectCtx.Done(): - // Didn't receive server preface, must kill this new transport now. - grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.") + case <-prefaceTimer.C: + // We didn't get the preface in time. newTr.Close() - continue - case <-ac.ctx.Done(): + err = errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + close(allowedToReset) + return nil } + } else { + go func() { + select { + case <-prefaceTimer.C: + // We didn't get the preface in time. + newTr.Close() + case <-prefaceReceived: + // We got the preface just in the nick of time - huzzah! + case <-onCloseCalled: + // The transport has already closed - noop. + } + }() } + } + + if err != nil { + // newTr is either nil, or closed. + cancel() + ac.cc.blockingpicker.updateConnectionError(err) ac.mu.Lock() if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. ac.mu.Unlock() - // ac.tearDonn(...) has been invoked. - newTr.Close() - return false, errConnClosing - } - ac.printf("ready") - ac.state = connectivity.Ready - ac.cc.handleSubConnStateChange(ac.acbw, ac.state) - ac.transport = newTr - ac.curAddr = addr - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - select { - case <-done: - // If the server has responded back with preface already, - // don't set the reconnect parameters. - default: - ac.connectRetryNum = connectRetryNum - ac.backoffDeadline = backoffDeadline - ac.connectDeadline = connectDeadline - ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list. 
+ + // We don't want to reset during this close because we prefer to kick out of this function and let the loop + // in resetTransport take care of reconnecting. + close(skipReset) + + return errConnClosing } ac.mu.Unlock() - return true, nil + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + + // We don't want to reset during this close because we prefer to kick out of this function and let the loop + // in resetTransport take care of reconnecting. + close(skipReset) + + return err } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return false, errConnClosing + + // We don't want to reset during this close because we prefer to kick out of this function and let the loop + // in resetTransport take care of reconnecting. + close(skipReset) + + newTr.Close() + return errConnClosing } - ac.state = connectivity.TransientFailure + + ac.updateConnectivityState(connectivity.Ready) ac.cc.handleSubConnStateChange(ac.acbw, ac.state) - ac.cc.resolveNow(resolver.ResolveNowOption{}) - if ac.ready != nil { - close(ac.ready) - ac.ready = nil + ac.transport = newTr + ac.curAddr = addr + + ac.mu.Unlock() + + // Ok, _now_ we will finally let the transport reset if it encounters a closable error. Without this, the reader + // goroutine failing races with all the code in this method that sets the connection to "ready". + close(allowedToReset) + return nil +} + +// nextAddr increments the addrIdx if there are more addresses to try. If +// there are no more addrs to try it will re-resolve, set addrIdx to 0, and +// increment the backoffIdx. +// +// nextAddr must be called without ac.mu being held. +func (ac *addrConn) nextAddr() error { + ac.mu.Lock() + + // If a handshake has been observed, we expect the counters to have manually + // been reset so we'll just return, since we want the next usage to start + // at index 0. + if ac.successfulHandshake { + ac.successfulHandshake = false + ac.mu.Unlock() + return nil } + + if ac.addrIdx < len(ac.addrs)-1 { + ac.addrIdx++ + ac.mu.Unlock() + return nil + } + + ac.addrIdx = 0 + ac.backoffIdx++ + + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + ac.cc.resolveNow(resolver.ResolveNowOption{}) + backoffDeadline := ac.backoffDeadline + b := ac.resetBackoff ac.mu.Unlock() timer := time.NewTimer(backoffDeadline.Sub(time.Now())) select { case <-timer.C: - case <-resetBackoff: + case <-b: timer.Stop() case <-ac.ctx.Done(): timer.Stop() - return false, ac.ctx.Err() + return ac.ctx.Err() } - return false, nil + return nil } func (ac *addrConn) resetConnectBackoff() { ac.mu.Lock() close(ac.resetBackoff) + ac.backoffIdx = 0 ac.resetBackoff = make(chan struct{}) - ac.connectRetryNum = 0 ac.mu.Unlock() } -// Run in a goroutine to track the error in transport and create the -// new transport if an error happens. It returns when the channel is closing. -func (ac *addrConn) transportMonitor() { - for { - var timer *time.Timer - var cdeadline <-chan time.Time - ac.mu.Lock() - t := ac.transport - if !ac.connectDeadline.IsZero() { - timer = time.NewTimer(ac.connectDeadline.Sub(time.Now())) - cdeadline = timer.C - } - ac.mu.Unlock() - // Block until we receive a goaway or an error occurs. 
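Editor's aside: createTransport above gates its onGoAway/onClose callbacks on a pair of signal channels (allowedToReset / skipReset) plus a sync.Once, so a reconnect is kicked off at most once and only after the caller has finished wiring up the transport. A self-contained sketch of that gating pattern, with illustrative names rather than the transport internals:

package main

import (
	"fmt"
	"sync"
)

// gatedCallback reproduces the pattern in isolation: the callback blocks until
// the caller either opens the gate (allow) or tells it to stand down (skip),
// and a sync.Once guarantees the reconnect action runs at most once even if
// both the GOAWAY and close paths fire.
func gatedCallback() (fire, allow, skip func()) {
	var once sync.Once
	allowed := make(chan struct{})
	skipped := make(chan struct{})

	fire = func() {
		select {
		case <-skipped: // caller will handle reconnection itself
			return
		case <-allowed: // safe to reconnect from here
			once.Do(func() { fmt.Println("reconnect once") })
		}
	}
	allow = func() { close(allowed) }
	skip = func() { close(skipped) }
	return fire, allow, skip
}

func main() {
	fire, allow, _ := gatedCallback()
	allow()
	fire()
	fire() // second invocation is a no-op thanks to sync.Once
}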
- select { - case <-t.GoAway(): - done := t.Error() - cleanup := t.Close - // Since this transport will be orphaned (won't have a transportMonitor) - // we need to launch a goroutine to keep track of clientConn.Close() - // happening since it might not be noticed by any other goroutine for a while. - go func() { - <-done - cleanup() - }() - case <-t.Error(): - // In case this is triggered because clientConn.Close() - // was called, we want to immeditately close the transport - // since no other goroutine might notice it for a while. - t.Close() - case <-cdeadline: - ac.mu.Lock() - // This implies that client received server preface. - if ac.backoffDeadline.IsZero() { - ac.mu.Unlock() - continue - } - ac.mu.Unlock() - timer = nil - // No server preface received until deadline. - // Kill the connection. - grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.") - t.Close() - } - if timer != nil { - timer.Stop() - } - // If a GoAway happened, regardless of error, adjust our keepalive - // parameters as appropriate. - select { - case <-t.GoAway(): - ac.adjustParams(t.GetGoAwayReason()) - default: - } - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - // Set connectivity state to TransientFailure before calling - // resetTransport. Transition READY->CONNECTING is not valid. - ac.state = connectivity.TransientFailure - ac.cc.handleSubConnStateChange(ac.acbw, ac.state) - ac.cc.resolveNow(resolver.ResolveNowOption{}) - ac.curAddr = resolver.Address{} - ac.mu.Unlock() - if err := ac.resetTransport(); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - } -} - // getReadyTransport returns the transport if ac's state is READY. // Otherwise it returns nil, false. // If ac's state is IDLE, it will trigger ac to connect. func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { ac.mu.Lock() - if ac.state == connectivity.Ready { + if ac.state == connectivity.Ready && ac.transport != nil { t := ac.transport ac.mu.Unlock() return t, true @@ -1161,34 +1274,42 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { // tight loop. // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { - ac.cancel() ac.mu.Lock() - defer ac.mu.Unlock() if ac.state == connectivity.Shutdown { + ac.mu.Unlock() return } + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancelation / etc. + ac.updateConnectivityState(connectivity.Shutdown) + ac.cancel() + ac.tearDownErr = err + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.curAddr = resolver.Address{} if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or // ii) there are concurrent name resolver/Balancer triggered // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. 
+ ac.mu.Unlock() ac.transport.GracefulClose() - } - ac.state = connectivity.Shutdown - ac.tearDownErr = err - ac.cc.handleSubConnStateChange(ac.acbw, ac.state) - if ac.events != nil { - ac.events.Finish() - ac.events = nil - } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil + ac.mu.Lock() } if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Deleted", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity beng deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(ac.channelzID) } + ac.mu.Unlock() } func (ac *addrConn) getState() connectivity.State { diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 1dae57ab1..6c2b811fd 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -108,6 +108,25 @@ type TransportCredentials interface { OverrideServerName(string) error } +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 3d3c9e231..99b495272 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -286,7 +286,8 @@ func WithInsecure() DialOption { } // WithTransportCredentials returns a DialOption which configures a connection -// level security credentials (e.g., TLS/SSL). +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = creds @@ -301,6 +302,17 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { }) } +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// This API is experimental. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. 
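Editor's aside: taken together, the new credentials.Bundle interface and the WithCredentialsBundle DialOption let a caller hand gRPC one object that supplies both connection-level and per-RPC credentials. A minimal sketch of a bundle that carries only transport credentials; the type and helper below are hypothetical, not part of this patch, and the API is marked experimental:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// tlsOnlyBundle is a hypothetical Bundle that supplies transport credentials
// and no per-RPC credentials; NewWithMode ignores the mode and returns a copy.
type tlsOnlyBundle struct {
	tc credentials.TransportCredentials
}

func (b *tlsOnlyBundle) TransportCredentials() credentials.TransportCredentials { return b.tc }
func (b *tlsOnlyBundle) PerRPCCredentials() credentials.PerRPCCredentials       { return nil }
func (b *tlsOnlyBundle) NewWithMode(mode string) (credentials.Bundle, error) {
	return &tlsOnlyBundle{tc: b.tc}, nil
}

// dialWithBundle shows the option in use; per the doc comments above, it must
// not be combined with WithTransportCredentials.
func dialWithBundle(target string, tc credentials.TransportCredentials) (*grpc.ClientConn, error) {
	return grpc.Dial(target, grpc.WithCredentialsBundle(&tlsOnlyBundle{tc: tc}))
}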
// diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh index d4236f3b8..7c7bcada5 100755 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ b/vendor/google.golang.org/grpc/install_gae.sh @@ -1,6 +1,6 @@ #!/bin/bash TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.64.zip" \ +&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ && unzip -q $TMP.zip -d $TMP \ && export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 586a0336b..6e729fa63 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -27,16 +27,22 @@ import ( "sort" "sync" "sync/atomic" + "time" "google.golang.org/grpc/grpclog" ) +const ( + defaultMaxTraceEntry int32 = 30 +) + var ( db dbWrapper idGen idGenerator // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = 50 - curState int32 + EntryPerPage = 50 + curState int32 + maxTraceEntry = defaultMaxTraceEntry ) // TurnOn turns on channelz data collection. @@ -52,6 +58,22 @@ func IsOn() bool { return atomic.CompareAndSwapInt32(&curState, 1, 1) } +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + // dbWarpper wraps around a reference to internal channelz data storage, and // provide synchronized functionality to set and get the reference. type dbWrapper struct { @@ -146,6 +168,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { nestedChans: make(map[int64]string), id: id, pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } if pid == 0 { db.get().addChannel(id, cn, true, pid, ref) @@ -170,6 +193,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 { sockets: make(map[int64]string), id: id, pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } db.get().addSubChannel(id, sc, pid, ref) return id @@ -226,6 +250,24 @@ func RemoveEntry(id int64) { db.get().removeEntry(id) } +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added +// to the channel trace. +// The Parent field is optional. It is used for event that will be recorded in the entity's parent +// trace also. +type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. +func AddTraceEvent(id int64, desc *TraceEventDesc) { + if getMaxTraceEntry() == 0 { + return + } + db.get().traceEvent(id, desc) +} + // channelMap is the storage data structure for channelz. // Methods of channelMap can be divided in two two categories with respect to locking. // 1. Methods acquire the global lock. 
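Editor's aside: the new SetMaxTraceEntry/AddTraceEvent API is meant to be cheap to call from hot paths: callers check IsOn() first and pass a TraceEventDesc, optionally with a Parent entry that is mirrored into the parent channel's trace, which is exactly how the addrConn code earlier in this patch records address picks. Since the channelz package lives under grpc's internal/ tree, the sketch below reflects in-tree usage only; the id and address are made up:

package example

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

// recordPick guards on IsOn() so tracing costs nothing when channelz is
// disabled, and uses the same Desc/Severity shape as the subchannel trace
// events added elsewhere in this patch.
func recordPick(subChannelID int64, addr string) {
	if !channelz.IsOn() {
		return
	}
	channelz.AddTraceEvent(subChannelID, &channelz.TraceEventDesc{
		Desc:     fmt.Sprintf("Subchannel picks a new address %q to connect", addr),
		Severity: channelz.CtINFO,
	})
}

// limitTrace shows the new knob: it caps the per-entity trace buffer, and
// passing 0 disables channel tracing entirely.
func limitTrace() {
	channelz.SetMaxTraceEntry(64)
}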
@@ -251,6 +293,7 @@ func (c *channelMap) addServer(id int64, s *server) { func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { c.mu.Lock() cn.cm = c + cn.trace.cm = c c.channels[id] = cn if isTopChannel { c.topLevelChannels[id] = struct{}{} @@ -263,6 +306,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { c.mu.Lock() sc.cm = c + sc.trace.cm = c c.subChannels[id] = sc c.findEntry(pid).addChild(id, sc) c.mu.Unlock() @@ -284,16 +328,25 @@ func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref c.mu.Unlock() } -// removeEntry triggers the removal of an entry, which may not indeed delete the -// entry, if it has to wait on the deletion of its children, or may lead to a chain -// of entry deletion. For example, deleting the last socket of a gracefully shutting -// down server will lead to the server being also deleted. +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. +// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. func (c *channelMap) removeEntry(id int64) { c.mu.Lock() c.findEntry(id).triggerDelete() c.mu.Unlock() } +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + // c.mu must be held by the caller. func (c *channelMap) findEntry(id int64) entry { var v entry @@ -347,6 +400,39 @@ func (c *channelMap) deleteEntry(id int64) { } } +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + type int64Slice []int64 func (s int64Slice) Len() int { return len(s) } @@ -408,6 +494,7 @@ func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) { t[i].ChannelData = cn.c.ChannelzMetric() t[i].ID = cn.id t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() } return t, end } @@ -470,7 +557,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, for k := range svrskts { ids = append(ids, k) } - sort.Sort((int64Slice(ids))) + sort.Sort(int64Slice(ids)) idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) count := 0 var end bool @@ -518,6 +605,7 @@ func (c *channelMap) GetChannel(id int64) *ChannelMetric { cm.ChannelData = cn.c.ChannelzMetric() cm.ID = cn.id cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() return cm } @@ -536,6 +624,7 @@ func (c *channelMap) 
GetSubChannel(id int64) *SubChannelMetric { cm.ChannelData = sc.c.ChannelzMetric() cm.ID = sc.id cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() return cm } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 6fd6bb388..17c2274cb 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -20,6 +20,8 @@ package channelz import ( "net" + "sync" + "sync/atomic" "time" "google.golang.org/grpc/connectivity" @@ -40,6 +42,8 @@ type entry interface { // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child // list is now empty. If both conditions are met, then delete self from database. deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 } // dummyEntry is a fake entry to handle entry not found case. @@ -73,6 +77,10 @@ func (*dummyEntry) deleteSelfIfReady() { // code should not reach here. deleteSelfIfReady is always called on an existing entry. } +func (*dummyEntry) getParentID() int64 { + return 0 +} + // ChannelMetric defines the info channelz provides for a specific Channel, which // includes ChannelInternalMetric and channelz-specific data, such as channelz id, // child list, etc. @@ -95,6 +103,8 @@ type ChannelMetric struct { // Note current grpc implementation doesn't allow channel having sockets directly, // therefore, this is field is unused. Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace } // SubChannelMetric defines the info channelz provides for a specific SubChannel, @@ -121,6 +131,8 @@ type SubChannelMetric struct { // Sockets tracks the socket type children of this subchannel in the format of a map // from socket channelz id to corresponding reference string. Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace } // ChannelInternalMetric defines the struct that the implementor of Channel interface @@ -138,7 +150,35 @@ type ChannelInternalMetric struct { CallsFailed int64 // The last time a call was started on the channel. LastCallStartedTimestamp time.Time - //TODO: trace +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. 
+ RefType RefChannelType } // Channel is the interface that should be satisfied in order to be tracked by @@ -147,6 +187,12 @@ type Channel interface { ChannelzMetric() *ChannelInternalMetric } +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + type channel struct { refName string c Channel @@ -156,6 +202,10 @@ type channel struct { id int64 pid int64 cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 } func (c *channel) addChild(id int64, e entry) { @@ -180,25 +230,96 @@ func (c *channel) triggerDelete() { c.deleteSelfIfReady() } -func (c *channel) deleteSelfIfReady() { +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return + return false } - c.cm.deleteEntry(c.id) // not top channel if c.pid != 0 { c.cm.findEntry(c.pid).deleteChild(c.id) } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName } type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 } func (sc *subChannel) addChild(id int64, e entry) { @@ -219,12 +340,82 @@ func (sc *subChannel) triggerDelete() { sc.deleteSelfIfReady() } -func (sc *subChannel) deleteSelfIfReady() { +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { return } sc.cm.deleteEntry(sc.id) - sc.cm.findEntry(sc.pid).deleteChild(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName } // SocketMetric defines the info channelz provides for a specific Socket, which @@ -318,6 +509,10 @@ func (ls *listenSocket) deleteSelfIfReady() { grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") } +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + type normalSocket struct { refName string s Socket @@ -343,6 +538,10 @@ func (ns *normalSocket) deleteSelfIfReady() { grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") } +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + // ServerMetric defines the info channelz provides for a specific Server, which // includes ServerInternalMetric and channelz-specific data, such as channelz id, // child list, etc. @@ -370,7 +569,6 @@ type ServerInternalMetric struct { CallsFailed int64 // The last time a call was started on the server. LastCallStartedTimestamp time.Time - //TODO: trace } // Server is the interface to be satisfied in order to be tracked by channelz as @@ -417,3 +615,88 @@ func (s *server) deleteSelfIfReady() { } s.cm.deleteEntry(s.id) } + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUNKNOWN indicates unknown severity of a trace event. + CtUNKNOWN Severity = iota + // CtINFO indicates info level severity of a trace event. + CtINFO + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. 
+type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 884910c4e..b24600480 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -20,11 +20,13 @@ package channelz -import "google.golang.org/grpc/grpclog" +import ( + "sync" -func init() { - grpclog.Infof("Channelz: socket options are not supported on non-linux os and appengine.") -} + "google.golang.org/grpc/grpclog" +) + +var once sync.Once // SocketOptionData defines the struct to hold socket option data, and related // getter function to obtain info from fd. @@ -35,4 +37,8 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) {} +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c35afb05e..41f8af678 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -20,9 +20,24 @@ // symbols to avoid circular dependencies. package internal +import "golang.org/x/net/context" + var ( // WithContextDialer is exported by clientconn.go WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption // WithResolverBuilder is exported by clientconn.go WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error +) + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 904e790c4..0c3c47e2a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -73,7 +73,7 @@ type http2Client struct { isSecure bool - creds []credentials.PerRPCCredentials + perRPCCreds []credentials.PerRPCCredentials // Boolean to keep track of reading activity on transport. // 1 is true and 0 is false. 
@@ -112,6 +112,9 @@ type http2Client struct { // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number czData *channelzData + + onGoAway func(GoAwayReason) + onClose func() } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { @@ -140,7 +143,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -166,9 +169,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne isSecure bool authInfo credentials.AuthInfo ) - if creds := opts.TransportCredentials; creds != nil { + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -213,7 +227,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne scheme: scheme, activeStreams: make(map[uint32]*Stream), isSecure: isSecure, - creds: opts.PerRPCCredentials, + perRPCCreds: perRPCCreds, kp: kp, statsHandler: opts.StatsHandler, initialWindowSize: initialWindowSize, @@ -223,6 +237,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { @@ -259,6 +275,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne // a dedicated goroutine which reads HTTP2 frame from network. Then it // dispatches the frame to the corresponding stream entity. go t.reader() + // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { @@ -295,6 +312,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } + t.framer.writer.Flush() go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) @@ -443,7 +461,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) func (t *http2Client) createAudience(callHdr *CallHdr) string { // Create an audience string only if needed. - if len(t.creds) == 0 && callHdr.Creds == nil { + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { return "" } // Construct URI required to get auth request metadata. 
@@ -458,7 +476,7 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string { func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { authData := map[string]string{} - for _, c := range t.creds { + for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { if _, ok := status.FromError(err); ok { @@ -664,7 +682,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) { func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { - // If it was already done, return. + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done return } // status and trailers can be updated here without any synchronization because the stream goroutine will @@ -678,8 +698,6 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This will unblock reads eventually. s.write(recvMsg{err: err}) } - // This will unblock write. - close(s.done) // If headerChan isn't closed, then close it. if atomic.SwapUint32(&s.headerDone, 1) == 0 { s.noHeaders = true @@ -715,11 +733,17 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. return true } t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close() error { t.mu.Lock() // Make sure we only Close once. @@ -747,6 +771,7 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } + go t.onClose() return err } @@ -1043,6 +1068,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { close(t.goAway) t.state = draining t.controlBuf.put(&incomingGoAway{}) + + // This has to be a new goroutine because we're still using the current goroutine to read in the transport. + t.onGoAway(t.goAwayReason) } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1145,7 +1173,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !endStream { return } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true) + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1159,15 +1189,16 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() + t.Close() // this kicks off resetTransport, so must be last before return return } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) 
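Editor's aside: the closeStream change above handles two goroutines racing to close the same stream: swapState marks the stream done atomically, and any later caller now blocks on s.done until the first closer has finished tearing things down (which is why close(s.done) moved to the end of the function). In isolation the pattern looks like the sketch below, with illustrative types rather than the transport's:

package example

import "sync/atomic"

type stream struct {
	state uint32        // 0 = active, 1 = done
	done  chan struct{} // closed once teardown has completed
}

func newStream() *stream {
	return &stream{done: make(chan struct{})}
}

// closeOnce lets exactly one caller perform teardown; every later caller
// waits until that teardown has finished before returning.
func (s *stream) closeOnce(teardown func()) {
	if atomic.SwapUint32(&s.state, 1) == 1 {
		<-s.done // someone else is already closing; wait for them
		return
	}
	teardown()
	close(s.done)
}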
if t.keepaliveEnabled { atomic.CompareAndSwapUint32(&t.activity, 0, 1) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() + t.Close() // this kicks off resetTransport, so must be last before return return } t.onSuccess() diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 21da6e80b..77a2cfaae 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -24,6 +24,7 @@ import ( "encoding/base64" "fmt" "io" + "math" "net" "net/http" "strconv" @@ -435,6 +436,10 @@ func decodeTimeout(s string) (time.Duration, error) { if size < 2 { return 0, fmt.Errorf("transport: timeout string is too short: %q", s) } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } unit := timeoutUnit(s[size-1]) d, ok := timeoutUnitToDuration(unit) if !ok { @@ -444,6 +449,11 @@ func decodeTimeout(s string) (time.Duration, error) { if err != nil { return 0, err } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } return d * time.Duration(t), nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index fdf8ad684..1be518a62 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -465,8 +465,12 @@ type ConnectOptions struct { FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials - // TransportCredentials stores the Authenticator required to setup a client connection. + // TransportCredentials stores the Authenticator required to setup a client + // connection. Only one of TransportCredentials and CredsBundle is non-nil. TransportCredentials credentials.TransportCredentials + // CredsBundle is the credentials bundle to be used. Only one of + // TransportCredentials and CredsBundle is non-nil. + CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. @@ -494,8 +498,8 @@ type TargetInfo struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index f8adc7e6d..78eea1fc9 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -16,7 +16,8 @@ * */ -// Package keepalive defines configurable parameters for point-to-point healthcheck. 
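Editor's aside: the decodeTimeout hardening above rejects timeout strings longer than the spec's eight digits plus unit, and clamps hour-denominated values that would overflow time.Duration. A small sketch of just the clamping arithmetic:

package example

import (
	"math"
	"time"
)

// clampHours mirrors the overflow guard added to decodeTimeout: t hours is
// returned as a Duration, saturating at math.MaxInt64 instead of wrapping.
func clampHours(t int64) time.Duration {
	const maxHours = math.MaxInt64 / int64(time.Hour)
	if t > maxHours {
		return time.Duration(math.MaxInt64)
	}
	return time.Duration(t) * time.Hour
}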
+// Package keepalive defines configurable parameters for point-to-point +// healthcheck. package keepalive import ( @@ -24,42 +25,59 @@ import ( ) // ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a connection is broken -// and send pings so intermediaries will be aware of the liveness of the connection. -// Make sure these parameters are set in coordination with the keepalive policy on the server, -// as incompatible settings can result in closing of connection. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. type ClientParameters struct { - // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. Time time.Duration // The current default value is infinity. - // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that - // the connection is closed. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. Timeout time.Duration // The current default value is 20 seconds. - // If true, client runs keepalive checks even with no active RPCs. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. PermitWithoutStream bool // false by default. } -// ServerParameters is used to set keepalive and max-age parameters on the server-side. +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. type ServerParameters struct { - // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. - // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. MaxConnectionIdle time.Duration // The current default value is infinity. - // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. - // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. MaxConnectionAge time.Duration // The current default value is infinity. - // MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. 
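Editor's aside: the reworded ClientParameters documentation above spells out the semantics: Time is the idle interval before the client pings, Timeout is how long it waits for a response, and PermitWithoutStream controls pinging when no RPCs are active. A hedged example of wiring these into a dial, using the existing grpc.WithKeepaliveParams option (which is not part of this patch):

package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// dialWithKeepalive pings an idle connection every 30s, gives the server 20s
// to answer, and keeps pinging even when no RPCs are in flight. Keep these in
// step with the server's EnforcementPolicy, or the server may respond with
// GOAWAY and close the connection.
func dialWithKeepalive(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target, grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:                30 * time.Second,
		Timeout:             20 * time.Second,
		PermitWithoutStream: true,
	}))
}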
+ // MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. MaxConnectionAgeGrace time.Duration // The current default value is infinity. - // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. Time time.Duration // The current default value is 2 hours. - // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that - // the connection is closed. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. Timeout time.Duration // The current default value is 20 seconds. } -// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. -// Server will close connection with a client that violates this policy. +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. type EnforcementPolicy struct { - // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. MinTime time.Duration // The current default value is 5 minutes. - // If true, server expects keepalive pings even when there are no active streams(RPCs). + // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. PermitWithoutStream bool // false by default. } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index bf659d49d..bda4309c0 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -56,6 +56,7 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er if b.sc == nil { b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { + //TODO(yuxuanli): why not change the cc state to Idle? grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) return } diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 4ce81671d..4af67422c 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -1,6 +1,6 @@ /* * - * Copyright 2017 gRPC authors. + * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -73,10 +73,7 @@ type dnsBuilder struct { // Build creates and starts a DNS resolver that watches the name resolution of the target. 
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("default DNS resolver does not support custom DNS server") - } - host, port, err := parseTarget(target.Endpoint) + host, port, err := parseTarget(target.Endpoint, defaultPort) if err != nil { return nil, err } @@ -111,6 +108,15 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } + } + d.wg.Add(1) go d.watcher() return d, nil @@ -121,6 +127,12 @@ func (b *dnsBuilder) Scheme() string { return "dns" } +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + // ipResolver watches for the name resolution update for an IP address. type ipResolver struct { cc resolver.ClientConn @@ -161,6 +173,7 @@ type dnsResolver struct { retryCount int host string port string + resolver netResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -218,13 +231,13 @@ func (d *dnsResolver) watcher() { func (d *dnsResolver) lookupSRV() []resolver.Address { var newAddrs []resolver.Address - _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host) + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) return nil } for _, s := range srvs { - lbAddrs, err := lookupHost(d.ctx, s.Target) + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) if err != nil { grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) continue @@ -243,7 +256,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address { } func (d *dnsResolver) lookupTXT() string { - ss, err := lookupTXT(d.ctx, d.host) + ss, err := d.resolver.LookupTXT(d.ctx, d.host) if err != nil { grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) return "" @@ -263,7 +276,7 @@ func (d *dnsResolver) lookupTXT() string { func (d *dnsResolver) lookupHost() []resolver.Address { var newAddrs []resolver.Address - addrs, err := lookupHost(d.ctx, d.host) + addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) return nil @@ -305,16 +318,16 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string, returns formatted host and port info. +// parseTarget takes the user input target string and default port, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. // If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets // are strippd when setting the host. 
// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -func parseTarget(target string) (host, port string, err error) { +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", errMissingAddr } diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go deleted file mode 100644 index b466bc8f6..000000000 --- a/vendor/google.golang.org/grpc/resolver/dns/go17.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build go1.6, !go1.8 - -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package dns - -import ( - "net" - - "golang.org/x/net/context" -) - -var ( - lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } - lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { - return net.LookupSRV(service, proto, name) - } - lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } -) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go deleted file mode 100644 index fa34f14ca..000000000 --- a/vendor/google.golang.org/grpc/resolver/dns/go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build go1.8 - -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package dns - -import "net" - -var ( - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV - lookupTXT = net.DefaultResolver.LookupTXT -) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go19.go b/vendor/google.golang.org/grpc/resolver/dns/go19.go new file mode 100644 index 000000000..9886de275 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go19.go @@ -0,0 +1,54 @@ +// +build go1.9 + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + defaultResolver netResolver = net.DefaultResolver +) + +const defaultDNSSvrPort = "53" + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/pre_go19.go b/vendor/google.golang.org/grpc/resolver/dns/pre_go19.go new file mode 100644 index 000000000..70428113b --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/pre_go19.go @@ -0,0 +1,51 @@ +// +build go1.6, !go1.9 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "fmt" + "net" + + "golang.org/x/net/context" +) + +var ( + defaultResolver netResolver = &preGo19Resolver{} +) + +type preGo19Resolver struct { +} + +func (*preGo19Resolver) LookupHost(ctx context.Context, host string) ([]string, error) { + return net.LookupHost(host) +} + +func (*preGo19Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) +} + +func (*preGo19Resolver) LookupTXT(ctx context.Context, name string) ([]string, error) { + return net.LookupTXT(name) +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server with go < 1.9") +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 494d6931e..a6c02ac9e 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -23,17 +23,19 @@ import ( "strings" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/resolver" ) // ccResolverWrapper is a wrapper on top of cc for resolvers. 
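Illustrative aside (not part of the patch): the customAuthorityResolver added above is what lets a "dns" target's authority name the DNS server to query; before this change Build rejected any authority. A hedged usage sketch, with a made-up DNS server and backend name:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "dns:///host:port" uses the default resolver (net.DefaultResolver on
	// go1.9+); "dns://<authority>/host:port" queries that DNS server instead.
	// Both the DNS server and the backend name here are illustrative.
	conn, err := grpc.Dial("dns://8.8.8.8:53/my-service.example.com:443",
		grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}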
// It implements resolver.ClientConnection interface. type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done chan struct{} + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} + lastAddressesCount int } // split2 returns the values from strings.SplitN(s, sep, 2). @@ -114,6 +116,9 @@ func (ccr *ccResolverWrapper) watcher() { default: } grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(addrs) + } ccr.cc.handleResolvedAddrs(addrs, nil) case sc := <-ccr.scCh: select { @@ -156,3 +161,29 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { } ccr.scCh <- sc } + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) { + if len(addrs) == 0 && ccr.lastAddressesCount != 0 { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Resolver returns an empty address list", + Severity: channelz.CtWarning, + }) + } else if len(addrs) != 0 && ccr.lastAddressesCount == 0 { + var s string + for i, a := range addrs { + if a.ServerName != "" { + s += a.Addr + "(" + a.ServerName + ")" + } else { + s += a.Addr + } + if i != len(addrs)-1 { + s += " " + } + } + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s), + Severity: channelz.CtINFO, + }) + } + ccr.lastAddressesCount = len(addrs) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fa0568302..6849e37a5 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -531,7 +531,10 @@ func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, } cbuf := &bytes.Buffer{} if compressor != nil { - z, _ := compressor.Compress(cbuf) + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } if _, err := z.Write(in); err != nil { return nil, wrapErr(err) } @@ -595,20 +598,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return nil } -// For the two compressor parameters, both should not be set, but if they are, -// dc takes precedence over compressor. -// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) ([]byte, error) { pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return err + return nil, err } if inPayload != nil { inPayload.WireLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return st.Err() + return nil, st.Err() } if pf == compressionMade { @@ -617,23 +617,34 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf if dc != nil { d, err = dc.Do(bytes.NewReader(d)) if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } else { dcReader, err := compressor.Decompress(bytes.NewReader(d)) if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } d, err = ioutil.ReadAll(dcReader) if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } } if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + } + return d, nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, inPayload, compressor) + if err != nil { + return err } if err := c.Unmarshal(d, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 5c7d5b635..920da5e01 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "errors" "fmt" "io" @@ -33,8 +32,6 @@ import ( "sync/atomic" "time" - "io/ioutil" - "golang.org/x/net/context" "golang.org/x/net/trace" @@ -901,76 +898,32 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } - p := &parser{r: stream} - pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). 
- return err - } - if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, inPayload, decomp) if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) - } } return err } if channelz.IsOn() { t.IncrMsgRecv() } - if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return st.Err() - } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), - } - } df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) - } - if pf == compressionMade { - var err error - if dc != nil { - req, err = dc.Do(bytes.NewReader(req)) - if err != nil { - return status.Errorf(codes.Internal, err.Error()) - } - } else { - tmp, _ := decomp.Decompress(bytes.NewReader(req)) - req, err = ioutil.ReadAll(tmp) - if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } - } - if len(req) > s.opts.maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) - } - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } if inPayload != nil { inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) + inPayload.Data = d + inPayload.Length = len(d) sh.HandleRPC(stream.Context(), inPayload) } if trInfo != nil { @@ -1180,47 +1133,27 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } service := sm[:pos] method := sm[pos+1:] - srv, ok := s.m[service] - if !ok { - if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + + if srv, ok := s.m[service]; ok { + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) return } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) - trInfo.tr.SetError() + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return } - errDesc := fmt.Sprintf("unknown service %v", service) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - // Unary RPC or Streaming RPC? 
- if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) - return - } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return - } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) - trInfo.tr.SetError() } + // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } - errDesc := fmt.Sprintf("unknown method %v", method) + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("unknown service %v", service) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index e0d735265..a305fe0a4 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -229,6 +229,9 @@ type jsonSC struct { } func parseServiceConfig(js string) (ServiceConfig, error) { + if len(js) == 0 { + return ServiceConfig{}, fmt.Errorf("no JSON service config provided") + } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 9c61b0945..897321bab 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -126,7 +126,9 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true } - if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { return se.GRPCStatus(), true } return New(codes.Unknown, err.Error()), false @@ -182,7 +184,9 @@ func Code(err error) codes.Code { if err == nil { return codes.OK } - if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { return se.GRPCStatus().Code() } return codes.Unknown diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 65d45a1d9..b71eb3112 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -660,7 +660,14 @@ func (cs *clientStream) CloseSend() error { return nil } cs.sentLast = true - op := func(a *csAttempt) error { return a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) } + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) // We never returned an error here for reasons. 
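Illustrative aside (not part of the patch): the handleStream rewrite above folds the unknown-service and unknown-method paths into a single fallback to s.opts.unknownStreamDesc, which applications populate via grpc.UnknownServiceHandler. A sketch of that option under an assumed listen address and error text:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // illustrative address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	// This handler receives any RPC whose service or method is not registered,
	// which is exactly the fallback the refactored handleStream routes to.
	srv := grpc.NewServer(grpc.UnknownServiceHandler(
		func(srv interface{}, stream grpc.ServerStream) error {
			method, _ := grpc.MethodFromServerStream(stream)
			return status.Errorf(codes.Unimplemented, "no handler for %q", method)
		}))

	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}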
return nil @@ -809,11 +816,14 @@ func (a *csAttempt) finish(err error) { if a.done != nil { br := false + var tr metadata.MD if a.s != nil { br = a.s.BytesReceived() + tr = a.s.Trailer() } a.done(balancer.DoneInfo{ Err: err, + Trailer: tr, BytesSent: a.s != nil, BytesReceived: br, }) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8ee619bf6..d8e0287ef 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.15.0" +const Version = "1.16.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index f42c034f7..eb3287036 100755 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -71,7 +71,7 @@ fi git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read) -git ls-files | xargs dirname | sort | uniq | xargs go run go_vet/vet.go | tee /dev/stderr | (! read) +git ls-files | xargs dirname | sort | uniq | xargs go run test/go_vet/vet.go | tee /dev/stderr | (! read) gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) goimports -l . 2>&1 | tee /dev/stderr | (! read) golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read) @@ -80,7 +80,14 @@ golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | ( # TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' set +o pipefail # vet exits with non-zero error if issues are found -go tool vet -all . 2>&1 | grep -vE 'clientconn.go:.*cancel (function|var)' | tee /dev/stderr | (! read) + +# TODO(deklerk) remove when we drop Go 1.6 support +go tool vet -all . 2>&1 | \ + grep -vE 'clientconn.go:.*cancel (function|var)' | \ + grep -vE '.*transport_test.go:.*cancel' | \ + tee /dev/stderr | \ + (! read) + set -o pipefail git reset --hard HEAD diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc deleted file mode 100644 index 730e569b0..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|&{tU|gG(Cy=+c:u:/p#~["4!nADK merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci - diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md deleted file mode 100644 index 3305db0f6..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . 
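Illustrative aside (not part of the patch): the stream.go hunk above starts copying the RPC trailer into balancer.DoneInfo. A minimal sketch of the kind of pick-done callback that could consume it; the trailer key is hypothetical, and the callback is shown standalone rather than wired into a full Picker:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// onDone is the shape of the done func a custom balancer.Picker returns from
// Pick; with this change di.Trailer carries the server's trailer metadata.
func onDone(di balancer.DoneInfo) {
	if vals := di.Trailer.Get("x-lb-cost"); len(vals) > 0 { // "x-lb-cost" is a made-up key
		fmt.Println("per-RPC cost reported in trailer:", vals[0])
	}
	if di.Err != nil {
		fmt.Println("RPC finished with error:", di.Err)
	}
}

func main() {
	_ = onDone // returned as the done func from a Picker's Pick in real use
}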
- diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md deleted file mode 100644 index 61b183651..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md deleted file mode 100644 index 1791bfa8f..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) -[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose) -[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. This includes support for JSON Web Encryption, -JSON Web Signature, and JSON Web Token standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). -Tables of supported algorithms are shown below. The library supports both -the compact and full serialization formats, and has optional support for -multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. 
- -[Version 2](https://gopkg.in/square/go-jose.v2) -([branch](https://github.com/square/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version: - - import "gopkg.in/square/go-jose.v2" - -The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will -still receive backported bug fixes and security fixes, but otherwise -development is frozen. All new feature development takes place on the `v2` -branch. Version 2 also contains additional sub-packages such as the -[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation -contributed by [@shaxbee](https://github.com/shaxbee). - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The Godoc reference has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 - -2. Only available in version 2 of the package - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which -allows attaching a key id. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) - AES, HMAC | []byte - -1. Only available in version 2 of the package - -## Examples - -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) - -Examples can be found in the Godoc -reference for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util) -subdirectory also contains a small command-line utility which might be useful -as an example. 
diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go deleted file mode 100644 index 5272648fa..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go +++ /dev/null @@ -1,592 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "golang.org/x/crypto/ed25519" - "gopkg.in/square/go-jose.v2/cipher" - "gopkg.in/square/go-jose.v2/json" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -type edEncrypterVerifier struct { - publicKey ed25519.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -type edDecrypterSigner struct { - privateKey ed25519.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. 
-func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { - if sigAlg != EdDSA { - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &edDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. 
- _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - b, err := json.Marshal(&JSONWebKey{ - Key: &priv.PublicKey, - }) - if err != nil { - return nil, nil, err - } - - headers := rawHeader{ - headerEPK: makeRawMessage(b), - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. -func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - epk, err := headers.getEPK() - if err != nil { - return nil, errors.New("square/go-jose: invalid epk header") - } - if epk == nil { - return nil, errors.New("square/go-jose: missing epk header") - } - - publicKey, ok := epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("square/go-jose: invalid public key in epk header") - } - - apuData, err := headers.getAPU() - if err != nil { - return nil, errors.New("square/go-jose: invalid apu header") - } - apvData, err := headers.getAPV() - if err != nil { - return nil, errors.New("square/go-jose: invalid apv header") - } - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) - } - - var keySize int - - algorithm := headers.getAlgorithm() - switch algorithm { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
- return deriveKey(string(headers.getEncryption()), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(string(algorithm), keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - if alg != EdDSA { - return Signature{}, ErrUnsupportedAlgorithm - } - - sig, err := ctx.privateKey.Sign(randReader, payload, crypto.Hash(0)) - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: sig, - protected: &rawHeader{}, - }, nil -} - -func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - if alg != EdDSA { - return ErrUnsupportedAlgorithm - } - ok := ed25519.Verify(ctx.publicKey, payload, signature) - if !ok { - return errors.New("square/go-jose: ed25519 signature failed to verify") - } - return nil -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("square/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go deleted file mode 100644 index 126b85ce2..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. 
- ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures the the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("square/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("square/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("square/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. -func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go deleted file mode 100644 index c128e327f..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go +++ /dev/null @@ -1,62 +0,0 @@ -/*- - * Copyright 2014 Square Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "crypto/ecdsa" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than or equal to 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - return key -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go deleted file mode 100644 index 1d36d5015..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. 
-func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go deleted file mode 100644 index 9ee44f8bd..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/crypter.go +++ /dev/null @@ -1,532 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - "reflect" - - "gopkg.in/square/go-jose.v2/json" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. 
-type Encrypter interface { - Encrypt(plaintext []byte) (*JSONWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) - Options() EncrypterOptions -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator - extraHeaders map[HeaderKey]interface{} -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions struct { - Compression CompressionAlgorithm - - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. - ExtraHeaders map[HeaderKey]interface{} -} - -// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. -func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { - if eo.ExtraHeaders == nil { - eo.ExtraHeaders = map[HeaderKey]interface{}{} - } - eo.ExtraHeaders[k] = v - return eo -} - -// WithContentType adds a content type ("cty") header and returns the updated -// EncrypterOptions. -func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderContentType, contentType) -} - -// WithType adds a type ("typ") header and returns the updated EncrypterOptions. -func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderType, typ) -} - -// Recipient represents an algorithm/key to encrypt messages to. -// -// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used -// on the password-based encryption algorithms PBES2-HS256+A128KW, -// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe -// default of 100000 will be used for the count and a 128-bit random salt will -// be generated. 
-type Recipient struct { - Algorithm KeyAlgorithm - Key interface{} - KeyID string - PBES2Count int - PBES2Salt []byte -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := rcpt.Key.(type) { - case JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case *JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - default: - rawKey = encryptionKey - } - - switch rcpt.Algorithm { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.addRecipient(rcpt) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - if rcpts == nil || len(rcpts) == 0 { - return nil, fmt.Errorf("square/go-jose: recipients is nil or empty") - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - if opts != nil { - encrypter.compressionAlg = opts.Compression - } - - for _, recipient := range rcpts { - err := encrypter.addRecipient(recipient) - if err != nil { - return nil, err - } - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { - var recipientInfo recipientKeyInfo - - switch recipient.Algorithm { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) - } - - recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) - if recipient.KeyID != "" { - recipientInfo.keyID = recipient.KeyID - } - - switch recipient.Algorithm { - case PBES2_HS256_A128KW, 
PBES2_HS384_A192KW, PBES2_HS512_A256KW: - if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { - sr.p2c = recipient.PBES2Count - sr.p2s = recipient.PBES2Salt - } - } - - if err == nil { - ctx.recipients = append(ctx.recipients, recipientInfo) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case string: - return newSymmetricRecipient(alg, []byte(encryptionKey)) - case *JSONWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - recipient.keyID = encryptionKey.KeyID - return recipient, err - default: - return recipientKeyInfo{}, ErrUnsupportedKeyType - } -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case string: - return &symmetricKeyCipher{ - key: []byte(decryptionKey), - }, nil - case JSONWebKey: - return newDecrypter(decryptionKey.Key) - case *JSONWebKey: - return newDecrypter(decryptionKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{} - err := obj.protected.set(headerEncryption, ctx.contentAlg) - if err != nil { - return nil, err - } - - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - err = recipient.header.set(headerAlgorithm, info.keyAlg) - if err != nil { - return nil, err - } - - if info.keyID != "" { - err = recipient.header.set(headerKeyID, info.keyID) - if err != nil { - return nil, err - } - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
- obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - err = obj.protected.set(headerCompression, ctx.compressionAlg) - if err != nil { - return nil, err - } - } - - for k, v := range ctx.extraHeaders { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - (*obj.protected)[k] = makeRawMessage(b) - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -func (ctx *genericEncrypter) Options() EncrypterOptions { - return EncrypterOptions{ - Compression: ctx.compressionAlg, - ExtraHeaders: ctx.extraHeaders, - } -} - -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient -// decryption use DecryptMulti instead. -func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") - } - - critical, err := headers.getCritical() - if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.getEncryption()) - if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - return plaintext, err -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. 
-func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - critical, err := globalHeaders.getCritical() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return -1, Header{}, nil, err - } - - encryption := globalHeaders.getEncryption() - cipher := getContentCipher(encryption) - if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil || err != nil { - return -1, Header{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - sanitized, err := headers.sanitized() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err) - } - - return index, sanitized, plaintext, err -} diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go deleted file mode 100644 index dd1387f3f..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. It implements encryption and signing based on -the JSON Web Encryption and JSON Web Signature standards, with optional JSON -Web Token support available in a sub-package. The library supports both the -compact and full serialization formats, and has optional support for multiple -recipients. - -*/ -package jose diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go deleted file mode 100644 index b9687c647..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/encoding.go +++ /dev/null @@ -1,179 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "io" - "math/big" - "regexp" - - "gopkg.in/square/go-jose.v2/json" -) - -var stripWhitespaceRegex = regexp.MustCompile("\\s") - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - return stripWhitespaceRegex.ReplaceAllString(data, "") -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Compress with DEFLATE -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// Decompress with DEFLATE -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - _, err := io.Copy(output, reader) - if err != nil { - return nil, err - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
-type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64.RawURLEncoding.EncodeToString(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md deleted file mode 100644 index 86de5e558..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go deleted file mode 100644 index 37457e5a8..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/decode.go +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. 
Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. 
-type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. 
-type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. 
-func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. 
- op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. 
- if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. 
- if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. 
- d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go deleted file mode 100644 index 1dae8bb7c..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. 
The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. 
-// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML