Update godeps

Manuel de Brito Fontes 2017-01-05 18:10:28 -03:00
parent 597a0e691a
commit 9085e24a29
50 changed files with 5284 additions and 4676 deletions

822 Godeps/Godeps.json generated

File diff suppressed because it is too large.


@@ -1,9 +0,0 @@
language: go
go: 1.5
sudo: false
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
- $HOME/gopath/bin/goveralls -service=travis-ci


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,164 +0,0 @@
# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
Structs contains various utilities to work with Go (Golang) structs. It was
initially used by me to convert a struct into a `map[string]interface{}`. Over
time I've added other utilities for structs. It's basically a high-level
package based on primitives from the `reflect` package. Feel free to add new
functions or improve the existing code.
## Install
```bash
go get github.com/fatih/structs
```
## Usage and Examples
Just like the standard library `strings` and `bytes` packages, `structs` has
many global functions to manipulate or organize your struct data. Let's define
and declare a struct:
```go
type Server struct {
Name string `json:"name,omitempty"`
ID int
Enabled bool
users []string // not exported
http.Server // embedded
}
server := &Server{
Name: "gopher",
ID: 123456,
Enabled: true,
}
```
```go
// Convert a struct to a map[string]interface{}
// => {"Name":"gopher", "ID":123456, "Enabled":true}
m := structs.Map(server)
// Convert the values of a struct to a []interface{}
// => ["gopher", 123456, true]
v := structs.Values(server)
// Convert the names of a struct to a []string
// (see "Names methods" for more info about fields)
n := structs.Names(server)
// Convert the values of a struct to a []*Field
// (see "Field methods" for more info about fields)
f := structs.Fields(server)
// Return the struct name => "Server"
sn := structs.Name(server)
// Check if any field of the struct has a zero value (is uninitialized).
h := structs.HasZero(server)
// Check if all fields of the struct have zero values (are uninitialized).
z := structs.IsZero(server)
// Check if server is a struct or a pointer to struct
i := structs.IsStruct(server)
```
### Struct methods
The structs functions can also be used as independent methods by creating a new
`*structs.Struct`. This is handy if you want to have more control over the
structs (such as retrieving a single Field).
```go
// Create a new struct type:
s := structs.New(server)
m := s.Map() // Get a map[string]interface{}
v := s.Values() // Get a []interface{}
f := s.Fields() // Get a []*Field
n := s.Names() // Get a []string
f1 := s.Field(name) // Get a *Field based on the given field name
f2, ok := s.FieldOk(name) // Get a *Field based on the given field name
sn := s.Name() // Get the struct name
h := s.HasZero() // Check if any field has a zero value
z := s.IsZero() // Check if all fields have zero values
```
### Field methods
We can easily examine a single Field for more detail. Below you can see how we
get and interact with various field methods:
```go
s := structs.New(server)
// Get the Field struct for the "Name" field
name := s.Field("Name")
// Get the underlying value, value => "gopher"
value := name.Value().(string)
// Set the field's value
name.Set("another gopher")
// Get the field's kind, kind => "string"
name.Kind()
// Check if the field is exported or not
if name.IsExported() {
fmt.Println("Name field is exported")
}
// Check if the value is a zero value, such as "" for string, 0 for int
if !name.IsZero() {
fmt.Println("Name is initialized")
}
// Check if the field is an anonymous (embedded) field
if !name.IsEmbedded() {
fmt.Println("Name is not an embedded field")
}
// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
tagValue := name.Tag("json")
```
Nested structs are supported too:
```go
addrField := s.Field("Server").Field("Addr")
// Get the value for addr
a := addrField.Value().(string)
// Or get all fields
httpServer := s.Field("Server").Fields()
```
We can also get a slice of Fields from the Struct type to iterate over all
fields. This is handy if you wish to examine all fields:
```go
// Convert the fields of a struct to a []*Field
fields := s.Fields()
for _, f := range fields {
fmt.Printf("field name: %+v\n", f.Name())
if f.IsExported() {
fmt.Printf("value : %+v\n", f.Value())
fmt.Printf("is zero : %+v\n", f.IsZero())
}
}
```
## Credits
* [Fatih Arslan](https://github.com/fatih)
* [Cihangir Savas](https://github.com/cihangir)
## License
The MIT License (MIT) - see LICENSE.md for more details


@@ -1,133 +0,0 @@
package structs
import (
"errors"
"fmt"
"reflect"
)
var (
errNotExported = errors.New("field is not exported")
errNotSettable = errors.New("field is not settable")
)
// Field represents a single struct field that encapsulates high level
// functions around the field.
type Field struct {
value reflect.Value
field reflect.StructField
defaultTag string
}
// Tag returns the value associated with key in the tag string. If there is no
// such key in the tag, Tag returns the empty string.
func (f *Field) Tag(key string) string {
return f.field.Tag.Get(key)
}
// Value returns the underlying value of the field. It panics if the field
// is not exported.
func (f *Field) Value() interface{} {
return f.value.Interface()
}
// IsEmbedded returns true if the given field is an anonymous field (embedded)
func (f *Field) IsEmbedded() bool {
return f.field.Anonymous
}
// IsExported returns true if the given field is exported.
func (f *Field) IsExported() bool {
return f.field.PkgPath == ""
}
// IsZero returns true if the given field is not initialized (has a zero value).
// It panics if the field is not exported.
func (f *Field) IsZero() bool {
zero := reflect.Zero(f.value.Type()).Interface()
current := f.Value()
return reflect.DeepEqual(current, zero)
}
// Name returns the name of the given field
func (f *Field) Name() string {
return f.field.Name
}
// Kind returns the field's kind, such as "string", "map", "bool", etc.
func (f *Field) Kind() reflect.Kind {
return f.value.Kind()
}
// Set sets the field to the given value v. It returns an error if the field is not
// settable (not addressable or not exported) or if the given value's type
// doesn't match the field's type.
func (f *Field) Set(val interface{}) error {
// we can't set unexported fields, so be sure this field is exported
if !f.IsExported() {
return errNotExported
}
// even an exported field is not settable if the underlying struct
// value is not addressable (e.g. it was passed in by value)
if !f.value.CanSet() {
return errNotSettable
}
given := reflect.ValueOf(val)
if f.value.Kind() != given.Kind() {
return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
}
f.value.Set(given)
return nil
}
// Zero sets the field to its zero value. It returns an error if the field is not
// settable (not addressable or not exported).
func (f *Field) Zero() error {
zero := reflect.Zero(f.value.Type()).Interface()
return f.Set(zero)
}
// Fields returns a slice of Fields. This is particularly handy to get the fields
// of a nested struct. A struct tag with the content of "-" ignores the
// checking of that particular field. Example:
//
// // Field is ignored by this package.
// Field *http.Request `structs:"-"`
//
// It panics if the field is not exported or if the field's kind is not struct.
func (f *Field) Fields() []*Field {
return getFields(f.value, f.defaultTag)
}
// Field returns the field from a nested struct. It panics if the nested struct
// is not exported or if the field was not found.
func (f *Field) Field(name string) *Field {
field, ok := f.FieldOk(name)
if !ok {
panic("field not found")
}
return field
}
// FieldOk returns the field from a nested struct. The boolean reports whether
// the field was found. It panics if the nested struct is not exported.
func (f *Field) FieldOk(name string) (*Field, bool) {
v := strctVal(f.value.Interface())
t := v.Type()
field, ok := t.FieldByName(name)
if !ok {
return nil, false
}
return &Field{
field: field,
value: v.FieldByName(name),
}, true
}
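As a quick illustration of the settability rules above: Set succeeds only when the struct was handed to structs.New as a pointer, since only then is the underlying value addressable. A minimal sketch (the `Config` type is invented for the example):

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Config struct {
	Addr string
}

func main() {
	// Passing a pointer makes the underlying struct addressable, so Set
	// can modify it; with a plain value it would return the
	// "field is not settable" error instead.
	c := &Config{Addr: "localhost"}
	f := structs.New(c).Field("Addr")
	if err := f.Set(":8080"); err != nil {
		fmt.Println("set failed:", err)
	}
	fmt.Println(c.Addr) // :8080
}
```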


@@ -1,449 +0,0 @@
// Package structs contains various utility functions to work with structs.
package structs
import "reflect"
var (
// DefaultTagName is the default tag name for struct fields, which provides
// a more granular way to tweak how certain structs are handled. Look up the
// relevant functions for more info.
DefaultTagName = "structs" // struct's field default tag name
)
// Struct encapsulates a struct type to provide several high level functions
// around the struct.
type Struct struct {
raw interface{}
value reflect.Value
TagName string
}
// New returns a new *Struct with the struct s. It panics if s's kind is
// not struct.
func New(s interface{}) *Struct {
return &Struct{
raw: s,
value: strctVal(s),
TagName: DefaultTagName,
}
}
// Map converts the given struct to a map[string]interface{}, where the keys
// of the map are the field names and the values of the map are the associated
// values of the fields. The default key string is the struct field name but
// can be changed in the struct field's tag value. The "structs" key in the
// struct's field tag value is the key name. Example:
//
// // Field appears in map as key "myName".
// Name string `structs:"myName"`
//
// A tag value with the content of "-" ignores that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// A tag value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Field is not processed further by this package.
// Field time.Time `structs:"myName,omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// A tag value with the option of "omitempty" ignores that particular field if
// the field value is empty. Example:
//
// // Field appears in map as key "myName", but the field is
// // skipped if empty.
// Field string `structs:"myName,omitempty"`
//
// // Field appears in map as key "Field" (the default), but
// // the field is skipped if empty.
// Field string `structs:",omitempty"`
//
// Note that only exported fields of a struct can be accessed; non-exported
// fields will be ignored.
func (s *Struct) Map() map[string]interface{} {
out := make(map[string]interface{})
fields := s.structFields()
for _, field := range fields {
name := field.Name
val := s.value.FieldByName(name)
var finalVal interface{}
tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
if tagName != "" {
name = tagName
}
// if the value is a zero value and the field is marked as omitempty do
// not include
if tagOpts.Has("omitempty") {
zero := reflect.Zero(val.Type()).Interface()
current := val.Interface()
if reflect.DeepEqual(current, zero) {
continue
}
}
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
// look out for embedded structs, and convert them to a
// map[string]interface{} too
n := New(val.Interface())
n.TagName = s.TagName
finalVal = n.Map()
} else {
finalVal = val.Interface()
}
out[name] = finalVal
}
return out
}
// Values converts the given s struct's field values to a []interface{}. A
// struct tag with the content of "-" ignores that particular field.
// Example:
//
// // Field is ignored by this package.
// Field int `structs:"-"`
//
// A value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// Field is not processed further by this package.
// Field time.Time `structs:",omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// A tag value with the option of "omitempty" ignores that particular field and
// is not added to the values if the field value is empty. Example:
//
// // Field is skipped if empty
// Field string `structs:",omitempty"`
//
// Note that only exported fields of a struct can be accessed; non-exported
// fields will be ignored.
func (s *Struct) Values() []interface{} {
fields := s.structFields()
var t []interface{}
for _, field := range fields {
val := s.value.FieldByName(field.Name)
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
// if the value is a zero value and the field is marked as omitempty do
// not include
if tagOpts.Has("omitempty") {
zero := reflect.Zero(val.Type()).Interface()
current := val.Interface()
if reflect.DeepEqual(current, zero) {
continue
}
}
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
// look out for embedded structs, and convert them to a
// []interface{} to be added to the final values slice
for _, embeddedVal := range Values(val.Interface()) {
t = append(t, embeddedVal)
}
} else {
t = append(t, val.Interface())
}
}
return t
}
// Fields returns a slice of Fields. A struct tag with the content of "-"
// ignores the checking of that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// It panics if s's kind is not struct.
func (s *Struct) Fields() []*Field {
return getFields(s.value, s.TagName)
}
// Names returns a slice of field names. A struct tag with the content of "-"
// ignores the checking of that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// It panics if s's kind is not struct.
func (s *Struct) Names() []string {
fields := getFields(s.value, s.TagName)
names := make([]string, len(fields))
for i, field := range fields {
names[i] = field.Name()
}
return names
}
func getFields(v reflect.Value, tagName string) []*Field {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
t := v.Type()
var fields []*Field
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if tag := field.Tag.Get(tagName); tag == "-" {
continue
}
f := &Field{
field: field,
value: v.FieldByName(field.Name),
}
fields = append(fields, f)
}
return fields
}
// Field returns a new Field struct that provides several high level functions
// around a single struct field entity. It panics if the field is not found.
func (s *Struct) Field(name string) *Field {
f, ok := s.FieldOk(name)
if !ok {
panic("field not found")
}
return f
}
// FieldOk returns a new Field struct that provides several high level functions
// around a single struct field entity. The boolean reports whether the field
// was found.
func (s *Struct) FieldOk(name string) (*Field, bool) {
t := s.value.Type()
field, ok := t.FieldByName(name)
if !ok {
return nil, false
}
return &Field{
field: field,
value: s.value.FieldByName(name),
defaultTag: s.TagName,
}, true
}
// IsZero returns true if all fields in a struct are zero values (not
// initialized). A struct tag with the content of "-" ignores the checking of
// that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// A value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Field is not processed further by this package.
// Field time.Time `structs:"myName,omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// Note that only exported fields of a struct can be accessed; non-exported
// fields will be ignored. It panics if s's kind is not struct.
func (s *Struct) IsZero() bool {
fields := s.structFields()
for _, field := range fields {
val := s.value.FieldByName(field.Name)
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
ok := IsZero(val.Interface())
if !ok {
return false
}
continue
}
// zero value of the given field, such as "" for string, 0 for int
zero := reflect.Zero(val.Type()).Interface()
// current value of the given field
current := val.Interface()
if !reflect.DeepEqual(current, zero) {
return false
}
}
return true
}
// HasZero returns true if any field in a struct is not initialized (zero value).
// A struct tag with the content of "-" ignores the checking of that particular
// field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// A value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Field is not processed further by this package.
// Field time.Time `structs:"myName,omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// Note that only exported fields of a struct can be accessed; non-exported
// fields will be ignored. It panics if s's kind is not struct.
func (s *Struct) HasZero() bool {
fields := s.structFields()
for _, field := range fields {
val := s.value.FieldByName(field.Name)
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
ok := HasZero(val.Interface())
if ok {
return true
}
continue
}
// zero value of the given field, such as "" for string, 0 for int
zero := reflect.Zero(val.Type()).Interface()
// current value of the given field
current := val.Interface()
if reflect.DeepEqual(current, zero) {
return true
}
}
return false
}
// Name returns the struct's type name within its package. For more info refer
// to the Name() function.
func (s *Struct) Name() string {
return s.value.Type().Name()
}
// structFields returns the exported struct fields for a given s struct. This
// is a convenient helper method to avoid duplicate code in some of the
// functions.
func (s *Struct) structFields() []reflect.StructField {
t := s.value.Type()
var f []reflect.StructField
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
// we can't access the value of unexported fields
if field.PkgPath != "" {
continue
}
// don't check if it's omitted
if tag := field.Tag.Get(s.TagName); tag == "-" {
continue
}
f = append(f, field)
}
return f
}
func strctVal(s interface{}) reflect.Value {
v := reflect.ValueOf(s)
// if pointer, get the underlying element
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
panic("not struct")
}
return v
}
// Map converts the given struct to a map[string]interface{}. For more info
// refer to the Struct type's Map() method. It panics if s's kind is not struct.
func Map(s interface{}) map[string]interface{} {
return New(s).Map()
}
// Values converts the given struct to a []interface{}. For more info refer to
// the Struct type's Values() method. It panics if s's kind is not struct.
func Values(s interface{}) []interface{} {
return New(s).Values()
}
// Fields returns a slice of *Field. For more info refer to the Struct type's
// Fields() method. It panics if s's kind is not struct.
func Fields(s interface{}) []*Field {
return New(s).Fields()
}
// Names returns a slice of field names. For more info refer to the Struct
// type's Names() method. It panics if s's kind is not struct.
func Names(s interface{}) []string {
return New(s).Names()
}
// IsZero returns true if all fields are equal to a zero value. For more info
// refer to the Struct type's IsZero() method. It panics if s's kind is not struct.
func IsZero(s interface{}) bool {
return New(s).IsZero()
}
// HasZero returns true if any field is equal to a zero value. For more info
// refer to the Struct type's HasZero() method. It panics if s's kind is not struct.
func HasZero(s interface{}) bool {
return New(s).HasZero()
}
// IsStruct returns true if the given variable is a struct or a pointer to
// struct.
func IsStruct(s interface{}) bool {
v := reflect.ValueOf(s)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
// uninitialized zero value of a struct
if v.Kind() == reflect.Invalid {
return false
}
return v.Kind() == reflect.Struct
}
// Name returns the struct's type name within its package. It returns an
// empty string for unnamed types. It panics if s's kind is not struct.
func Name(s interface{}) string {
return New(s).Name()
}
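To make the tag handling described in the doc comments concrete, a small hedged example (the `Account` type and its tags are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Account struct {
	Name     string `structs:"myName"`     // renamed key
	Password string `structs:"-"`          // always ignored
	Token    string `structs:",omitempty"` // skipped when empty
}

func main() {
	m := structs.Map(&Account{Name: "gopher"})
	// Password is ignored and the empty Token is omitted:
	fmt.Println(m) // map[myName:gopher]
}
```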


@@ -1,32 +0,0 @@
package structs
import "strings"
// tagOptions contains a slice of tag options
type tagOptions []string
// Has returns true if the given option is available in tagOptions
func (t tagOptions) Has(opt string) bool {
for _, tagOpt := range t {
if tagOpt == opt {
return true
}
}
return false
}
// parseTag splits a struct field's tag into its name and a list of options
// which come after the name. A tag is in the form of: "name,option1,option2".
// The name can be omitted.
func parseTag(tag string) (string, tagOptions) {
// tag is one of the following:
// ""
// "name"
// "name,opt"
// "name,opt,opt2"
// ",opt"
res := strings.Split(tag, ",")
return res[0], res[1:]
}
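Since parseTag is unexported, the sketch below simply mirrors its one-line implementation to show what each tag form yields:

```go
package main

import (
	"fmt"
	"strings"
)

// parseTag mirrors the unexported helper above, for illustration only.
func parseTag(tag string) (string, []string) {
	res := strings.Split(tag, ",")
	return res[0], res[1:]
}

func main() {
	name, opts := parseTag("myName,omitempty,omitnested")
	fmt.Println(name, opts) // myName [omitempty omitnested]

	name, opts = parseTag(",omitempty")
	fmt.Printf("%q %v\n", name, opts) // "" [omitempty]
}
```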

191 vendor/github.com/juju/ratelimit/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
All files in this repository are licensed as follows. If you contribute
to this repository, it is assumed that you license your contribution
under the same license unless you state otherwise.
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

117 vendor/github.com/juju/ratelimit/README.md generated vendored Normal file

@@ -0,0 +1,117 @@
# ratelimit
--
import "github.com/juju/ratelimit"
The ratelimit package provides an efficient token bucket implementation. See
http://en.wikipedia.org/wiki/Token_bucket.
## Usage
#### func Reader
```go
func Reader(r io.Reader, bucket *Bucket) io.Reader
```
Reader returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
#### func Writer
```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
#### type Bucket
```go
type Bucket struct {
}
```
Bucket represents a token bucket that fills at a predetermined rate. Methods on
Bucket may be called concurrently.
#### func NewBucket
```go
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
```
NewBucket returns a new token bucket that fills at the rate of one token every
fillInterval, up to the given maximum capacity. Both arguments must be positive.
The bucket is initially full.
#### func NewBucketWithQuantum
```go
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
```
NewBucketWithQuantum is similar to NewBucket, but allows the specification of
the quantum size - quantum tokens are added every fillInterval.
#### func NewBucketWithRate
```go
func NewBucketWithRate(rate float64, capacity int64) *Bucket
```
NewBucketWithRate returns a token bucket that fills the bucket at the rate of
rate tokens per second up to the given maximum capacity. Because of limited
clock resolution, at high rates, the actual rate may be up to 1% different from
the specified rate.
#### func (*Bucket) Rate
```go
func (tb *Bucket) Rate() float64
```
Rate returns the fill rate of the bucket, in tokens per second.
#### func (*Bucket) Take
```go
func (tb *Bucket) Take(count int64) time.Duration
```
Take takes count tokens from the bucket without blocking. It returns the time
that the caller should wait until the tokens are actually available.
Note that the request is irrevocable: there is no way to return tokens to
the bucket once this method commits us to taking them.
#### func (*Bucket) TakeAvailable
```go
func (tb *Bucket) TakeAvailable(count int64) int64
```
TakeAvailable takes up to count immediately available tokens from the bucket. It
returns the number of tokens removed, or zero if there are no available tokens.
It does not block.
#### func (*Bucket) TakeMaxDuration
```go
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
```
TakeMaxDuration is like Take, except that it will only take tokens from the
bucket if the wait time for the tokens is no greater than maxWait.
If it would take longer than maxWait for the tokens to become available, it does
nothing and reports false, otherwise it returns the time that the caller should
wait until the tokens are actually available, and reports true.
#### func (*Bucket) Wait
```go
func (tb *Bucket) Wait(count int64)
```
Wait takes count tokens from the bucket, waiting until they are available.
#### func (*Bucket) WaitMaxDuration
```go
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
```
WaitMaxDuration is like Wait except that it will only take tokens from the
bucket if it needs to wait for no greater than maxWait. It reports whether any
tokens have been removed from the bucket. If no tokens have been removed, it
returns immediately.
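The README documents the API piecemeal; for orientation, a minimal usage sketch that combines the documented calls (the rate and counts are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/juju/ratelimit"
)

func main() {
	// A bucket holding up to 10 tokens, refilled at 100 tokens per second.
	bucket := ratelimit.NewBucketWithRate(100, 10)

	// Wait blocks until a token is available.
	for i := 0; i < 3; i++ {
		bucket.Wait(1)
		fmt.Println("request", i)
	}

	// TakeAvailable never blocks; it takes what is there.
	fmt.Println("took", bucket.TakeAvailable(5), "tokens immediately")
}
```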

245 vendor/github.com/juju/ratelimit/ratelimit.go generated vendored Normal file

@@ -0,0 +1,245 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
// Package ratelimit provides an efficient token bucket implementation
// that can be used to limit the rate of arbitrary things.
// See http://en.wikipedia.org/wiki/Token_bucket.
package ratelimit
import (
"math"
"strconv"
"sync"
"time"
)
// Bucket represents a token bucket that fills at a predetermined rate.
// Methods on Bucket may be called concurrently.
type Bucket struct {
startTime time.Time
capacity int64
quantum int64
fillInterval time.Duration
// The mutex guards the fields following it.
mu sync.Mutex
// avail holds the number of available tokens
// in the bucket, as of availTick ticks from startTime.
// It will be negative when there are consumers
// waiting for tokens.
avail int64
availTick int64
}
// NewBucket returns a new token bucket that fills at the
// rate of one token every fillInterval, up to the given
// maximum capacity. Both arguments must be
// positive. The bucket is initially full.
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
return NewBucketWithQuantum(fillInterval, capacity, 1)
}
// rateMargin specifies the allowed variance of actual
// rate from specified rate. 1% seems reasonable.
const rateMargin = 0.01
// NewBucketWithRate returns a token bucket that fills the bucket
// at the rate of rate tokens per second up to the given
// maximum capacity. Because of limited clock resolution,
// at high rates, the actual rate may be up to 1% different from the
// specified rate.
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
if fillInterval <= 0 {
continue
}
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb
}
}
panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
}
// nextQuantum returns the next quantum to try after q.
// We grow the quantum exponentially, but slowly, so we
// get a good fit in the lower numbers.
func nextQuantum(q int64) int64 {
q1 := q * 11 / 10
if q1 == q {
q1++
}
return q1
}
// NewBucketWithQuantum is similar to NewBucket, but allows
// the specification of the quantum size - quantum tokens
// are added every fillInterval.
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
if fillInterval <= 0 {
panic("token bucket fill interval is not > 0")
}
if capacity <= 0 {
panic("token bucket capacity is not > 0")
}
if quantum <= 0 {
panic("token bucket quantum is not > 0")
}
return &Bucket{
startTime: time.Now(),
capacity: capacity,
quantum: quantum,
avail: capacity,
fillInterval: fillInterval,
}
}
// Wait takes count tokens from the bucket, waiting until they are
// available.
func (tb *Bucket) Wait(count int64) {
if d := tb.Take(count); d > 0 {
time.Sleep(d)
}
}
// WaitMaxDuration is like Wait except that it will
// only take tokens from the bucket if it needs to wait
// for no greater than maxWait. It reports whether
// any tokens have been removed from the bucket.
// If no tokens have been removed, it returns immediately.
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
d, ok := tb.TakeMaxDuration(count, maxWait)
if d > 0 {
time.Sleep(d)
}
return ok
}
const infinityDuration time.Duration = 0x7fffffffffffffff
// Take takes count tokens from the bucket without blocking. It returns
// the time that the caller should wait until the tokens are actually
// available.
//
// Note that the request is irrevocable: there is no way to return
// tokens to the bucket once this method commits us to taking them.
func (tb *Bucket) Take(count int64) time.Duration {
d, _ := tb.take(time.Now(), count, infinityDuration)
return d
}
// TakeMaxDuration is like Take, except that
// it will only take tokens from the bucket if the wait
// time for the tokens is no greater than maxWait.
//
// If it would take longer than maxWait for the tokens
// to become available, it does nothing and reports false,
// otherwise it returns the time that the caller should
// wait until the tokens are actually available, and reports
// true.
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
return tb.take(time.Now(), count, maxWait)
}
// TakeAvailable takes up to count immediately available tokens from the
// bucket. It returns the number of tokens removed, or zero if there are
// no available tokens. It does not block.
func (tb *Bucket) TakeAvailable(count int64) int64 {
return tb.takeAvailable(time.Now(), count)
}
// takeAvailable is the internal version of TakeAvailable - it takes the
// current time as an argument to enable easy testing.
func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
if count <= 0 {
return 0
}
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
if tb.avail <= 0 {
return 0
}
if count > tb.avail {
count = tb.avail
}
tb.avail -= count
return count
}
// Available returns the number of available tokens. It will be negative
// when there are consumers waiting for tokens. Note that if this
// returns greater than zero, it does not guarantee that calls that take
// tokens from the buffer will succeed, as the number of available
// tokens could have changed in the meantime. This method is intended
// primarily for metrics reporting and debugging.
func (tb *Bucket) Available() int64 {
return tb.available(time.Now())
}
// available is the internal version of Available - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
return tb.avail
}
// Capacity returns the capacity that the bucket was created with.
func (tb *Bucket) Capacity() int64 {
return tb.capacity
}
// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
}
// take is the internal version of Take - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
if count <= 0 {
return 0, true
}
tb.mu.Lock()
defer tb.mu.Unlock()
currentTick := tb.adjust(now)
avail := tb.avail - count
if avail >= 0 {
tb.avail = avail
return 0, true
}
// Round up the missing tokens to the nearest multiple
// of quantum - the tokens won't be available until
// that tick.
endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
waitTime := endTime.Sub(now)
if waitTime > maxWait {
return 0, false
}
tb.avail = avail
return waitTime, true
}
// adjust updates the bucket's available token count based on the current time.
// It returns the current tick.
func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
if tb.avail >= tb.capacity {
return
}
tb.avail += (currentTick - tb.availTick) * tb.quantum
if tb.avail > tb.capacity {
tb.avail = tb.capacity
}
tb.availTick = currentTick
return
}
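As a sanity check of the fill arithmetic, Rate() is 1e9 * quantum / fillInterval tokens per second; a tiny hedged example with arbitrary numbers:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// A 5-token quantum every 100ms yields 1e9 * 5 / 1e8 = 50 tokens/second.
	tb := ratelimit.NewBucketWithQuantum(100*time.Millisecond, 100, 5)
	fmt.Println(tb.Rate()) // 50
}
```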

51 vendor/github.com/juju/ratelimit/reader.go generated vendored Normal file

@@ -0,0 +1,51 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
package ratelimit
import "io"
type reader struct {
r io.Reader
bucket *Bucket
}
// Reader returns a reader that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Reader(r io.Reader, bucket *Bucket) io.Reader {
return &reader{
r: r,
bucket: bucket,
}
}
func (r *reader) Read(buf []byte) (int, error) {
n, err := r.r.Read(buf)
if n <= 0 {
return n, err
}
r.bucket.Wait(int64(n))
return n, err
}
type writer struct {
w io.Writer
bucket *Bucket
}
// Writer returns a writer that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Writer(w io.Writer, bucket *Bucket) io.Writer {
return &writer{
w: w,
bucket: bucket,
}
}
func (w *writer) Write(buf []byte) (int, error) {
w.bucket.Wait(int64(len(buf)))
return w.w.Write(buf)
}
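A minimal sketch of the reader in use (payload and rate chosen arbitrarily). Note that the bucket starts full, so short payloads pass immediately; only sustained reads are throttled:

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/juju/ratelimit"
)

func main() {
	// Each token is one byte, so this caps sustained reads at 32 B/s.
	bucket := ratelimit.NewBucketWithRate(32, 32)
	limited := ratelimit.Reader(strings.NewReader("hello, rate-limited world\n"), bucket)
	io.Copy(os.Stdout, limited)
}
```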


@@ -21,3 +21,4 @@ _testmain.go
*.exe
*.test
*.prof

10 vendor/github.com/pkg/errors/.travis.yml generated vendored Normal file

@@ -0,0 +1,10 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- tip
script:
- go test -v ./...

23 vendor/github.com/pkg/errors/LICENSE generated vendored Normal file

@@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52 vendor/github.com/pkg/errors/README.md generated vendored Normal file

@@ -0,0 +1,52 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error, it may be necessary to reverse the operation of `errors.Wrap` to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## Licence
BSD-2-Clause
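Putting wrapping and cause retrieval together, a small runnable sketch (the failure is simulated):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func readConfig() error {
	cause := errors.New("file does not exist") // pretend the read failed
	return errors.Wrap(cause, "read failed")
}

func main() {
	err := readConfig()
	fmt.Println(err)               // read failed: file does not exist
	fmt.Println(errors.Cause(err)) // file does not exist
}
```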

32 vendor/github.com/pkg/errors/appveyor.yml generated vendored Normal file

@@ -0,0 +1,32 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off

236 vendor/github.com/pkg/errors/errors.go generated vendored Normal file

@@ -0,0 +1,236 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of the stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of the stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, f.msg)
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}

178 vendor/github.com/pkg/errors/stack.go generated vendored Normal file

@@ -0,0 +1,178 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until we find two more than in the function name, and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}
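
Tracing the comment's example through the loop (a sketch; trimGOPATH is unexported, so a check like this belongs in the package's own tests):

// name contains 1 separator, so goal = 3: the loop walks back past the
// last three "/" in file and then drops the leading separator.
name := "pkg/sub.Type.Method"
file := "/home/user/src/pkg/sub/file.go"
fmt.Println(trimGOPATH(name, file)) // Output: pkg/sub/file.go
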

View file

@ -44,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
// Clone returns a copy of the Metric.
func (m Metric) Clone() Metric {
-clone := Metric{}
+clone := make(Metric, len(m))
for k, v := range m {
clone[k] = v
}

View file

@ -1,5 +1,5 @@
sudo: false
language: go
go:
- 1.5
- 1.6
- 1.6.4
- 1.7.4

View file

@ -14,6 +14,7 @@ The following individuals have contributed code to this repository
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
* Jonas Große Sundrup <cherti@letopolis.de>
* Julius Volz <julius.volz@gmail.com>
* Matt Layher <mdlayher@gmail.com>
* Matthias Rampke <mr@soundcloud.com>
* Nicky Gerritsen <nicky@streamone.nl>
* Rémi Audebert <contact@halfr.net>

552
vendor/github.com/prometheus/procfs/mountstats.go generated vendored Normal file
View file

@ -0,0 +1,552 @@
package procfs
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
// heavily as a reference:
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
//
// Special thanks to Chris Siebenmann for all of his posts explaining the
// various statistics available for NFS.
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// Constants shared between multiple functions.
const (
deviceEntryLen = 8
fieldBytesLen = 8
fieldEventsLen = 27
statVersion10 = "1.0"
statVersion11 = "1.1"
fieldTransport10Len = 10
fieldTransport11Len = 13
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
type Mount struct {
// Name of the device.
Device string
// The mount point of the device.
Mount string
// The filesystem type used by the device.
Type string
// If available, additional statistics related to this Mount.
// Use a type assertion to determine if additional statistics are available.
Stats MountStats
}
// A MountStats is a type which contains detailed statistics for a specific
// type of Mount.
type MountStats interface {
mountStats()
}
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
Bytes NFSBytesStats
// Statistics related to various NFS event occurrences.
Events NFSEventsStats
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
Transport NFSTransportStats
}
// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}
// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
// Number of bytes read using the read() syscall.
Read int
// Number of bytes written using the write() syscall.
Write int
// Number of bytes read using the read() syscall in O_DIRECT mode.
DirectRead int
// Number of bytes written using the write() syscall in O_DIRECT mode.
DirectWrite int
// Number of bytes read from the NFS server, in total.
ReadTotal int
// Number of bytes written to the NFS server, in total.
WriteTotal int
// Number of pages read directly via mmap()'d files.
ReadPages int
// Number of pages written directly via mmap()'d files.
WritePages int
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
// Number of times cached inode attributes are re-validated from the server.
InodeRevalidate int
// Number of times cached dentry nodes are re-validated from the server.
DnodeRevalidate int
// Number of times an inode cache is cleared.
DataInvalidate int
// Number of times cached inode attributes are invalidated.
AttributeInvalidate int
// Number of times files or directories have been open()'d.
VFSOpen int
// Number of times a directory lookup has occurred.
VFSLookup int
// Number of times permissions have been checked.
VFSAccess int
// Number of updates (and potential writes) to pages.
VFSUpdatePage int
// Number of pages read directly via mmap()'d files.
VFSReadPage int
// Number of times a group of pages have been read.
VFSReadPages int
// Number of pages written directly via mmap()'d files.
VFSWritePage int
// Number of times a group of pages have been written.
VFSWritePages int
// Number of times directory entries have been read with getdents().
VFSGetdents int
// Number of times attributes have been set on inodes.
VFSSetattr int
// Number of pending writes that have been forcefully flushed to the server.
VFSFlush int
// Number of times fsync() has been called on directories and files.
VFSFsync int
// Number of times locking has been attempted on a file.
VFSLock int
// Number of times files have been closed and released.
VFSFileRelease int
// Unknown. Possibly unused.
CongestionWait int
// Number of times files have been truncated.
Truncation int
// Number of times a file has been grown due to writes beyond its existing end.
WriteExtension int
// Number of times a file was removed while still open by another process.
SillyRename int
// Number of times the NFS server gave less data than expected while reading.
ShortRead int
// Number of times the NFS server wrote less data than expected while writing.
ShortWrite int
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
// offline storage.
JukeboxDelay int
// Number of NFS v4.1+ pNFS reads.
PNFSRead int
// Number of NFS v4.1+ pNFS writes.
PNFSWrite int
}
// A NFSOperationStats contains statistics for a single operation.
type NFSOperationStats struct {
// The name of the operation.
Operation string
// Number of requests performed for this operation.
Requests int
// Number of times an actual RPC request has been transmitted for this operation.
Transmissions int
// Number of times a request has had a major timeout.
MajorTimeouts int
// Number of bytes sent for this operation, including RPC headers and payload.
BytesSent int
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived int
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
// Duration it took to get a reply back after the request was transmitted.
CumulativeTotalResponseTime time.Duration
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestTime time.Duration
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
// The local port used for the NFS mount.
Port int
// Number of times the client has had to establish a connection from scratch
// to the NFS server.
Bind int
// Number of times the client has made a TCP connection to the NFS server.
Connect int
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
// spent waiting for connections to the server to be established.
ConnectIdleTime int
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
// Number of RPC requests for this mount sent to the NFS server.
Sends int
// Number of RPC responses for this mount received from the NFS server.
Receives int
// Number of times the NFS server sent a response with a transaction ID
// unknown to this client.
BadTransactionIDs int
// A running counter, incremented on each request as the current difference
// between sends and receives.
CumulativeActiveRequests int
// A running counter, incremented on each request by the current backlog
// queue size.
CumulativeBacklog int
// Stats below only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used.
MaximumRPCSlotsUsed int
// A running counter, incremented on each request as the current size of the
// sending queue.
CumulativeSendingQueue int
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue int
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
// of Mount structures containing detailed information about each mount.
// If available, statistics for each mount are parsed as well.
func parseMountStats(r io.Reader) ([]*Mount, error) {
const (
device = "device"
statVersionPrefix = "statvers="
nfs3Type = "nfs"
nfs4Type = "nfs4"
)
var mounts []*Mount
s := bufio.NewScanner(r)
for s.Scan() {
// Only look for device entries in this function
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 || ss[0] != device {
continue
}
m, err := parseMount(ss)
if err != nil {
return nil, err
}
// Does this mount also possess statistics information?
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
stats, err := parseMountStatsNFS(s, statVersion)
if err != nil {
return nil, err
}
m.Stats = stats
}
mounts = append(mounts, m)
}
return mounts, s.Err()
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
// Check for specific words appearing at specific indices to ensure
// the format is consistent with what we expect
format := []struct {
i int
s string
}{
{i: 0, s: "device"},
{i: 2, s: "mounted"},
{i: 3, s: "on"},
{i: 5, s: "with"},
{i: 6, s: "fstype"},
}
for _, f := range format {
if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
}
return &Mount{
Device: ss[1],
Mount: ss[4],
Type: ss[7],
}, nil
}
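
A concrete illustration of the token layout the format table enforces (a sketch; parseMount is unexported, so this mirrors an in-package test):

ss := strings.Fields("device 192.168.1.1:/srv/test mounted on /mnt/test with fstype nfs4")
m, err := parseMount(ss)
// err == nil
// m.Device == "192.168.1.1:/srv/test"
// m.Mount  == "/mnt/test"
// m.Type   == "nfs4"
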
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
// related to NFS statistics.
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data
const (
fieldAge = "age:"
fieldBytes = "bytes:"
fieldEvents = "events:"
fieldPerOpStats = "per-op"
fieldTransport = "xprt:"
)
stats := &MountStatsNFS{
StatVersion: statVersion,
}
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldAge:
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
return nil, err
}
stats.Age = d
case fieldBytes:
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
}
stats.Bytes = *bstats
case fieldEvents:
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
}
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
}
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
if err != nil {
return nil, err
}
stats.Transport = *tstats
}
// When encountering "per-operation statistics", we must break this
// loop and parse them separately to ensure we can terminate parsing
// before reaching another device entry; hence why this 'if' statement
// is not just another switch case
if ss[0] == fieldPerOpStats {
break
}
}
if err := s.Err(); err != nil {
return nil, err
}
// NFS per-operation stats appear last before the next device entry
perOpStats, err := parseNFSOperationStats(s)
if err != nil {
return nil, err
}
stats.Operations = perOpStats
return stats, nil
}
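
For reference, an abbreviated and purely illustrative /proc/[pid]/mountstats NFS entry of the shape this loop consumes (values are made up; the events line is elided):

device fs.example.com:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1
	age:	13968
	bytes:	1207640230 0 0 0 1210214218 0 295483 0
	events:	... (27 integers)
	xprt:	tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
	per-op statistics
	        NULL: 0 0 0 0 0 0 0 0
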
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
}
ns := make([]int, 0, fieldBytesLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSBytesStats{
Read: ns[0],
Write: ns[1],
DirectRead: ns[2],
DirectWrite: ns[3],
ReadTotal: ns[4],
WriteTotal: ns[5],
ReadPages: ns[6],
WritePages: ns[7],
}, nil
}
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
}
ns := make([]int, 0, fieldEventsLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSEventsStats{
InodeRevalidate: ns[0],
DnodeRevalidate: ns[1],
DataInvalidate: ns[2],
AttributeInvalidate: ns[3],
VFSOpen: ns[4],
VFSLookup: ns[5],
VFSAccess: ns[6],
VFSUpdatePage: ns[7],
VFSReadPage: ns[8],
VFSReadPages: ns[9],
VFSWritePage: ns[10],
VFSWritePages: ns[11],
VFSGetdents: ns[12],
VFSSetattr: ns[13],
VFSFlush: ns[14],
VFSFsync: ns[15],
VFSLock: ns[16],
VFSFileRelease: ns[17],
CongestionWait: ns[18],
Truncation: ns[19],
WriteExtension: ns[20],
SillyRename: ns[21],
ShortRead: ns[22],
ShortWrite: ns[23],
JukeboxDelay: ns[24],
PNFSRead: ns[25],
PNFSWrite: ns[26],
}, nil
}
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
const (
// Number of expected fields in each per-operation statistics set
numFields = 9
)
var ops []NFSOperationStats
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
// Must break when reading a blank line after per-operation stats to
// enable top-level function to parse the next device entry
break
}
if len(ss) != numFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
}
// Skip the string operation name; the remaining fields are integers
ns := make([]int, 0, numFields-1)
for _, st := range ss[1:] {
n, err := strconv.Atoi(st)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
ops = append(ops, NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
MajorTimeouts: ns[2],
BytesSent: ns[3],
BytesReceived: ns[4],
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
})
}
return ops, s.Err()
}
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
switch statVersion {
case statVersion10:
if len(ss) != fieldTransport10Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
}
case statVersion11:
if len(ss) != fieldTransport11Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
}
default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
}
// Allocate enough for v1.1 stats, since the zero values of the extra v1.1
// fields are acceptable in a v1.0 response
ns := make([]int, 0, fieldTransport11Len)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSTransportStats{
Port: ns[0],
Bind: ns[1],
Connect: ns[2],
ConnectIdleTime: ns[3],
IdleTime: time.Duration(ns[4]) * time.Second,
Sends: ns[5],
Receives: ns[6],
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
}, nil
}

View file

@ -192,6 +192,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
// MountStats retrieves statistics and configuration for mount points in a
// process's namespace.
func (p Proc) MountStats() ([]*Mount, error) {
f, err := os.Open(p.path("mountstats"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountStats(f)
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {

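
A minimal consumer-side sketch of the new accessor (assuming the package's existing procfs.Self constructor; error handling abbreviated):

p, err := procfs.Self()
if err != nil {
	log.Fatal(err)
}
mounts, err := p.MountStats()
if err != nil {
	log.Fatal(err)
}
for _, m := range mounts {
	// Only entries that carried statistics populate Stats; the type
	// assertion guards everything else.
	if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
		fmt.Printf("%s on %s: age=%v, %d bytes read\n",
			m.Device, m.Mount, nfs.Age, nfs.Bytes.ReadTotal)
	}
}
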
View file

@ -1,27 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# Emacs
*~

View file

@ -1,22 +0,0 @@
# go-camelcase
Fast camelcase implementation that avoids Go's regexps. Direct fork of our [snakecase](https://github.com/segmentio/go-snakecase) implementation.
--
import "github.com/segmentio/go-camelcase"
Convert strings to camelCase
## Usage
#### func Camelcase
```go
func Camelcase(str string) string
```
Camelcase representation of `str`.
# License
MIT

View file

@ -1,105 +0,0 @@
//
// Fast camel-case implementation.
//
package camelcase
// Camelcase the given string.
func Camelcase(s string) string {
b := make([]byte, 0, 64)
l := len(s)
i := 0
for i < l {
// skip leading bytes that aren't letters or digits
for i < l && !isWord(s[i]) {
i++
}
// set the first byte to uppercase if it needs to
if i < l {
c := s[i]
// simply append contiguous digits
if isDigit(c) {
for i < l {
if c = s[i]; !isDigit(c) {
break
}
b = append(b, c)
i++
}
continue
}
// the sequence starts with an uppercase letter; we append
// all following uppercase letters as equivalent lowercases
if isUpper(c) {
b = append(b, c)
i++
for i < l {
if c = s[i]; !isUpper(c) {
break
}
b = append(b, toLower(c))
i++
}
} else {
b = append(b, toUpper(c))
i++
}
// append all trailing lowercase letters
for i < l {
if c = s[i]; !isLower(c) {
break
}
b = append(b, c)
i++
}
}
}
// the first byte must always be lowercase
if len(b) != 0 {
b[0] = toLower(b[0])
}
return string(b)
}
func isWord(c byte) bool {
return isLetter(c) || isDigit(c)
}
func isLetter(c byte) bool {
return isLower(c) || isUpper(c)
}
func isUpper(c byte) bool {
return c >= 'A' && c <= 'Z'
}
func isLower(c byte) bool {
return c >= 'a' && c <= 'z'
}
func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
func toLower(c byte) byte {
if isUpper(c) {
return c + ('a' - 'A')
}
return c
}
func toUpper(c byte) byte {
if isLower(c) {
return c - ('a' - 'A')
}
return c
}
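
For reference, the behaviour of the helper this commit deletes (a sketch; outputs derived from the code above):

fmt.Println(camelcase.Camelcase("hello_world"))    // helloWorld
fmt.Println(camelcase.Camelcase("version 2 beta")) // version2Beta
fmt.Println(camelcase.Camelcase("HTTPServer"))     // httpserver (runs of capitals fold to lowercase)
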

View file

@ -1,21 +0,0 @@
machine:
services:
- docker
dependencies:
override:
- docker pull segment/golang:latest
test:
override:
- >
docker run
$(env | grep -E '^CIRCLE_|^DOCKER_|^CIRCLECI=|^CI=' | sed 's/^/--env /g' | tr "\\n" " ")
--rm
--tty
--interactive
--name go
--volume /var/run/docker.sock:/run/docker.sock
--volume ${PWD}:/go/src/github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}
--workdir /go/src/github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}
segment/golang:latest

View file

@ -80,7 +80,15 @@ func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Obje
// if we are already being deleted, we may only shorten the deletion grace period
// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
// so we force deletion immediately
-if objectMeta.DeletionGracePeriodSeconds == nil {
+// IMPORTANT:
+// The deletion operation happens in two phases.
+// 1. Update to set DeletionGracePeriodSeconds and DeletionTimestamp
+// 2. Delete the object from storage.
+// If the update succeeds, but the delete fails (network error, internal storage error, etc.),
+// the resource could be left in a non-recoverable state. We
+// check whether the existing stored resource has a grace period of 0 and, if so,
+// attempt to delete it immediately in order to recover from this scenario.
+if objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds == 0 {
return false, false, nil
}
// only a shorter grace period may be provided by a user

View file

@ -289,6 +289,10 @@ type StorageMetadata interface {
// ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE,
// PATCH) can respond with.
ProducesMIMETypes(verb string) []string
// ProducesObject returns an object the specified HTTP verb responds with. It overrides the storage object if
// it is not nil. Only the type of the returned object matters; the value is ignored.
ProducesObject(verb string) interface{}
}
// ConnectRequest is an object passed to admission control for Connect operations

View file

@ -2665,15 +2665,6 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
}
}
// TODO(freehan): allow user to update loadbalancerSourceRanges
// Only allow removing LoadBalancerSourceRanges when change service type from LoadBalancer
// to non-LoadBalancer or adding LoadBalancerSourceRanges when change service type from
// non-LoadBalancer to LoadBalancer.
if service.Spec.Type != api.ServiceTypeLoadBalancer && oldService.Spec.Type != api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeLoadBalancer && oldService.Spec.Type == api.ServiceTypeLoadBalancer {
allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
}
allErrs = append(allErrs, validateServiceFields(service)...)
allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...)
return allErrs

File diff suppressed because it is too large Load diff

View file

@ -424,6 +424,9 @@ type KubeletConfiguration struct {
// Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.
// +optional
EvictionMinimumReclaim string `json:"evictionMinimumReclaim,omitempty"`
// If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.
// +optional
ExperimentalKernelMemcgNotification bool `json:"experimentalKernelMemcgNotification"`
// Maximum number of pods per core. Cannot exceed MaxPods
PodsPerCore int32 `json:"podsPerCore"`
// enableControllerAttachDetach enables the Attach/Detach controller to

View file

@ -374,6 +374,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
if obj.EvictionPressureTransitionPeriod == zeroDuration {
obj.EvictionPressureTransitionPeriod = unversioned.Duration{Duration: 5 * time.Minute}
}
if obj.ExperimentalKernelMemcgNotification == nil {
obj.ExperimentalKernelMemcgNotification = boolVar(false)
}
if obj.SystemReserved == nil {
obj.SystemReserved = make(map[string]string)
}

View file

@ -462,6 +462,8 @@ type KubeletConfiguration struct {
EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod"`
// Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.
EvictionMinimumReclaim string `json:"evictionMinimumReclaim"`
// If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.
ExperimentalKernelMemcgNotification *bool `json:"experimentalKernelMemcgNotification"`
// Maximum number of pods per core. Cannot exceed MaxPods
PodsPerCore int32 `json:"podsPerCore"`
// enableControllerAttachDetach enables the Attach/Detach controller to

View file

@ -387,6 +387,9 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil {
return err
}
out.PodsPerCore = in.PodsPerCore
if err := api.Convert_Pointer_bool_To_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil {
return err
@ -556,6 +559,9 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil {
return err
}
out.PodsPerCore = in.PodsPerCore
if err := api.Convert_bool_To_Pointer_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil {
return err

View file

@ -403,6 +403,13 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
if in.ExperimentalKernelMemcgNotification != nil {
in, out := &in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification
*out = new(bool)
**out = **in
} else {
out.ExperimentalKernelMemcgNotification = nil
}
out.PodsPerCore = in.PodsPerCore
if in.EnableControllerAttachDetach != nil {
in, out := &in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach

View file

@ -358,6 +358,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
out.ExperimentalKernelMemcgNotification = in.ExperimentalKernelMemcgNotification
out.PodsPerCore = in.PodsPerCore
out.EnableControllerAttachDetach = in.EnableControllerAttachDetach
if in.SystemReserved != nil {

View file

@ -259,12 +259,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
r.setLastSyncResourceVersion(resourceVersion)
resyncerrc := make(chan error, 1)
cancelCh := make(chan struct{})
defer close(cancelCh)
go func() {
for {
select {
case <-resyncCh:
case <-stopCh:
return
case <-cancelCh:
return
}
glog.V(4).Infof("%s: forcing resync", r.name)
if err := r.store.Resync(); err != nil {

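The added cancelCh is the standard pattern for tying a helper goroutine's lifetime to the enclosing call, so the resync goroutine can no longer leak when ListAndWatch returns for a reason other than stopCh closing. A generic sketch of the pattern (names are illustrative):

func run(stopCh <-chan struct{}) {
	cancelCh := make(chan struct{})
	defer close(cancelCh) // stops the helper when run returns for any reason

	go func() {
		for {
			select {
			case <-time.After(time.Second):
				// periodic work, e.g. a resync
			case <-stopCh:
				return
			case <-cancelCh:
				return
			}
		}
	}()

	// ... main loop; may return on error well before stopCh closes ...
}
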
View file

@ -531,17 +531,23 @@ func (c *Cacher) dispatchEvents() {
func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
triggerValues, supported := c.triggerValues(event)
// TODO: For now we assume we have a given <timeout> budget for dispatching
// a single event. We should consider changing to the approach with:
// - budget has upper bound at <max_timeout>
// - we add <portion> to current timeout every second
timeout := time.Duration(250) * time.Millisecond
c.Lock()
defer c.Unlock()
// Iterate over "allWatchers" no matter what the trigger function is.
for _, watcher := range c.watchers.allWatchers {
-watcher.add(event)
+watcher.add(event, &timeout)
}
if supported {
// Iterate over watchers interested in the given values of the trigger.
for _, triggerValue := range triggerValues {
for _, watcher := range c.watchers.valueWatchers[triggerValue] {
-watcher.add(event)
+watcher.add(event, &timeout)
}
}
} else {
@ -554,7 +560,7 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
// Iterate over watchers interested in exact values for all values.
for _, watchers := range c.watchers.valueWatchers {
for _, watcher := range watchers {
-watcher.add(event)
+watcher.add(event, &timeout)
}
}
}
@ -728,7 +734,7 @@ func (c *cacheWatcher) stop() {
var timerPool sync.Pool
-func (c *cacheWatcher) add(event *watchCacheEvent) {
+func (c *cacheWatcher) add(event *watchCacheEvent, timeout *time.Duration) {
// Try to send the event immediately, without blocking.
select {
case c.input <- *event:
@ -736,20 +742,16 @@ func (c *cacheWatcher) add(event *watchCacheEvent) {
default:
}
-// OK, block sending, but only for up to 5 seconds.
+// OK, block sending, but only for up to <timeout>.
// cacheWatcher.add is called very often, so arrange
// to reuse timers instead of constantly allocating.
-trace := util.NewTrace(
-	fmt.Sprintf("cacheWatcher %v: waiting for add (initial result size %v)",
-	reflect.TypeOf(event.Object).String(), len(c.result)))
-defer trace.LogIfLong(50 * time.Millisecond)
+startTime := time.Now()
-const timeout = 5 * time.Second
t, ok := timerPool.Get().(*time.Timer)
if ok {
-t.Reset(timeout)
+t.Reset(*timeout)
} else {
-t = time.NewTimer(timeout)
+t = time.NewTimer(*timeout)
}
defer timerPool.Put(t)
@ -768,6 +770,10 @@ func (c *cacheWatcher) add(event *watchCacheEvent) {
c.forget(false)
c.stop()
}
if *timeout = *timeout - time.Since(startTime); *timeout < 0 {
*timeout = 0
}
}
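
The *timeout pointer threads one shrinking dispatch budget through a sequence of sends: every blocked send consumes part of it, so a single slow watcher cannot stall dispatch for the full timeout again and again. A stripped-down sketch of the budget idea (names are illustrative):

// sendAll gives the whole batch one budget; each blocked send eats
// into it, and later sends only get whatever remains.
func sendAll(ch chan<- int, events []int, budget time.Duration) {
	for _, ev := range events {
		start := time.Now()
		t := time.NewTimer(budget)
		select {
		case ch <- ev:
			t.Stop()
		case <-t.C:
			return // budget exhausted; give up on the rest
		}
		if budget -= time.Since(start); budget < 0 {
			budget = 0
		}
	}
}
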
// NOTE: sendWatchCacheEvent is assumed to not modify <event> !!!

View file

@ -20,7 +20,7 @@ go_library(
deps = [
"//pkg/util/clock:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/ratelimit:go_default_library",
"//vendor:github.com/juju/ratelimit",
],
)

View file

@ -19,7 +19,7 @@ package flowcontrol
import (
"sync"
"k8s.io/kubernetes/pkg/util/ratelimit"
"github.com/juju/ratelimit"
)
type RateLimiter interface {

View file

@ -74,9 +74,9 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
}
return doMount(mounterPath, source, target, fstype, bindRemountOpts)
}
-// These filesystem types are expected to be supported by the mount utility on the host across all Linux distros.
-var defaultMounterFsTypes = sets.NewString("tmpfs", "ext4", "ext3", "ext2")
-if !defaultMounterFsTypes.Has(fstype) {
+// The list of filesystems that require containerized mounter on GCI image cluster
+fsTypesNeedMounter := sets.NewString("nfs", "glusterfs")
+if fsTypesNeedMounter.Has(fstype) {
mounterPath = mounter.mounterPath
}
return doMount(mounterPath, source, target, fstype, options)

View file

@ -23,8 +23,6 @@ import (
"time"
)
-var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
-var numLetters = len(letters)
var rng = struct {
sync.Mutex
rand *rand.Rand
@ -72,12 +70,16 @@ func Perm(n int) []int {
return rng.rand.Perm(n)
}
-// String generates a random alphanumeric string n characters long. This will
-// panic if n is less than zero.
+// We omit vowels from the set of available characters to reduce the chances
+// of "bad words" being formed.
+var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789")
+// String generates a random alphanumeric string, without vowels, which is n
+// characters long. This will panic if n is less than zero.
func String(length int) string {
b := make([]rune, length)
for i := range b {
-b[i] = letters[Intn(numLetters)]
+b[i] = alphanums[Intn(len(alphanums))]
}
return string(b)
}
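
Call sites are unchanged by this swap; only the alphabet shrinks (a sketch, assuming the package is imported as rand):

suffix := rand.String(5) // e.g. "b5xj2"; the output never contains a vowel
fmt.Println("pod-" + suffix)
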

View file

@ -1,25 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["bucket.go"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["bucket_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [],
)

View file

@ -1,170 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ratelimit
import (
"math"
"sync"
"time"
)
// Bucket models a token bucket
type Bucket struct {
unitsPerNano float64
nanosPerUnit float64
capacity int64
mutex sync.Mutex
available int64
lastRefill int64
// fractionalAvailable "buffers" any amounts that flowed into the bucket smaller than one unit
// This lets us retain precision even with pathological refill rates like (1E9 + 1) per second
fractionalAvailable float64
}
// NewBucketWithRate creates a new token bucket, with maximum capacity = initial capacity, and a refill rate of qps
// We use floats for refill calculations, which introduces the possibility of truncation and rounding errors.
// For "sensible" qps values though, is is acceptable: jbeda did some tests here https://play.golang.org/p/LSKUOGz2LG
func NewBucketWithRate(qps float64, capacity int64) *Bucket {
unitsPerNano := qps / 1E9
nanosPerUnit := 1E9 / qps
b := &Bucket{
unitsPerNano: unitsPerNano,
nanosPerUnit: nanosPerUnit,
capacity: capacity,
available: capacity,
lastRefill: time.Now().UnixNano(),
}
return b
}
// Take takes n units from the bucket, reducing the available quantity even below zero,
// and returns the amount of time the caller should wait
func (b *Bucket) Take(n int64) time.Duration {
b.mutex.Lock()
defer b.mutex.Unlock()
var d time.Duration
if b.available >= n {
// Fast path when bucket has sufficient availability before refilling
} else {
b.refill()
if b.available < n {
deficit := n - b.available
d = time.Duration(int64(float64(deficit) * b.nanosPerUnit))
}
}
b.available -= n
return d
}
// TakeAvailable immediately takes whatever quantity is available, up to max
func (b *Bucket) TakeAvailable(max int64) int64 {
b.mutex.Lock()
defer b.mutex.Unlock()
var took int64
if b.available >= max {
// Fast path when bucket has sufficient availability before refilling
took = max
} else {
b.refill()
took = b.available
if took < 0 {
took = 0
} else if took > max {
took = max
}
}
if took > 0 {
b.available -= took
}
return took
}
// Wait combines a call to Take with a sleep call
func (b *Bucket) Wait(n int64) {
d := b.Take(n)
if d != 0 {
time.Sleep(d)
}
}
// Capacity returns the maximum capacity of the bucket
func (b *Bucket) Capacity() int64 {
return b.capacity
}
// Available returns the quantity available in the bucket (which may be negative), but does not take it.
// This function is for diagnostic / informational purposes only - the returned capacity may immediately
// be inaccurate if another thread is operating on the bucket concurrently.
func (b *Bucket) Available() int64 {
b.mutex.Lock()
defer b.mutex.Unlock()
b.refill()
return b.available
}
// refill replenishes the bucket based on elapsed time; mutex must be held
func (b *Bucket) refill() {
// Note that we really want a monotonic clock here, but go says no:
// https://github.com/golang/go/issues/12914
now := time.Now().UnixNano()
b.refillAtTimestamp(now)
}
// refillAtTimestamp is the logic of the refill function, for testing
func (b *Bucket) refillAtTimestamp(now int64) {
nanosSinceLastRefill := now - b.lastRefill
if nanosSinceLastRefill <= 0 {
// we really want monotonic
return
}
// Compute units that have flowed into bucket
refillFloat := (float64(nanosSinceLastRefill) * b.unitsPerNano) + b.fractionalAvailable
if refillFloat > float64(b.capacity) {
// float64 > MaxInt64 can be converted to negative int64; side step this
b.available = b.capacity
// Don't worry about the fractional units with huge refill rates
} else {
whole, fraction := math.Modf(refillFloat)
refill := int64(whole)
b.fractionalAvailable = fraction
if refill != 0 {
// Refill with overflow
b.available += refill
if b.available >= b.capacity {
b.available = b.capacity
b.fractionalAvailable = 0
}
}
}
b.lastRefill = now
}
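
This deleted bucket deliberately mirrored the github.com/juju/ratelimit API the commit switches back to, so callers read the same against either package (a sketch):

// Refill at 50 units per second, allow bursts of up to 100 units.
b := ratelimit.NewBucketWithRate(50, 100)

b.Wait(1)                  // block until one unit is available
got := b.TakeAvailable(10) // take up to 10 units without blocking
fmt.Println("took", got, "of capacity", b.Capacity())
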

View file

@ -25,8 +25,8 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/util/clock:go_default_library",
"//pkg/util/ratelimit:go_default_library",
"//pkg/util/runtime:go_default_library",
"//vendor:github.com/juju/ratelimit",
],
)

View file

@ -21,7 +21,7 @@ import (
"sync"
"time"
"k8s.io/kubernetes/pkg/util/ratelimit"
"github.com/juju/ratelimit"
)
type RateLimiter interface {
@ -35,7 +35,7 @@ type RateLimiter interface {
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
func DefaultControllerRateLimiter() RateLimiter {
return NewMaxOfRateLimiter(
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),

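For context, a typical controller wiring of this constructor (a sketch; NewRateLimitingQueue is part of the same workqueue package):

queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
queue.AddRateLimited("my-key") // requeued with per-item exponential backoff,
                               // bounded overall by the shared token bucket
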
View file

@ -39,8 +39,8 @@ var (
// them irrelevant. (Next we'll take it out, which may muck with
// scripts consuming the kubectl version output - but most of
// these should be looking at gitVersion already anyways.)
gitMajor string = "1" // major version, always numeric
gitMinor string = "5+" // minor version, numeric possibly followed by "+"
gitMajor string = "1" // major version, always numeric
gitMinor string = "5" // minor version, numeric possibly followed by "+"
// semantic version, derived by build scripts (see
// https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md
@ -51,7 +51,7 @@ var (
// semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight
// translation to be semver compliant.
gitVersion string = "v1.5.0-beta.2+$Format:%h$"
gitVersion string = "v1.5.1+$Format:%h$"
gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"

View file

@ -94,7 +94,7 @@ func UnmountPath(mountPath string, mounter mount.Interface) error {
return err
}
if notMnt {
-glog.V(4).Info("%q is unmounted, deleting the directory", mountPath)
+glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return nil

View file

@ -51,6 +51,17 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
return err
}
// chown and chmod pass through to the underlying file for symlinks.
// Symlinks have a mode of 777 but this really doesn't mean anything.
// The permissions of the underlying file are what matter.
// However, if one reads the mode of a symlink then chmods the symlink
// with that mode, it changes the mode of the underlying file, overriding
// the defaultMode and permissions initialized by the volume plugin, which
// is not what we want; thus, we skip chown/chmod for symlinks.
if info.Mode()&os.ModeSymlink != 0 {
return nil
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
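
The guard assumes info comes from an Lstat-style walk (filepath.Walk reports symlinks as symlinks rather than as their targets); the standalone equivalent of the new check (a sketch):

fi, err := os.Lstat(path) // Lstat reports on the link itself, not its target
if err != nil {
	return err
}
if fi.Mode()&os.ModeSymlink != 0 {
	return nil // skip: chmod/chown here would fall through to the target file
}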