Update go dependencies
parent 060e449056
commit 4fb61c73d1
1192 changed files with 185874 additions and 302749 deletions
1092
Godeps/Godeps.json
generated
File diff suppressed because it is too large
13
vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
@@ -1,13 +0,0 @@
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

format:
	gofmt -w *.go

docs:
	gomake clean
	godoc ${TARG} > README.txt
67
vendor/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
|
@@ -1,67 +0,0 @@
|
|||
PACKAGE
|
||||
|
||||
package goautoneg
|
||||
import "bitbucket.org/ww/goautoneg"
|
||||
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
FUNCTIONS
|
||||
|
||||
func Negotiate(header string, alternatives []string) (content_type string)
|
||||
Negotiate the most appropriate content_type given the accept header
|
||||
and a list of alternatives.
|
||||
|
||||
func ParseAccept(header string) (accept []Accept)
|
||||
Parse an Accept Header string returning a sorted list
|
||||
of clauses
|
||||
|
||||
|
||||
TYPES
|
||||
|
||||
type Accept struct {
|
||||
Type, SubType string
|
||||
Q float32
|
||||
Params map[string]string
|
||||
}
|
||||
Structure to represent a clause in an HTTP Accept Header
|
||||
|
||||
|
||||
SUBDIRECTORIES
|
||||
|
||||
.hg
|
162
vendor/bitbucket.org/ww/goautoneg/autoneg.go
generated
vendored
|
@@ -1,162 +0,0 @@
|
|||
/*
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
*/
|
||||
package goautoneg
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Structure to represent a clause in an HTTP Accept Header
|
||||
type Accept struct {
|
||||
Type, SubType string
|
||||
Q float64
|
||||
Params map[string]string
|
||||
}
|
||||
|
||||
// For internal use, so that we can use the sort interface
|
||||
type accept_slice []Accept
|
||||
|
||||
func (accept accept_slice) Len() int {
|
||||
slice := []Accept(accept)
|
||||
return len(slice)
|
||||
}
|
||||
|
||||
func (accept accept_slice) Less(i, j int) bool {
|
||||
slice := []Accept(accept)
|
||||
ai, aj := slice[i], slice[j]
|
||||
if ai.Q > aj.Q {
|
||||
return true
|
||||
}
|
||||
if ai.Type != "*" && aj.Type == "*" {
|
||||
return true
|
||||
}
|
||||
if ai.SubType != "*" && aj.SubType == "*" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (accept accept_slice) Swap(i, j int) {
|
||||
slice := []Accept(accept)
|
||||
slice[i], slice[j] = slice[j], slice[i]
|
||||
}
|
||||
|
||||
// Parse an Accept Header string returning a sorted list
|
||||
// of clauses
|
||||
func ParseAccept(header string) (accept []Accept) {
|
||||
parts := strings.Split(header, ",")
|
||||
accept = make([]Accept, 0, len(parts))
|
||||
for _, part := range parts {
|
||||
part := strings.Trim(part, " ")
|
||||
|
||||
a := Accept{}
|
||||
a.Params = make(map[string]string)
|
||||
a.Q = 1.0
|
||||
|
||||
mrp := strings.Split(part, ";")
|
||||
|
||||
media_range := mrp[0]
|
||||
sp := strings.Split(media_range, "/")
|
||||
a.Type = strings.Trim(sp[0], " ")
|
||||
|
||||
switch {
|
||||
case len(sp) == 1 && a.Type == "*":
|
||||
a.SubType = "*"
|
||||
case len(sp) == 2:
|
||||
a.SubType = strings.Trim(sp[1], " ")
|
||||
default:
|
||||
continue
|
||||
}
|
||||
|
||||
if len(mrp) == 1 {
|
||||
accept = append(accept, a)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, param := range mrp[1:] {
|
||||
sp := strings.SplitN(param, "=", 2)
|
||||
if len(sp) != 2 {
|
||||
continue
|
||||
}
|
||||
token := strings.Trim(sp[0], " ")
|
||||
if token == "q" {
|
||||
a.Q, _ = strconv.ParseFloat(sp[1], 32)
|
||||
} else {
|
||||
a.Params[token] = strings.Trim(sp[1], " ")
|
||||
}
|
||||
}
|
||||
|
||||
accept = append(accept, a)
|
||||
}
|
||||
|
||||
slice := accept_slice(accept)
|
||||
sort.Sort(slice)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Negotiate the most appropriate content_type given the accept header
|
||||
// and a list of alternatives.
|
||||
func Negotiate(header string, alternatives []string) (content_type string) {
|
||||
asp := make([][]string, 0, len(alternatives))
|
||||
for _, ctype := range alternatives {
|
||||
asp = append(asp, strings.SplitN(ctype, "/", 2))
|
||||
}
|
||||
for _, clause := range ParseAccept(header) {
|
||||
for i, ctsp := range asp {
|
||||
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
|
||||
content_type = alternatives[i]
|
||||
return
|
||||
}
|
||||
if clause.Type == ctsp[0] && clause.SubType == "*" {
|
||||
content_type = alternatives[i]
|
||||
return
|
||||
}
|
||||
if clause.Type == "*" && clause.SubType == "*" {
|
||||
content_type = alternatives[i]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
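Negotiate and ParseAccept above are the removed package's entire public surface. A minimal usage sketch, assuming the pre-removal import path bitbucket.org/ww/goautoneg still resolves in your build:

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // import path as vendored before this commit
)

func main() {
	header := "text/html;q=0.9, application/json, */*;q=0.1"

	// Pick the best server-side alternative for the client's Accept header.
	best := goautoneg.Negotiate(header, []string{"application/json", "text/html"})
	fmt.Println(best) // application/json (no q parameter, so q defaults to 1.0)

	// Or inspect the parsed clauses, already sorted by preference.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}
}
```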
36
vendor/github.com/go-openapi/analysis/.drone.yml
generated
vendored
@@ -1,36 +0,0 @@
clone:
  path: github.com/go-openapi/analysis

matrix:
  GO_VERSION:
    - "1.6"

build:
  integration:
    image: golang:$$GO_VERSION
    pull: true
    commands:
      - go get -u github.com/stretchr/testify/assert
      - go get -u gopkg.in/yaml.v2
      - go get -u github.com/go-openapi/swag
      - go get -u github.com/go-openapi/jsonpointer
      - go get -u github.com/go-openapi/spec
      - go get -u github.com/go-openapi/loads/fmts
      - go test -race ./...
      - go test -v -cover -coverprofile=coverage.out -covermode=count ./...

notify:
  slack:
    channel: bots
    webhook_url: $$SLACK_URL
    username: drone

publish:
  coverage:
    server: https://coverage.vmware.run
    token: $$GITHUB_TOKEN
    # threshold: 70
    # must_increase: true
    when:
      matrix:
        GO_VERSION: "1.6"
2
vendor/github.com/go-openapi/analysis/.gitignore
generated
vendored
@@ -1,2 +0,0 @@
secrets.yml
coverage.out
12
vendor/github.com/go-openapi/analysis/.pullapprove.yml
generated
vendored
@@ -1,12 +0,0 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
  members:
    - casualjim
    - frapposelli
    - vburenin
    - pytlesk4
  name: pullapprove
  required: 1
74
vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
generated
vendored
|
@@ -1,74 +0,0 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
6
vendor/github.com/go-openapi/analysis/README.md
generated
vendored
@@ -1,6 +0,0 @@
# OpenAPI initiative analysis [](https://ci.vmware.run/go-openapi/analysis) [](https://coverage.vmware.run/go-openapi/analysis) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [](http://godoc.org/github.com/go-openapi/analysis)


A foundational library to analyze an OAI specification document for easier reasoning about the content.
614
vendor/github.com/go-openapi/analysis/analyzer.go
generated
vendored
|
@@ -1,614 +0,0 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
slashpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
type referenceAnalysis struct {
|
||||
schemas map[string]spec.Ref
|
||||
responses map[string]spec.Ref
|
||||
parameters map[string]spec.Ref
|
||||
items map[string]spec.Ref
|
||||
allRefs map[string]spec.Ref
|
||||
referenced struct {
|
||||
schemas map[string]SchemaRef
|
||||
responses map[string]*spec.Response
|
||||
parameters map[string]*spec.Parameter
|
||||
}
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
|
||||
r.allRefs["#"+key] = ref
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
|
||||
r.items["#"+key] = items.Ref
|
||||
r.addRef(key, items.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
|
||||
r.schemas["#"+key] = ref.Schema.Ref
|
||||
r.addRef(key, ref.Schema.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
|
||||
r.responses["#"+key] = resp.Ref
|
||||
r.addRef(key, resp.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
|
||||
r.parameters["#"+key] = param.Ref
|
||||
r.addRef(key, param.Ref)
|
||||
}
|
||||
|
||||
// New takes a swagger spec object and returns an analyzed spec document.
|
||||
// The analyzed document contains a number of indices that make it easier to
|
||||
// reason about semantics of a swagger specification for use in code generation
|
||||
// or validation etc.
|
||||
func New(doc *spec.Swagger) *Spec {
|
||||
a := &Spec{
|
||||
spec: doc,
|
||||
consumes: make(map[string]struct{}, 150),
|
||||
produces: make(map[string]struct{}, 150),
|
||||
authSchemes: make(map[string]struct{}, 150),
|
||||
operations: make(map[string]map[string]*spec.Operation, 150),
|
||||
allSchemas: make(map[string]SchemaRef, 150),
|
||||
allOfs: make(map[string]SchemaRef, 150),
|
||||
references: referenceAnalysis{
|
||||
schemas: make(map[string]spec.Ref, 150),
|
||||
responses: make(map[string]spec.Ref, 150),
|
||||
parameters: make(map[string]spec.Ref, 150),
|
||||
items: make(map[string]spec.Ref, 150),
|
||||
allRefs: make(map[string]spec.Ref, 150),
|
||||
},
|
||||
}
|
||||
a.references.referenced.schemas = make(map[string]SchemaRef, 150)
|
||||
a.references.referenced.responses = make(map[string]*spec.Response, 150)
|
||||
a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
|
||||
a.initialize()
|
||||
return a
|
||||
}
|
||||
|
||||
// Spec takes a swagger spec object and turns it into a registry
|
||||
// with a bunch of utility methods to act on the information in the spec
|
||||
type Spec struct {
|
||||
spec *spec.Swagger
|
||||
consumes map[string]struct{}
|
||||
produces map[string]struct{}
|
||||
authSchemes map[string]struct{}
|
||||
operations map[string]map[string]*spec.Operation
|
||||
references referenceAnalysis
|
||||
allSchemas map[string]SchemaRef
|
||||
allOfs map[string]SchemaRef
|
||||
}
|
||||
|
||||
func (s *Spec) initialize() {
|
||||
for _, c := range s.spec.Consumes {
|
||||
s.consumes[c] = struct{}{}
|
||||
}
|
||||
for _, c := range s.spec.Produces {
|
||||
s.produces[c] = struct{}{}
|
||||
}
|
||||
for _, ss := range s.spec.Security {
|
||||
for k := range ss {
|
||||
s.authSchemes[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
for path, pathItem := range s.AllPaths() {
|
||||
s.analyzeOperations(path, &pathItem)
|
||||
}
|
||||
|
||||
for name, parameter := range s.spec.Parameters {
|
||||
refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
|
||||
if parameter.Items != nil {
|
||||
s.analyzeItems("items", parameter.Items, refPref)
|
||||
}
|
||||
if parameter.In == "body" && parameter.Schema != nil {
|
||||
s.analyzeSchema("schema", *parameter.Schema, refPref)
|
||||
}
|
||||
}
|
||||
|
||||
for name, response := range s.spec.Responses {
|
||||
refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
|
||||
for _, v := range response.Headers {
|
||||
if v.Items != nil {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
}
|
||||
if response.Schema != nil {
|
||||
s.analyzeSchema("schema", *response.Schema, refPref)
|
||||
}
|
||||
}
|
||||
|
||||
for name, schema := range s.spec.Definitions {
|
||||
s.analyzeSchema(name, schema, "/definitions")
|
||||
}
|
||||
// TODO: after analyzing all things and flattening schemas etc
|
||||
// resolve all the collected references to their final representations
|
||||
// best put in a separate method because this could get expensive
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
|
||||
// TODO: resolve refs here?
|
||||
op := pi
|
||||
s.analyzeOperation("GET", path, op.Get)
|
||||
s.analyzeOperation("PUT", path, op.Put)
|
||||
s.analyzeOperation("POST", path, op.Post)
|
||||
s.analyzeOperation("PATCH", path, op.Patch)
|
||||
s.analyzeOperation("DELETE", path, op.Delete)
|
||||
s.analyzeOperation("HEAD", path, op.Head)
|
||||
s.analyzeOperation("OPTIONS", path, op.Options)
|
||||
for i, param := range op.Parameters {
|
||||
refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
|
||||
if param.Ref.String() != "" {
|
||||
s.references.addParamRef(refPref, &param)
|
||||
}
|
||||
if param.Items != nil {
|
||||
s.analyzeItems("items", param.Items, refPref)
|
||||
}
|
||||
if param.Schema != nil {
|
||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
|
||||
if items == nil {
|
||||
return
|
||||
}
|
||||
refPref := slashpath.Join(prefix, name)
|
||||
s.analyzeItems(name, items.Items, refPref)
|
||||
if items.Ref.String() != "" {
|
||||
s.references.addItemsRef(refPref, items)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
|
||||
if op == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, c := range op.Consumes {
|
||||
s.consumes[c] = struct{}{}
|
||||
}
|
||||
for _, c := range op.Produces {
|
||||
s.produces[c] = struct{}{}
|
||||
}
|
||||
for _, ss := range op.Security {
|
||||
for k := range ss {
|
||||
s.authSchemes[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
if _, ok := s.operations[method]; !ok {
|
||||
s.operations[method] = make(map[string]*spec.Operation)
|
||||
}
|
||||
s.operations[method][path] = op
|
||||
prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
|
||||
for i, param := range op.Parameters {
|
||||
refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
|
||||
if param.Ref.String() != "" {
|
||||
s.references.addParamRef(refPref, &param)
|
||||
}
|
||||
s.analyzeItems("items", param.Items, refPref)
|
||||
if param.In == "body" && param.Schema != nil {
|
||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||
}
|
||||
}
|
||||
if op.Responses != nil {
|
||||
if op.Responses.Default != nil {
|
||||
refPref := slashpath.Join(prefix, "responses", "default")
|
||||
if op.Responses.Default.Ref.String() != "" {
|
||||
s.references.addResponseRef(refPref, op.Responses.Default)
|
||||
}
|
||||
for _, v := range op.Responses.Default.Headers {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
if op.Responses.Default.Schema != nil {
|
||||
s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
|
||||
}
|
||||
}
|
||||
for k, res := range op.Responses.StatusCodeResponses {
|
||||
refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
|
||||
if res.Ref.String() != "" {
|
||||
s.references.addResponseRef(refPref, &res)
|
||||
}
|
||||
for _, v := range res.Headers {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
if res.Schema != nil {
|
||||
s.analyzeSchema("schema", *res.Schema, refPref)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
|
||||
refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
|
||||
schRef := SchemaRef{
|
||||
Name: name,
|
||||
Schema: &schema,
|
||||
Ref: spec.MustCreateRef("#" + refURI),
|
||||
}
|
||||
s.allSchemas["#"+refURI] = schRef
|
||||
if schema.Ref.String() != "" {
|
||||
s.references.addSchemaRef(refURI, schRef)
|
||||
}
|
||||
for k, v := range schema.Definitions {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
|
||||
}
|
||||
for k, v := range schema.Properties {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
|
||||
}
|
||||
for k, v := range schema.PatternProperties {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
|
||||
}
|
||||
for i, v := range schema.AllOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
|
||||
}
|
||||
if len(schema.AllOf) > 0 {
|
||||
s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
|
||||
}
|
||||
for i, v := range schema.AnyOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
|
||||
}
|
||||
for i, v := range schema.OneOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
|
||||
}
|
||||
if schema.Not != nil {
|
||||
s.analyzeSchema("not", *schema.Not, refURI)
|
||||
}
|
||||
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
|
||||
s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
|
||||
}
|
||||
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
|
||||
s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
|
||||
}
|
||||
if schema.Items != nil {
|
||||
if schema.Items.Schema != nil {
|
||||
s.analyzeSchema("items", *schema.Items.Schema, refURI)
|
||||
}
|
||||
for i, sch := range schema.Items.Schemas {
|
||||
s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SecurityRequirement is a representation of a security requirement for an operation
|
||||
type SecurityRequirement struct {
|
||||
Name string
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// SecurityRequirementsFor gets the security requirements for the operation
|
||||
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
|
||||
if s.spec.Security == nil && operation.Security == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
schemes := s.spec.Security
|
||||
if operation.Security != nil {
|
||||
schemes = operation.Security
|
||||
}
|
||||
|
||||
unique := make(map[string]SecurityRequirement)
|
||||
for _, scheme := range schemes {
|
||||
for k, v := range scheme {
|
||||
if _, ok := unique[k]; !ok {
|
||||
unique[k] = SecurityRequirement{Name: k, Scopes: v}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var result []SecurityRequirement
|
||||
for _, v := range unique {
|
||||
result = append(result, v)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
|
||||
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
|
||||
requirements := s.SecurityRequirementsFor(operation)
|
||||
if len(requirements) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make(map[string]spec.SecurityScheme)
|
||||
for _, v := range requirements {
|
||||
if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
|
||||
if definition != nil {
|
||||
result[v.Name] = *definition
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ConsumesFor gets the mediatypes for the operation
|
||||
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
|
||||
|
||||
if len(operation.Consumes) == 0 {
|
||||
cons := make(map[string]struct{}, len(s.spec.Consumes))
|
||||
for _, k := range s.spec.Consumes {
|
||||
cons[k] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(cons)
|
||||
}
|
||||
|
||||
cons := make(map[string]struct{}, len(operation.Consumes))
|
||||
for _, c := range operation.Consumes {
|
||||
cons[c] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(cons)
|
||||
}
|
||||
|
||||
// ProducesFor gets the mediatypes for the operation
|
||||
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
|
||||
if len(operation.Produces) == 0 {
|
||||
prod := make(map[string]struct{}, len(s.spec.Produces))
|
||||
for _, k := range s.spec.Produces {
|
||||
prod[k] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(prod)
|
||||
}
|
||||
|
||||
prod := make(map[string]struct{}, len(operation.Produces))
|
||||
for _, c := range operation.Produces {
|
||||
prod[c] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(prod)
|
||||
}
|
||||
|
||||
func mapKeyFromParam(param *spec.Parameter) string {
|
||||
return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
|
||||
}
|
||||
|
||||
func fieldNameFromParam(param *spec.Parameter) string {
|
||||
if nm, ok := param.Extensions.GetString("go-name"); ok {
|
||||
return nm
|
||||
}
|
||||
return swag.ToGoName(param.Name)
|
||||
}
|
||||
|
||||
func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
|
||||
for _, param := range parameters {
|
||||
pr := param
|
||||
if pr.Ref.String() != "" {
|
||||
obj, _, err := pr.Ref.GetPointer().Get(s.spec)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pr = obj.(spec.Parameter)
|
||||
}
|
||||
res[mapKeyFromParam(&pr)] = pr
|
||||
}
|
||||
}
|
||||
|
||||
// ParametersFor the specified operation id
|
||||
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
|
||||
gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
|
||||
bag := make(map[string]spec.Parameter)
|
||||
s.paramsAsMap(pi.Parameters, bag)
|
||||
s.paramsAsMap(op.Parameters, bag)
|
||||
|
||||
var res []spec.Parameter
|
||||
for _, v := range bag {
|
||||
res = append(res, v)
|
||||
}
|
||||
return res
|
||||
}
|
||||
for _, pi := range s.spec.Paths.Paths {
|
||||
if pi.Get != nil && pi.Get.ID == operationID {
|
||||
return gatherParams(&pi, pi.Get)
|
||||
}
|
||||
if pi.Head != nil && pi.Head.ID == operationID {
|
||||
return gatherParams(&pi, pi.Head)
|
||||
}
|
||||
if pi.Options != nil && pi.Options.ID == operationID {
|
||||
return gatherParams(&pi, pi.Options)
|
||||
}
|
||||
if pi.Post != nil && pi.Post.ID == operationID {
|
||||
return gatherParams(&pi, pi.Post)
|
||||
}
|
||||
if pi.Patch != nil && pi.Patch.ID == operationID {
|
||||
return gatherParams(&pi, pi.Patch)
|
||||
}
|
||||
if pi.Put != nil && pi.Put.ID == operationID {
|
||||
return gatherParams(&pi, pi.Put)
|
||||
}
|
||||
if pi.Delete != nil && pi.Delete.ID == operationID {
|
||||
return gatherParams(&pi, pi.Delete)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
|
||||
// apply for the method and path.
|
||||
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
|
||||
res := make(map[string]spec.Parameter)
|
||||
if pi, ok := s.spec.Paths.Paths[path]; ok {
|
||||
s.paramsAsMap(pi.Parameters, res)
|
||||
s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// OperationForName gets the operation for the given id
|
||||
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
|
||||
for method, pathItem := range s.operations {
|
||||
for path, op := range pathItem {
|
||||
if operationID == op.ID {
|
||||
return method, path, op, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", "", nil, false
|
||||
}
|
||||
|
||||
// OperationFor the given method and path
|
||||
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
|
||||
if mp, ok := s.operations[strings.ToUpper(method)]; ok {
|
||||
op, fn := mp[path]
|
||||
return op, fn
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Operations gathers all the operations specified in the spec document
|
||||
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
|
||||
return s.operations
|
||||
}
|
||||
|
||||
func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
|
||||
if len(mp) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]string, 0, len(mp))
|
||||
for k := range mp {
|
||||
result = append(result, k)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AllPaths returns all the paths in the swagger spec
|
||||
func (s *Spec) AllPaths() map[string]spec.PathItem {
|
||||
if s.spec == nil || s.spec.Paths == nil {
|
||||
return nil
|
||||
}
|
||||
return s.spec.Paths.Paths
|
||||
}
|
||||
|
||||
// OperationIDs gets all the operation ids based on method and path
|
||||
func (s *Spec) OperationIDs() []string {
|
||||
if len(s.operations) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make([]string, 0, len(s.operations))
|
||||
for method, v := range s.operations {
|
||||
for p, o := range v {
|
||||
if o.ID != "" {
|
||||
result = append(result, o.ID)
|
||||
} else {
|
||||
result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// RequiredConsumes gets all the distinct consumes that are specified in the specification document
|
||||
func (s *Spec) RequiredConsumes() []string {
|
||||
return s.structMapKeys(s.consumes)
|
||||
}
|
||||
|
||||
// RequiredProduces gets all the distinct produces that are specified in the specification document
|
||||
func (s *Spec) RequiredProduces() []string {
|
||||
return s.structMapKeys(s.produces)
|
||||
}
|
||||
|
||||
// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
|
||||
func (s *Spec) RequiredSecuritySchemes() []string {
|
||||
return s.structMapKeys(s.authSchemes)
|
||||
}
|
||||
|
||||
// SchemaRef is a reference to a schema
|
||||
type SchemaRef struct {
|
||||
Name string
|
||||
Ref spec.Ref
|
||||
Schema *spec.Schema
|
||||
}
|
||||
|
||||
// SchemasWithAllOf returns schema references to all schemas that are defined
|
||||
// with an allOf key
|
||||
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
|
||||
for _, v := range s.allOfs {
|
||||
result = append(result, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllDefinitions returns schema references for all the definitions that were discovered
|
||||
func (s *Spec) AllDefinitions() (result []SchemaRef) {
|
||||
for _, v := range s.allSchemas {
|
||||
result = append(result, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllDefinitionReferences returns json refs for all the discovered schemas
|
||||
func (s *Spec) AllDefinitionReferences() (result []string) {
|
||||
for _, v := range s.references.schemas {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllParameterReferences returns json refs for all the discovered parameters
|
||||
func (s *Spec) AllParameterReferences() (result []string) {
|
||||
for _, v := range s.references.parameters {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllResponseReferences returns json refs for all the discovered responses
|
||||
func (s *Spec) AllResponseReferences() (result []string) {
|
||||
for _, v := range s.references.responses {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllItemsReferences returns the references for all the items
|
||||
func (s *Spec) AllItemsReferences() (result []string) {
|
||||
for _, v := range s.references.items {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllReferences returns all the references found in the document
|
||||
func (s *Spec) AllReferences() (result []string) {
|
||||
for _, v := range s.references.allRefs {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllRefs returns all the unique references found in the document
|
||||
func (s *Spec) AllRefs() (result []spec.Ref) {
|
||||
set := make(map[string]struct{})
|
||||
for _, v := range s.references.allRefs {
|
||||
a := v.String()
|
||||
if a == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := set[a]; !ok {
|
||||
set[a] = struct{}{}
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
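The New constructor above builds the operation, consumes/produces, security-scheme and $ref indices that the rest of the deleted package queries. A minimal sketch of driving it directly, assuming a hand-written spec and the github.com/go-openapi/spec package pinned by this vendor tree:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// A tiny in-memory Swagger 2.0 document; real callers would load one from disk.
	raw := []byte(`{"swagger":"2.0","paths":{"/ping":{"get":{
		"operationId":"ping","produces":["text/plain"],
		"responses":{"200":{"description":"pong"}}}}}}`)

	swspec := new(spec.Swagger)
	if err := json.Unmarshal(raw, swspec); err != nil {
		log.Fatal(err)
	}

	a := analysis.New(swspec)
	fmt.Println(a.OperationIDs())     // [ping]
	fmt.Println(a.RequiredProduces()) // [text/plain]
}
```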
39
vendor/github.com/go-openapi/loads/.drone.yml
generated
vendored
@@ -1,39 +0,0 @@
clone:
  path: github.com/go-openapi/loads

matrix:
  GO_VERSION:
    - "1.6"

build:
  integration:
    image: golang:$$GO_VERSION
    pull: true
    environment:
      GOCOVMODE: "count"
    commands:
      - go get -u github.com/axw/gocov/gocov
      - go get -u gopkg.in/matm/v1/gocov-html
      - go get -u github.com/cee-dub/go-junit-report
      - go get -u github.com/stretchr/testify/assert
      - go get -u gopkg.in/yaml.v2
      - go get -u github.com/go-openapi/swag
      - go get -u github.com/go-openapi/analysis
      - go get -u github.com/go-openapi/spec
      - ./hack/build-drone.sh

notify:
  slack:
    channel: bots
    webhook_url: $$SLACK_URL
    username: drone

publish:
  coverage:
    server: https://coverage.vmware.run
    token: $$GITHUB_TOKEN
    # threshold: 70
    # must_increase: true
    when:
      matrix:
        GO_VERSION: "1.6"
4
vendor/github.com/go-openapi/loads/.gitignore
generated
vendored
@@ -1,4 +0,0 @@
secrets.yml
coverage.out
profile.cov
profile.out
13
vendor/github.com/go-openapi/loads/.pullapprove.yml
generated
vendored
@@ -1,13 +0,0 @@
approve_by_comment: true
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
reject_regex: ^[Rr]ejected
reset_on_push: false
reviewers:
  members:
    - casualjim
    - chancez
    - frapposelli
    - vburenin
    - pytlesk4
  name: pullapprove
  required: 1
74
vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
generated
vendored
|
@@ -1,74 +0,0 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
5
vendor/github.com/go-openapi/loads/README.md
generated
vendored
@@ -1,5 +0,0 @@
# Loads OAI specs [](https://ci.vmware.run/go-openapi/loads) [](https://coverage.vmware.run/go-openapi/loads) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [](http://godoc.org/github.com/go-openapi/loads)

Loading of OAI specification documents from local or remote locations.
203
vendor/github.com/go-openapi/loads/spec.go
generated
vendored
|
@@ -1,203 +0,0 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package loads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
// JSONDoc loads a json document from either a file or a remote url
|
||||
func JSONDoc(path string) (json.RawMessage, error) {
|
||||
data, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.RawMessage(data), nil
|
||||
}
|
||||
|
||||
// DocLoader represents a doc loader type
|
||||
type DocLoader func(string) (json.RawMessage, error)
|
||||
|
||||
// DocMatcher represents a predicate to check if a loader matches
|
||||
type DocMatcher func(string) bool
|
||||
|
||||
var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}
|
||||
|
||||
// AddLoader for a document
|
||||
func AddLoader(predicate DocMatcher, load DocLoader) {
|
||||
prev := loaders
|
||||
loaders = &loader{
|
||||
Match: predicate,
|
||||
Fn: load,
|
||||
Next: prev,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type loader struct {
|
||||
Fn DocLoader
|
||||
Match DocMatcher
|
||||
Next *loader
|
||||
}
|
||||
|
||||
// JSONSpec loads a spec from a json document
|
||||
func JSONSpec(path string) (*Document, error) {
|
||||
data, err := JSONDoc(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// convert to json
|
||||
return Analyzed(json.RawMessage(data), "")
|
||||
}
|
||||
|
||||
// Document represents a swagger spec document
|
||||
type Document struct {
|
||||
// specAnalyzer
|
||||
Analyzer *analysis.Spec
|
||||
spec *spec.Swagger
|
||||
origSpec *spec.Swagger
|
||||
schema *spec.Schema
|
||||
raw json.RawMessage
|
||||
}
|
||||
|
||||
// Spec loads a new spec document
|
||||
func Spec(path string) (*Document, error) {
|
||||
specURL, err := url.Parse(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for l := loaders.Next; l != nil; l = l.Next {
|
||||
if loaders.Match(specURL.Path) {
|
||||
b, err2 := loaders.Fn(path)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
return Analyzed(b, "")
|
||||
}
|
||||
}
|
||||
b, err := loaders.Fn(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Analyzed(b, "")
|
||||
}
|
||||
|
||||
var swag20Schema = spec.MustLoadSwagger20Schema()
|
||||
|
||||
// Analyzed creates a new analyzed spec document
|
||||
func Analyzed(data json.RawMessage, version string) (*Document, error) {
|
||||
if version == "" {
|
||||
version = "2.0"
|
||||
}
|
||||
if version != "2.0" {
|
||||
return nil, fmt.Errorf("spec version %q is not supported", version)
|
||||
}
|
||||
|
||||
swspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(data, swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
origsqspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(data, origsqspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := &Document{
|
||||
Analyzer: analysis.New(swspec),
|
||||
schema: swag20Schema,
|
||||
spec: swspec,
|
||||
raw: data,
|
||||
origSpec: origsqspec,
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Expanded expands the ref fields in the spec document and returns a new spec document
|
||||
func (d *Document) Expanded() (*Document, error) {
|
||||
swspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(d.raw, swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := spec.ExpandSpec(swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dd := &Document{
|
||||
Analyzer: analysis.New(swspec),
|
||||
spec: swspec,
|
||||
schema: swag20Schema,
|
||||
raw: d.raw,
|
||||
origSpec: d.origSpec,
|
||||
}
|
||||
return dd, nil
|
||||
}
|
||||
|
||||
// BasePath the base path for this spec
|
||||
func (d *Document) BasePath() string {
|
||||
return d.spec.BasePath
|
||||
}
|
||||
|
||||
// Version returns the version of this spec
|
||||
func (d *Document) Version() string {
|
||||
return d.spec.Swagger
|
||||
}
|
||||
|
||||
// Schema returns the swagger 2.0 schema
|
||||
func (d *Document) Schema() *spec.Schema {
|
||||
return d.schema
|
||||
}
|
||||
|
||||
// Spec returns the swagger spec object model
|
||||
func (d *Document) Spec() *spec.Swagger {
|
||||
return d.spec
|
||||
}
|
||||
|
||||
// Host returns the host for the API
|
||||
func (d *Document) Host() string {
|
||||
return d.spec.Host
|
||||
}
|
||||
|
||||
// Raw returns the raw swagger spec as json bytes
|
||||
func (d *Document) Raw() json.RawMessage {
|
||||
return d.raw
|
||||
}
|
||||
|
||||
func (d *Document) OrigSpec() *spec.Swagger {
|
||||
return d.origSpec
|
||||
}
|
||||
|
||||
// ResetDefinitions gives a shallow copy with the models reset
|
||||
func (d *Document) ResetDefinitions() *Document {
|
||||
defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
|
||||
for k, v := range d.origSpec.Definitions {
|
||||
defs[k] = v
|
||||
}
|
||||
|
||||
d.spec.Definitions = defs
|
||||
return d
|
||||
}
|
||||
|
||||
// Pristine creates a new pristine document instance based on the input data
|
||||
func (d *Document) Pristine() *Document {
|
||||
dd, _ := Analyzed(d.Raw(), d.Version())
|
||||
return dd
|
||||
}
|
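The Document type above bundles loading, analysis and $ref expansion for a Swagger 2.0 spec. A minimal sketch, assuming a local ./swagger.json (placeholder path) and the import path github.com/go-openapi/loads:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	// Spec accepts a local file path or a remote URL.
	doc, err := loads.Spec("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc.Version(), doc.Host(), doc.BasePath())

	// Expanded resolves $ref pointers into a new document, leaving doc untouched.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(expanded.Analyzer.AllDefinitions()))
}
```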
136
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
Normal file
|
@@ -0,0 +1,136 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements functions to marshal proto.Message to/from
|
||||
// google.protobuf.Any message.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
const googleApis = "type.googleapis.com/"
|
||||
|
||||
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
||||
//
|
||||
// Note that regular type assertions should be done using the Is
|
||||
// function. AnyMessageName is provided for less common use cases like filtering a
|
||||
// sequence of Any messages based on a set of allowed message type names.
|
||||
func AnyMessageName(any *any.Any) (string, error) {
|
||||
slash := strings.LastIndex(any.TypeUrl, "/")
|
||||
if slash < 0 {
|
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||
}
|
||||
return any.TypeUrl[slash+1:], nil
|
||||
}
|
||||
|
||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
||||
func MarshalAny(pb proto.Message) (*any.Any, error) {
|
||||
value, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
||||
}
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||
// message. The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
type DynamicAny struct {
|
||||
proto.Message
|
||||
}
|
||||
|
||||
// Empty returns a new proto.Message of the type specified in a
|
||||
// google.protobuf.Any message. It returns an error if corresponding message
|
||||
// type isn't linked in.
|
||||
func Empty(any *any.Any) (proto.Message, error) {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t := proto.MessageType(aname)
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
||||
}
|
||||
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
||||
// message and places the decoded result in pb. It returns an error if type of
|
||||
// contents of Any message does not match type of pb message.
|
||||
//
|
||||
// pb can be a proto.Message, or a *DynamicAny.
|
||||
func UnmarshalAny(any *any.Any, pb proto.Message) error {
|
||||
if d, ok := pb.(*DynamicAny); ok {
|
||||
if d.Message == nil {
|
||||
var err error
|
||||
d.Message, err = Empty(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return UnmarshalAny(any, d.Message)
|
||||
}
|
||||
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mname := proto.MessageName(pb)
|
||||
if aname != mname {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
||||
}
|
||||
return proto.Unmarshal(any.Value, pb)
|
||||
}
|
||||
|
||||
// Is returns true if any value contains a given message type.
|
||||
func Is(any *any.Any, pb proto.Message) bool {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return aname == proto.MessageName(pb)
|
||||
}
|
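The newly vendored ptypes package wraps packing and unpacking of google.protobuf.Any values. A round-trip sketch, assuming the sibling ptypes/duration package is also linked in (importing it registers google.protobuf.Duration, which Empty and UnmarshalAny rely on):

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message into an Any; the type URL is derived from the message name.
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 90})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// DynamicAny allocates the right concrete type before unpacking.
	var d ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true
	fmt.Println(d.Message)                       // seconds:90
}
```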
155
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
Normal file
|
@@ -0,0 +1,155 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package any is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/golang/protobuf/ptypes/any/any.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Any
|
||||
*/
|
||||
package any
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 187 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
|
||||
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
|
||||
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
|
||||
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
|
||||
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
|
||||
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
|
||||
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
|
||||
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
|
||||
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
|
||||
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
|
||||
}
|
140
vendor/github.com/golang/protobuf/ptypes/any/any.proto
generated
vendored
Normal file
@@ -0,0 +1,140 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
|
35
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package ptypes contains code for interacting with well-known types.
|
||||
*/
|
||||
package ptypes
|
102
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements conversions between google.protobuf.Duration
|
||||
// and time.Duration.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
const (
|
||||
// Range of a durpb.Duration in seconds, as specified in
|
||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// validateDuration determines whether the durpb.Duration is valid according to the
|
||||
// definition in google/protobuf/duration.proto. A valid durpb.Duration
|
||||
// may still be too large to fit into a time.Duration (the range of durpb.Duration
|
||||
// is about 10,000 years, and the range of time.Duration is about 290).
|
||||
func validateDuration(d *durpb.Duration) error {
|
||||
if d == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", d)
|
||||
}
|
||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", d)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Duration converts a durpb.Duration to a time.Duration. Duration
|
||||
// returns an error if the durpb.Duration is invalid or is too large to be
|
||||
// represented in a time.Duration.
|
||||
func Duration(p *durpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(p.Seconds) * time.Second
|
||||
if int64(d/time.Second) != p.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos)
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durpb.Duration.
|
||||
func DurationProto(d time.Duration) *durpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durpb.Duration{
|
||||
Seconds: secs,
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
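A minimal usage sketch of the Duration helpers above (not part of this commit): only DurationProto and Duration from this diff are used; the main function and the chosen 90-second value are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> *durpb.Duration
	p := ptypes.DurationProto(90 * time.Second) // Seconds: 90, Nanos: 0

	// *durpb.Duration -> time.Duration (errors if out of range or malformed)
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30s
}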
114
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
@@ -0,0 +1,114 @@
// Code generated by protoc-gen-go.
|
||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package duration is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Duration
|
||||
*/
|
||||
package duration
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 189 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
|
||||
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
|
||||
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
|
||||
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
|
||||
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
|
||||
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
|
||||
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13,
|
||||
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
|
||||
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
|
||||
0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00,
|
||||
}
|
98
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
generated
vendored
Normal file
@@ -0,0 +1,98 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/duration";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
66
vendor/github.com/golang/protobuf/ptypes/regen.sh
generated
vendored
Executable file
@@ -0,0 +1,66 @@
#!/bin/bash -e
|
||||
#
|
||||
# This script fetches and rebuilds the "well-known types" protocol buffers.
|
||||
# To run this you will need protoc and goprotobuf installed;
|
||||
# see https://github.com/golang/protobuf for instructions.
|
||||
# You also need Go and Git installed.
|
||||
|
||||
PKG=github.com/golang/protobuf/ptypes
|
||||
UPSTREAM=https://github.com/google/protobuf
|
||||
UPSTREAM_SUBDIR=src/google/protobuf
|
||||
PROTO_FILES='
|
||||
any.proto
|
||||
duration.proto
|
||||
empty.proto
|
||||
struct.proto
|
||||
timestamp.proto
|
||||
wrappers.proto
|
||||
'
|
||||
|
||||
function die() {
|
||||
echo 1>&2 $*
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Sanity check that the right tools are accessible.
|
||||
for tool in go git protoc protoc-gen-go; do
|
||||
q=$(which $tool) || die "didn't find $tool"
|
||||
echo 1>&2 "$tool: $q"
|
||||
done
|
||||
|
||||
tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
|
||||
trap 'rm -rf $tmpdir' EXIT
|
||||
|
||||
echo -n 1>&2 "finding package dir... "
|
||||
pkgdir=$(go list -f '{{.Dir}}' $PKG)
|
||||
echo 1>&2 $pkgdir
|
||||
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
|
||||
echo 1>&2 "base: $base"
|
||||
cd $base
|
||||
|
||||
echo 1>&2 "fetching latest protos... "
|
||||
git clone -q $UPSTREAM $tmpdir
|
||||
# Pass 1: build mapping from upstream filename to our filename.
|
||||
declare -A filename_map
|
||||
for f in $(cd $PKG && find * -name '*.proto'); do
|
||||
echo -n 1>&2 "looking for latest version of $f... "
|
||||
up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
|
||||
echo 1>&2 $up
|
||||
if [ $(echo $up | wc -w) != "1" ]; then
|
||||
die "not exactly one match"
|
||||
fi
|
||||
filename_map[$up]=$f
|
||||
done
|
||||
# Pass 2: copy files
|
||||
for up in "${!filename_map[@]}"; do
|
||||
f=${filename_map[$up]}
|
||||
shortname=$(basename $f | sed 's,\.proto$,,')
|
||||
cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
|
||||
done
|
||||
|
||||
# Run protoc once per package.
|
||||
for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
|
||||
echo 1>&2 "* $dir"
|
||||
protoc --go_out=. $dir/*.proto
|
||||
done
|
||||
echo 1>&2 "All OK"
|
125
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements operations on google.protobuf.Timestamp.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range
|
||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||
// in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes
|
||||
// the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||
func validateTimestamp(ts *tspb.Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
|
||||
seconds := t.Unix()
|
||||
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
||||
ts := &tspb.Timestamp{
|
||||
Seconds: seconds,
|
||||
Nanos: nanos,
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
||||
// Timestamps, it returns an error message in parentheses.
|
||||
func TimestampString(ts *tspb.Timestamp) string {
|
||||
t, err := Timestamp(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
|
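A minimal usage sketch of the Timestamp helpers above (not part of this commit): only TimestampProto, Timestamp and TimestampString from this diff are used; the chosen date and the main function are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> *tspb.Timestamp (errors if outside [0001-01-01, 10000-01-01))
	ts, err := ptypes.TimestampProto(time.Date(2017, 6, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	// *tspb.Timestamp -> time.Time in UTC, plus the RFC 3339 rendering.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t, ptypes.TimestampString(ts)) // 2017-06-01 12:00:00 +0000 UTC 2017-06-01T12:00:00Z
}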
127
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
// Code generated by protoc-gen-go.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package timestamp is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Timestamp
|
||||
*/
|
||||
package timestamp
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// now = time.time()
|
||||
// seconds = int(now)
|
||||
// nanos = int((now - seconds) * 10**9)
|
||||
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 194 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
|
||||
0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
|
||||
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
|
||||
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
|
||||
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
|
||||
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27,
|
||||
0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1,
|
||||
0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03,
|
||||
0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92,
|
||||
0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01,
|
||||
0x00, 0x00,
|
||||
}
|
111
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
Normal file
@@ -0,0 +1,111 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// now = time.time()
|
||||
// seconds = int(now)
|
||||
// nanos = int((now - seconds) * 10**9)
|
||||
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
@@ -200,3 +200,4 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

6944
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
generated
vendored
Normal file
File diff suppressed because it is too large
4456
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
663
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
generated
vendored
Normal file
@@ -0,0 +1,663 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package openapi.v2;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one-level of name nesting and be
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "OpenAPIProto";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapi_v2";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
option objc_class_prefix = "OAS";
|
||||
|
||||
message AdditionalPropertiesItem {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
bool boolean = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message Any {
|
||||
google.protobuf.Any value = 1;
|
||||
string yaml = 2;
|
||||
}
|
||||
|
||||
message ApiKeySecurity {
|
||||
string type = 1;
|
||||
string name = 2;
|
||||
string in = 3;
|
||||
string description = 4;
|
||||
repeated NamedAny vendor_extension = 5;
|
||||
}
|
||||
|
||||
message BasicAuthenticationSecurity {
|
||||
string type = 1;
|
||||
string description = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
message BodyParameter {
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 1;
|
||||
// The name of the parameter.
|
||||
string name = 2;
|
||||
// Determines the location of the parameter.
|
||||
string in = 3;
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 4;
|
||||
Schema schema = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
// Contact information for the owners of the API.
|
||||
message Contact {
|
||||
// The identifying name of the contact person/organization.
|
||||
string name = 1;
|
||||
// The URL pointing to the contact information.
|
||||
string url = 2;
|
||||
// The email address of the contact person/organization.
|
||||
string email = 3;
|
||||
repeated NamedAny vendor_extension = 4;
|
||||
}
|
||||
|
||||
message Default {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// One or more JSON objects describing the schemas being consumed and produced by the API.
|
||||
message Definitions {
|
||||
repeated NamedSchema additional_properties = 1;
|
||||
}
|
||||
|
||||
message Document {
|
||||
// The Swagger version of this document.
|
||||
string swagger = 1;
|
||||
Info info = 2;
|
||||
// The host (name or ip) of the API. Example: 'swagger.io'
|
||||
string host = 3;
|
||||
// The base path to the API. Example: '/api'.
|
||||
string base_path = 4;
|
||||
// The transfer protocol of the API.
|
||||
repeated string schemes = 5;
|
||||
// A list of MIME types accepted by the API.
|
||||
repeated string consumes = 6;
|
||||
// A list of MIME types the API can produce.
|
||||
repeated string produces = 7;
|
||||
Paths paths = 8;
|
||||
Definitions definitions = 9;
|
||||
ParameterDefinitions parameters = 10;
|
||||
ResponseDefinitions responses = 11;
|
||||
repeated SecurityRequirement security = 12;
|
||||
SecurityDefinitions security_definitions = 13;
|
||||
repeated Tag tags = 14;
|
||||
ExternalDocs external_docs = 15;
|
||||
repeated NamedAny vendor_extension = 16;
|
||||
}
|
||||
|
||||
message Examples {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// information about external documentation
|
||||
message ExternalDocs {
|
||||
string description = 1;
|
||||
string url = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
message FileSchema {
|
||||
string format = 1;
|
||||
string title = 2;
|
||||
string description = 3;
|
||||
Any default = 4;
|
||||
repeated string required = 5;
|
||||
string type = 6;
|
||||
bool read_only = 7;
|
||||
ExternalDocs external_docs = 8;
|
||||
Any example = 9;
|
||||
repeated NamedAny vendor_extension = 10;
|
||||
}
|
||||
|
||||
message FormDataParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
// allows sending a parameter by name only or with an empty value.
|
||||
bool allow_empty_value = 5;
|
||||
string type = 6;
|
||||
string format = 7;
|
||||
PrimitivesItems items = 8;
|
||||
string collection_format = 9;
|
||||
Any default = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
repeated Any enum = 21;
|
||||
double multiple_of = 22;
|
||||
repeated NamedAny vendor_extension = 23;
|
||||
}
|
||||
|
||||
message Header {
|
||||
string type = 1;
|
||||
string format = 2;
|
||||
PrimitivesItems items = 3;
|
||||
string collection_format = 4;
|
||||
Any default = 5;
|
||||
double maximum = 6;
|
||||
bool exclusive_maximum = 7;
|
||||
double minimum = 8;
|
||||
bool exclusive_minimum = 9;
|
||||
int64 max_length = 10;
|
||||
int64 min_length = 11;
|
||||
string pattern = 12;
|
||||
int64 max_items = 13;
|
||||
int64 min_items = 14;
|
||||
bool unique_items = 15;
|
||||
repeated Any enum = 16;
|
||||
double multiple_of = 17;
|
||||
string description = 18;
|
||||
repeated NamedAny vendor_extension = 19;
|
||||
}
|
||||
|
||||
message HeaderParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
string type = 5;
|
||||
string format = 6;
|
||||
PrimitivesItems items = 7;
|
||||
string collection_format = 8;
|
||||
Any default = 9;
|
||||
double maximum = 10;
|
||||
bool exclusive_maximum = 11;
|
||||
double minimum = 12;
|
||||
bool exclusive_minimum = 13;
|
||||
int64 max_length = 14;
|
||||
int64 min_length = 15;
|
||||
string pattern = 16;
|
||||
int64 max_items = 17;
|
||||
int64 min_items = 18;
|
||||
bool unique_items = 19;
|
||||
repeated Any enum = 20;
|
||||
double multiple_of = 21;
|
||||
repeated NamedAny vendor_extension = 22;
|
||||
}
|
||||
|
||||
message Headers {
|
||||
repeated NamedHeader additional_properties = 1;
|
||||
}
|
||||
|
||||
// General information about the API.
|
||||
message Info {
|
||||
// A unique and precise title of the API.
|
||||
string title = 1;
|
||||
// A semantic version number of the API.
|
||||
string version = 2;
|
||||
// A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The terms of service for the API.
|
||||
string terms_of_service = 4;
|
||||
Contact contact = 5;
|
||||
License license = 6;
|
||||
repeated NamedAny vendor_extension = 7;
|
||||
}
|
||||
|
||||
message ItemsItem {
|
||||
repeated Schema schema = 1;
|
||||
}
|
||||
|
||||
message JsonReference {
|
||||
string _ref = 1;
|
||||
string description = 2;
|
||||
}
|
||||
|
||||
message License {
|
||||
// The name of the license type. It's encouraged to use an OSI compatible license.
|
||||
string name = 1;
|
||||
// The URL pointing to the license.
|
||||
string url = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
|
||||
message NamedAny {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Any value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
|
||||
message NamedHeader {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Header value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
|
||||
message NamedParameter {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Parameter value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
|
||||
message NamedPathItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
PathItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
|
||||
message NamedResponse {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Response value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
|
||||
message NamedResponseValue {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ResponseValue value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
|
||||
message NamedSchema {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Schema value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
|
||||
message NamedSecurityDefinitionsItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
SecurityDefinitionsItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
|
||||
message NamedString {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
string value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
|
||||
message NamedStringArray {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
StringArray value = 2;
|
||||
}
|
||||
|
||||
message NonBodyParameter {
|
||||
oneof oneof {
|
||||
HeaderParameterSubSchema header_parameter_sub_schema = 1;
|
||||
FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
|
||||
QueryParameterSubSchema query_parameter_sub_schema = 3;
|
||||
PathParameterSubSchema path_parameter_sub_schema = 4;
|
||||
}
|
||||
}
|
||||
|
||||
message Oauth2AccessCodeSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string authorization_url = 4;
|
||||
string token_url = 5;
|
||||
string description = 6;
|
||||
repeated NamedAny vendor_extension = 7;
|
||||
}
|
||||
|
||||
message Oauth2ApplicationSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string token_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2ImplicitSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string authorization_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2PasswordSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string token_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2Scopes {
|
||||
repeated NamedString additional_properties = 1;
|
||||
}
|
||||
|
||||
message Operation {
|
||||
repeated string tags = 1;
|
||||
// A brief summary of the operation.
|
||||
string summary = 2;
|
||||
// A longer description of the operation, GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
ExternalDocs external_docs = 4;
|
||||
// A unique identifier of the operation.
|
||||
string operation_id = 5;
|
||||
// A list of MIME types the API can produce.
|
||||
repeated string produces = 6;
|
||||
// A list of MIME types the API can consume.
|
||||
repeated string consumes = 7;
|
||||
// The parameters needed to send a valid API call.
|
||||
repeated ParametersItem parameters = 8;
|
||||
Responses responses = 9;
|
||||
// The transfer protocol of the API.
|
||||
repeated string schemes = 10;
|
||||
bool deprecated = 11;
|
||||
repeated SecurityRequirement security = 12;
|
||||
repeated NamedAny vendor_extension = 13;
|
||||
}
|
||||
|
||||
message Parameter {
|
||||
oneof oneof {
|
||||
BodyParameter body_parameter = 1;
|
||||
NonBodyParameter non_body_parameter = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// One or more JSON representations for parameters
|
||||
message ParameterDefinitions {
|
||||
repeated NamedParameter additional_properties = 1;
|
||||
}
|
||||
|
||||
message ParametersItem {
|
||||
oneof oneof {
|
||||
Parameter parameter = 1;
|
||||
JsonReference json_reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message PathItem {
|
||||
string _ref = 1;
|
||||
Operation get = 2;
|
||||
Operation put = 3;
|
||||
Operation post = 4;
|
||||
Operation delete = 5;
|
||||
Operation options = 6;
|
||||
Operation head = 7;
|
||||
Operation patch = 8;
|
||||
// The parameters needed to send a valid API call.
|
||||
repeated ParametersItem parameters = 9;
|
||||
repeated NamedAny vendor_extension = 10;
|
||||
}
|
||||
|
||||
message PathParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
string type = 5;
|
||||
string format = 6;
|
||||
PrimitivesItems items = 7;
|
||||
string collection_format = 8;
|
||||
Any default = 9;
|
||||
double maximum = 10;
|
||||
bool exclusive_maximum = 11;
|
||||
double minimum = 12;
|
||||
bool exclusive_minimum = 13;
|
||||
int64 max_length = 14;
|
||||
int64 min_length = 15;
|
||||
string pattern = 16;
|
||||
int64 max_items = 17;
|
||||
int64 min_items = 18;
|
||||
bool unique_items = 19;
|
||||
repeated Any enum = 20;
|
||||
double multiple_of = 21;
|
||||
repeated NamedAny vendor_extension = 22;
|
||||
}
|
||||
|
||||
// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
|
||||
message Paths {
|
||||
repeated NamedAny vendor_extension = 1;
|
||||
repeated NamedPathItem path = 2;
|
||||
}
|
||||
|
||||
message PrimitivesItems {
|
||||
string type = 1;
|
||||
string format = 2;
|
||||
PrimitivesItems items = 3;
|
||||
string collection_format = 4;
|
||||
Any default = 5;
|
||||
double maximum = 6;
|
||||
bool exclusive_maximum = 7;
|
||||
double minimum = 8;
|
||||
bool exclusive_minimum = 9;
|
||||
int64 max_length = 10;
|
||||
int64 min_length = 11;
|
||||
string pattern = 12;
|
||||
int64 max_items = 13;
|
||||
int64 min_items = 14;
|
||||
bool unique_items = 15;
|
||||
repeated Any enum = 16;
|
||||
double multiple_of = 17;
|
||||
repeated NamedAny vendor_extension = 18;
|
||||
}
|
||||
|
||||
message Properties {
|
||||
repeated NamedSchema additional_properties = 1;
|
||||
}
|
||||
|
||||
message QueryParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
// allows sending a parameter by name only or with an empty value.
|
||||
bool allow_empty_value = 5;
|
||||
string type = 6;
|
||||
string format = 7;
|
||||
PrimitivesItems items = 8;
|
||||
string collection_format = 9;
|
||||
Any default = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
repeated Any enum = 21;
|
||||
double multiple_of = 22;
|
||||
repeated NamedAny vendor_extension = 23;
|
||||
}
|
||||
|
||||
message Response {
|
||||
string description = 1;
|
||||
SchemaItem schema = 2;
|
||||
Headers headers = 3;
|
||||
Examples examples = 4;
|
||||
repeated NamedAny vendor_extension = 5;
|
||||
}
|
||||
|
||||
// One or more JSON representations for parameters
|
||||
message ResponseDefinitions {
|
||||
repeated NamedResponse additional_properties = 1;
|
||||
}
|
||||
|
||||
message ResponseValue {
|
||||
oneof oneof {
|
||||
Response response = 1;
|
||||
JsonReference json_reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Response object names can either be any valid HTTP status code or 'default'.
|
||||
message Responses {
|
||||
repeated NamedResponseValue response_code = 1;
|
||||
repeated NamedAny vendor_extension = 2;
|
||||
}
|
||||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
message Schema {
|
||||
string _ref = 1;
|
||||
string format = 2;
|
||||
string title = 3;
|
||||
string description = 4;
|
||||
Any default = 5;
|
||||
double multiple_of = 6;
|
||||
double maximum = 7;
|
||||
bool exclusive_maximum = 8;
|
||||
double minimum = 9;
|
||||
bool exclusive_minimum = 10;
|
||||
int64 max_length = 11;
|
||||
int64 min_length = 12;
|
||||
string pattern = 13;
|
||||
int64 max_items = 14;
|
||||
int64 min_items = 15;
|
||||
bool unique_items = 16;
|
||||
int64 max_properties = 17;
|
||||
int64 min_properties = 18;
|
||||
repeated string required = 19;
|
||||
repeated Any enum = 20;
|
||||
AdditionalPropertiesItem additional_properties = 21;
|
||||
TypeItem type = 22;
|
||||
ItemsItem items = 23;
|
||||
repeated Schema all_of = 24;
|
||||
Properties properties = 25;
|
||||
string discriminator = 26;
|
||||
bool read_only = 27;
|
||||
Xml xml = 28;
|
||||
ExternalDocs external_docs = 29;
|
||||
Any example = 30;
|
||||
repeated NamedAny vendor_extension = 31;
|
||||
}
|
||||
|
||||
message SchemaItem {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
FileSchema file_schema = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message SecurityDefinitions {
|
||||
repeated NamedSecurityDefinitionsItem additional_properties = 1;
|
||||
}
|
||||
|
||||
message SecurityDefinitionsItem {
|
||||
oneof oneof {
|
||||
BasicAuthenticationSecurity basic_authentication_security = 1;
|
||||
ApiKeySecurity api_key_security = 2;
|
||||
Oauth2ImplicitSecurity oauth2_implicit_security = 3;
|
||||
Oauth2PasswordSecurity oauth2_password_security = 4;
|
||||
Oauth2ApplicationSecurity oauth2_application_security = 5;
|
||||
Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
|
||||
}
|
||||
}
|
||||
|
||||
message SecurityRequirement {
|
||||
repeated NamedStringArray additional_properties = 1;
|
||||
}
|
||||
|
||||
message StringArray {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
message Tag {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
ExternalDocs external_docs = 3;
|
||||
repeated NamedAny vendor_extension = 4;
|
||||
}
|
||||
|
||||
message TypeItem {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
// Any property starting with x- is valid.
|
||||
message VendorExtension {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
message Xml {
|
||||
string name = 1;
|
||||
string namespace = 2;
|
||||
string prefix = 3;
|
||||
bool attribute = 4;
|
||||
bool wrapped = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
16
vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
generated
vendored
Normal file
@ -0,0 +1,16 @@
# OpenAPI v2 Protocol Buffer Models

This directory contains a Protocol Buffer-language model
and related code for supporting OpenAPI v2.

Gnostic applications and plugins can use OpenAPIv2.proto
to generate Protocol Buffer support code for their preferred languages.

OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
descriptions into the Protocol Buffer-based datastructures
generated from OpenAPIv2.proto.

OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic
compiler generator, and OpenAPIv2.pb.go is generated by
protoc, the Protocol Buffer compiler, and protoc-gen-go, the
Protocol Buffer Go code generation plugin.
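To make that flow concrete, here is an illustrative sketch (not part of the vendored sources) of a Gnostic application loading a description through this package; it assumes the generated OpenAPIv2.go exposes NewDocument(in interface{}, context *compiler.Context), and the file name swagger.yaml is hypothetical.

package main

import (
	"fmt"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Parse the JSON or YAML description into a generic yaml.MapSlice.
	info, err := compiler.ReadInfoForFile("swagger.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// Build the Protocol Buffer-based Document from the parsed description.
	document, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed document: %t\n", document != nil)
}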
1610
vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
generated
vendored
Normal file
File diff suppressed because it is too large
3
vendor/github.com/googleapis/gnostic/compiler/README.md
generated
vendored
Normal file
@ -0,0 +1,3 @@
# Compiler support code

This directory contains compiler support code used by Gnostic and Gnostic extensions.
41
vendor/github.com/googleapis/gnostic/compiler/context.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

type Context struct {
	Parent            *Context
	Name              string
	ExtensionHandlers *[]ExtensionHandler
}

func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
	return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
}

func NewContext(name string, parent *Context) *Context {
	if parent != nil {
		return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
	} else {
		return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
	}
}

func (context *Context) Description() string {
	if context.Parent != nil {
		return context.Parent.Description() + "." + context.Name
	} else {
		return context.Name
	}
}
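Illustrative only (not part of the vendored file): a tiny sketch of how the Context chain above is meant to be used, building the dotted paths that show up in compiler error messages; the node names are made up.

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	root := compiler.NewContext("$root", nil)
	paths := compiler.NewContext("paths", root)
	operation := compiler.NewContext("get", compiler.NewContext("/pets", paths))
	// Description walks the Parent links and joins the names with dots.
	fmt.Println(operation.Description()) // $root.paths./pets.get
}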
59
vendor/github.com/googleapis/gnostic/compiler/error.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

// basic error type
type Error struct {
	Context *Context
	Message string
}

func NewError(context *Context, message string) *Error {
	return &Error{Context: context, Message: message}
}

func (err *Error) Error() string {
	if err.Context != nil {
		return "ERROR " + err.Context.Description() + " " + err.Message
	} else {
		return "ERROR " + err.Message
	}
}

// container for groups of errors
type ErrorGroup struct {
	Errors []error
}

func NewErrorGroupOrNil(errors []error) error {
	if len(errors) == 0 {
		return nil
	} else if len(errors) == 1 {
		return errors[0]
	} else {
		return &ErrorGroup{Errors: errors}
	}
}

func (group *ErrorGroup) Error() string {
	result := ""
	for i, err := range group.Errors {
		if i > 0 {
			result += "\n"
		}
		result += err.Error()
	}
	return result
}
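Illustrative only (not part of the vendored file): collecting several compiler errors and reducing them with NewErrorGroupOrNil, as the generated validation code does; the messages are made up.

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	context := compiler.NewContext("paths", compiler.NewContext("$root", nil))
	var errs []error
	errs = append(errs, compiler.NewError(context, "unexpected key: foo"))
	errs = append(errs, compiler.NewError(context, "missing required key: get"))
	// nil for no errors, the single error for one, an ErrorGroup otherwise.
	if err := compiler.NewErrorGroupOrNil(errs); err != nil {
		fmt.Println(err)
	}
}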
99
vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
ext_plugin "github.com/googleapis/gnostic/extensions"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type ExtensionHandler struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
|
||||
handled := false
|
||||
var errFromPlugin error
|
||||
var outFromPlugin *any.Any
|
||||
|
||||
if context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
|
||||
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
|
||||
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
|
||||
if outFromPlugin == nil {
|
||||
continue
|
||||
} else {
|
||||
handled = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return handled, outFromPlugin, errFromPlugin
|
||||
}
|
||||
|
||||
func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
|
||||
if extensionHandlers.Name != "" {
|
||||
binary, _ := yaml.Marshal(in)
|
||||
|
||||
request := &ext_plugin.ExtensionHandlerRequest{}
|
||||
|
||||
version := &ext_plugin.Version{}
|
||||
version.Major = 0
|
||||
version.Minor = 1
|
||||
version.Patch = 0
|
||||
request.CompilerVersion = version
|
||||
|
||||
request.Wrapper = &ext_plugin.Wrapper{}
|
||||
|
||||
request.Wrapper.Version = "v2"
|
||||
request.Wrapper.Yaml = string(binary)
|
||||
request.Wrapper.ExtensionName = extensionName
|
||||
|
||||
requestBytes, _ := proto.Marshal(request)
|
||||
cmd := exec.Command(extensionHandlers.Name)
|
||||
cmd.Stdin = bytes.NewReader(requestBytes)
|
||||
output, err := cmd.Output()
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %+v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
response := &ext_plugin.ExtensionHandlerResponse{}
|
||||
err = proto.Unmarshal(output, response)
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %+v\n", err)
|
||||
fmt.Printf("%s\n", string(output))
|
||||
return nil, err
|
||||
}
|
||||
if !response.Handled {
|
||||
return nil, nil
|
||||
}
|
||||
if len(response.Error) != 0 {
|
||||
message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
return response.Value, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
193
vendor/github.com/googleapis/gnostic/compiler/helpers.go
generated
vendored
Normal file
@ -0,0 +1,193 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"gopkg.in/yaml.v2"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// compiler helper functions, usually called from generated code
|
||||
|
||||
func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
|
||||
m, ok := in.(yaml.MapSlice)
|
||||
if ok {
|
||||
return m, ok
|
||||
} else {
|
||||
// do we have an empty array?
|
||||
a, ok := in.([]interface{})
|
||||
if ok && len(a) == 0 {
|
||||
// if so, return an empty map
|
||||
return yaml.MapSlice{}, ok
|
||||
} else {
|
||||
return nil, ok
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SortedKeysForMap(m yaml.MapSlice) []string {
|
||||
keys := make([]string, 0)
|
||||
for _, item := range m {
|
||||
keys = append(keys, item.Key.(string))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func MapHasKey(m yaml.MapSlice, key string) bool {
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
if ok && key == itemKey {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func MapValueForKey(m yaml.MapSlice, key string) interface{} {
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
if ok && key == itemKey {
|
||||
return item.Value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
|
||||
stringArray := make([]string, 0)
|
||||
for _, item := range interfaceArray {
|
||||
v, ok := item.(string)
|
||||
if ok {
|
||||
stringArray = append(stringArray, v)
|
||||
}
|
||||
}
|
||||
return stringArray
|
||||
}
|
||||
|
||||
func PatternMatches(pattern string, value string) bool {
|
||||
// if pattern contains a subpattern like "{path}", replace it with ".*"
|
||||
if pattern[0] != '^' {
|
||||
subpatternPattern := regexp.MustCompile("^.*(\\{.*\\}).*$")
|
||||
if matches := subpatternPattern.FindSubmatch([]byte(pattern)); matches != nil {
|
||||
match := string(matches[1])
|
||||
pattern = strings.Replace(pattern, match, ".*", -1)
|
||||
}
|
||||
}
|
||||
matched, err := regexp.Match(pattern, []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return matched
|
||||
}
|
||||
|
||||
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
|
||||
missingKeys := make([]string, 0)
|
||||
for _, k := range requiredKeys {
|
||||
if !MapHasKey(m, k) {
|
||||
missingKeys = append(missingKeys, k)
|
||||
}
|
||||
}
|
||||
return missingKeys
|
||||
}
|
||||
|
||||
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []string) []string {
|
||||
invalidKeys := make([]string, 0)
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
if ok {
|
||||
key := itemKey
|
||||
found := false
|
||||
// does the key match an allowed key?
|
||||
for _, allowedKey := range allowedKeys {
|
||||
if key == allowedKey {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// does the key match an allowed pattern?
|
||||
for _, allowedPattern := range allowedPatterns {
|
||||
if PatternMatches(allowedPattern, key) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
invalidKeys = append(invalidKeys, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return invalidKeys
|
||||
}
|
||||
|
||||
// describe a map (for debugging purposes)
|
||||
func DescribeMap(in interface{}, indent string) string {
|
||||
description := ""
|
||||
m, ok := in.(map[string]interface{})
|
||||
if ok {
|
||||
keys := make([]string, 0)
|
||||
for k, _ := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
v := m[k]
|
||||
description += fmt.Sprintf("%s%s:\n", indent, k)
|
||||
description += DescribeMap(v, indent+" ")
|
||||
}
|
||||
return description
|
||||
}
|
||||
a, ok := in.([]interface{})
|
||||
if ok {
|
||||
for i, v := range a {
|
||||
description += fmt.Sprintf("%s%d:\n", indent, i)
|
||||
description += DescribeMap(v, indent+" ")
|
||||
}
|
||||
return description
|
||||
}
|
||||
description += fmt.Sprintf("%s%+v\n", indent, in)
|
||||
return description
|
||||
}
|
||||
|
||||
func PluralProperties(count int) string {
|
||||
if count == 1 {
|
||||
return "property"
|
||||
} else {
|
||||
return "properties"
|
||||
}
|
||||
}
|
||||
|
||||
func StringArrayContainsValue(array []string, value string) bool {
|
||||
for _, item := range array {
|
||||
if item == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func StringArrayContainsValues(array []string, values []string) bool {
|
||||
for _, value := range values {
|
||||
if !StringArrayContainsValue(array, value) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
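Illustrative only (not part of the vendored file): applying the yaml.MapSlice helpers above the way the generated compilers do, here on a hand-written fragment; the keys and document are made up.

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	var node yaml.MapSlice
	src := "swagger: \"2.0\"\ninfo:\n  title: Pets\npaths: {}\n"
	if err := yaml.Unmarshal([]byte(src), &node); err != nil {
		panic(err)
	}
	// Key lookups walk the ordered (key, value) pairs of the MapSlice.
	if compiler.MapHasKey(node, "swagger") {
		fmt.Println("version:", compiler.MapValueForKey(node, "swagger"))
	}
	// Required-key validation, as used by the generated New* constructors.
	fmt.Println("missing:", compiler.MissingKeysInMap(node, []string{"swagger", "info", "basePath"}))
}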
16
vendor/github.com/googleapis/gnostic/compiler/main.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package compiler provides support functions to generated compiler code.
package compiler
167
vendor/github.com/googleapis/gnostic/compiler/reader.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"gopkg.in/yaml.v2"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var file_cache map[string][]byte
|
||||
var info_cache map[string]interface{}
|
||||
var count int64
|
||||
|
||||
var VERBOSE_READER = false
|
||||
|
||||
func initializeFileCache() {
|
||||
if file_cache == nil {
|
||||
file_cache = make(map[string][]byte, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func initializeInfoCache() {
|
||||
if info_cache == nil {
|
||||
info_cache = make(map[string]interface{}, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func FetchFile(fileurl string) ([]byte, error) {
|
||||
initializeFileCache()
|
||||
bytes, ok := file_cache[fileurl]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit %s", fileurl)
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
log.Printf("Fetching %s", fileurl)
|
||||
response, err := http.Get(fileurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
defer response.Body.Close()
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err == nil {
|
||||
file_cache[fileurl] = bytes
|
||||
}
|
||||
return bytes, err
|
||||
}
|
||||
}
|
||||
|
||||
// read a file and unmarshal it as a yaml.MapSlice
|
||||
func ReadInfoForFile(filename string) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
info, ok := info_cache[filename]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit info for file %s", filename)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Reading info for file %s", filename)
|
||||
}
|
||||
|
||||
// is the filename a url?
|
||||
fileurl, _ := url.Parse(filename)
|
||||
if fileurl.Scheme != "" {
|
||||
// yes, fetch it
|
||||
bytes, err := FetchFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err = yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info_cache[filename] = info
|
||||
return info, nil
|
||||
} else {
|
||||
// no, it's a local filename
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err = yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info_cache[filename] = info
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
|
||||
// read a file and return the fragment needed to resolve a $ref
|
||||
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
{
|
||||
info, ok := info_cache[ref]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit for ref %s#%s", basefile, ref)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Reading info for ref %s#%s", basefile, ref)
|
||||
}
|
||||
count = count + 1
|
||||
basedir, _ := filepath.Split(basefile)
|
||||
parts := strings.Split(ref, "#")
|
||||
var filename string
|
||||
if parts[0] != "" {
|
||||
filename = basedir + parts[0]
|
||||
} else {
|
||||
filename = basefile
|
||||
}
|
||||
info, err := ReadInfoForFile(filename)
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
} else {
|
||||
if len(parts) > 1 {
|
||||
path := strings.Split(parts[1], "/")
|
||||
for i, key := range path {
|
||||
if i > 0 {
|
||||
m, ok := info.(yaml.MapSlice)
|
||||
if ok {
|
||||
found := false
|
||||
for _, section := range m {
|
||||
if section.Key == key {
|
||||
info = section.Value
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
info_cache[ref] = nil
|
||||
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
info_cache[ref] = info
|
||||
return info, nil
|
||||
}
|
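Illustrative only (not part of the vendored file): resolving a $ref through the cache-backed readers above; the base file and fragment are made up.

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Resolves "#/definitions/Pet" relative to a local swagger.yaml,
	// caching both the file contents and the resolved fragment.
	info, err := compiler.ReadInfoForRef("swagger.yaml", "#/definitions/Pet")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", info)
}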
7
vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
generated
vendored
Executable file
@ -0,0 +1,7 @@
go get github.com/golang/protobuf/protoc-gen-go

protoc \
--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto

go build
go install
5
vendor/github.com/googleapis/gnostic/extensions/README.md
generated
vendored
Normal file
@ -0,0 +1,5 @@
# Extensions

This directory contains support code for building Gnostic extensions and associated examples.

Extensions are used to compile vendor or specification extensions into protocol buffer structures.
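Illustrative only (not part of the vendored sources): a minimal extension-handler skeleton built on ProcessExtension from extensions.go later in this change; the extension name x-sample is made up, and a real handler would return a message type compiled from its own .proto instead of reusing Version.

package main

import (
	"github.com/golang/protobuf/proto"
	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// ProcessExtension reads an ExtensionHandlerRequest from stdin and writes
	// an ExtensionHandlerResponse to stdout; the callback reports whether the
	// named vendor extension is handled and returns the message to wrap in Any.
	openapiextension_v1.ProcessExtension(func(name string, info yaml.MapSlice) (bool, proto.Message, error) {
		if name != "x-sample" {
			return false, nil, nil
		}
		return true, &openapiextension_v1.Version{Major: 0, Minor: 1}, nil
	})
}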
219
vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
// Code generated by protoc-gen-go.
|
||||
// source: extension.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package openapiextension_v1 is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
extension.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Version
|
||||
ExtensionHandlerRequest
|
||||
ExtensionHandlerResponse
|
||||
Wrapper
|
||||
*/
|
||||
package openapiextension_v1
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import google_protobuf "github.com/golang/protobuf/ptypes/any"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The version number of OpenAPI compiler.
|
||||
type Version struct {
|
||||
Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
|
||||
Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
|
||||
Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
|
||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||
// be empty for mainline stable releases.
|
||||
Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Version) Reset() { *m = Version{} }
|
||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||
func (*Version) ProtoMessage() {}
|
||||
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Version) GetMajor() int32 {
|
||||
if m != nil {
|
||||
return m.Major
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Version) GetMinor() int32 {
|
||||
if m != nil {
|
||||
return m.Minor
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Version) GetPatch() int32 {
|
||||
if m != nil {
|
||||
return m.Patch
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Version) GetSuffix() string {
|
||||
if m != nil {
|
||||
return m.Suffix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// An encoded Request is written to the ExtensionHandler's stdin.
|
||||
type ExtensionHandlerRequest struct {
|
||||
// The OpenAPI descriptions that were explicitly listed on the command line.
|
||||
// The specifications will appear in the order they are specified to openapic.
|
||||
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"`
|
||||
// The version number of openapi compiler.
|
||||
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
|
||||
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtensionHandlerRequest) ProtoMessage() {}
|
||||
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
|
||||
if m != nil {
|
||||
return m.Wrapper
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
|
||||
if m != nil {
|
||||
return m.CompilerVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The extension writes an encoded ExtensionHandlerResponse to stdout.
|
||||
type ExtensionHandlerResponse struct {
|
||||
// true if the extension is handled by the extension handler; false otherwise
|
||||
Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"`
|
||||
// Error message. If non-empty, the extension handling failed.
|
||||
// The extension handler process should exit with status code zero
|
||||
// even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors which prevent the extension from
|
||||
// operating as intended. Errors which indicate a problem in gnostic
|
||||
// itself -- such as the input Document being unparseable -- should be
|
||||
// reported by writing a message to stderr and exiting with a non-zero
|
||||
// status code.
|
||||
Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"`
|
||||
// text output
|
||||
Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
|
||||
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtensionHandlerResponse) ProtoMessage() {}
|
||||
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *ExtensionHandlerResponse) GetHandled() bool {
|
||||
if m != nil {
|
||||
return m.Handled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerResponse) GetError() []string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Wrapper struct {
|
||||
// version of the OpenAPI specification in which this extension was written.
|
||||
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
|
||||
// Name of the extension
|
||||
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"`
|
||||
// Must be a valid yaml for the proto
|
||||
Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Wrapper) Reset() { *m = Wrapper{} }
|
||||
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
|
||||
func (*Wrapper) ProtoMessage() {}
|
||||
func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *Wrapper) GetVersion() string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Wrapper) GetExtensionName() string {
|
||||
if m != nil {
|
||||
return m.ExtensionName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Wrapper) GetYaml() string {
|
||||
if m != nil {
|
||||
return m.Yaml
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
|
||||
proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
|
||||
proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
|
||||
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("extension.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 355 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40,
|
||||
0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22,
|
||||
0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb,
|
||||
0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50,
|
||||
0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4,
|
||||
0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4,
|
||||
0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0,
|
||||
0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34,
|
||||
0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a,
|
||||
0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a,
|
||||
0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66,
|
||||
0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2,
|
||||
0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d,
|
||||
0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3,
|
||||
0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c,
|
||||
0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa,
|
||||
0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab,
|
||||
0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3,
|
||||
0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62,
|
||||
0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23,
|
||||
0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1,
|
||||
0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52,
|
||||
0x02, 0x00, 0x00,
|
||||
}
|
93
vendor/github.com/googleapis/gnostic/extensions/extension.proto
generated
vendored
Normal file
@ -0,0 +1,93 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
package openapiextension.v1;
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one-level of name nesting and be
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "OpenAPIExtensionV1";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapic.v1";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
//
|
||||
option objc_class_prefix = "OAE"; // "OpenAPI Extension"
|
||||
|
||||
// The version number of OpenAPI compiler.
|
||||
message Version {
|
||||
int32 major = 1;
|
||||
int32 minor = 2;
|
||||
int32 patch = 3;
|
||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||
// be empty for mainline stable releases.
|
||||
string suffix = 4;
|
||||
}
|
||||
|
||||
// An encoded Request is written to the ExtensionHandler's stdin.
|
||||
message ExtensionHandlerRequest {
|
||||
|
||||
// The OpenAPI descriptions that were explicitly listed on the command line.
|
||||
// The specifications will appear in the order they are specified to openapic.
|
||||
Wrapper wrapper = 1;
|
||||
|
||||
// The version number of openapi compiler.
|
||||
Version compiler_version = 3;
|
||||
}
|
||||
|
||||
// The extension writes an encoded ExtensionHandlerResponse to stdout.
|
||||
message ExtensionHandlerResponse {
|
||||
|
||||
// true if the extension is handled by the extension handler; false otherwise
|
||||
bool handled = 1;
|
||||
|
||||
// Error message. If non-empty, the extension handling failed.
|
||||
// The extension handler process should exit with status code zero
|
||||
// even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors which prevent the extension from
|
||||
// operating as intended. Errors which indicate a problem in gnostic
|
||||
// itself -- such as the input Document being unparseable -- should be
|
||||
// reported by writing a message to stderr and exiting with a non-zero
|
||||
// status code.
|
||||
repeated string error = 2;
|
||||
|
||||
// text output
|
||||
google.protobuf.Any value = 3;
|
||||
}
|
||||
|
||||
message Wrapper {
|
||||
// version of the OpenAPI specification in which this extension was written.
|
||||
string version = 1;
|
||||
|
||||
// Name of the extension
|
||||
string extension_name = 2;
|
||||
|
||||
// Must be a valid yaml for the proto
|
||||
string yaml = 3;
|
||||
}
|
83
vendor/github.com/googleapis/gnostic/extensions/extensions.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package openapiextension_v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type documentHandler func(version string, extensionName string, document string)
|
||||
type extensionHandler func(name string, info yaml.MapSlice) (bool, proto.Message, error)
|
||||
|
||||
func forInputYamlFromOpenapic(handler documentHandler) {
|
||||
data, err := ioutil.ReadAll(os.Stdin)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("File error:", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
request := &ExtensionHandlerRequest{}
|
||||
err = proto.Unmarshal(data, request)
|
||||
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
|
||||
}
|
||||
|
||||
func ProcessExtension(handleExtension extensionHandler) {
|
||||
response := &ExtensionHandlerResponse{}
|
||||
forInputYamlFromOpenapic(
|
||||
func(version string, extensionName string, yamlInput string) {
|
||||
var info yaml.MapSlice
|
||||
var newObject proto.Message
|
||||
var err error
|
||||
err = yaml.Unmarshal([]byte(yamlInput), &info)
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
handled, newObject, err := handleExtension(extensionName, info)
|
||||
if !handled {
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// If we reach here, then the extension is handled
|
||||
response.Handled = true
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
response.Value, err = ptypes.MarshalAny(newObject)
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
})
|
||||
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
}
|
22
vendor/github.com/gorilla/websocket/.gitignore
generated
vendored
Normal file
@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
6
vendor/github.com/gorilla/websocket/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,6 @@
language: go

go:
  - 1.1
  - 1.2
  - tip
8
vendor/github.com/gorilla/websocket/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,8 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Joachim Bauch <mail@joachim-bauch.de>
22
vendor/github.com/gorilla/websocket/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59
vendor/github.com/gorilla/websocket/README.md
generated
vendored
Normal file
@ -0,0 +1,59 @@
# Gorilla WebSocket
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](http://godoc.org/github.com/gorilla/websocket)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</tr></td>
|
||||
<tr><td>Limit size of received message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.SetReadLimit">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=5082">No</a></td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
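The README above stops short of a minimal client example. The sketch below shows one way to use the vendored package's Dialer together with WriteMessage and ReadMessage; the endpoint URL and the echo server it implies are illustrative assumptions, not part of this change.

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Hypothetical endpoint; assumes an echo server is listening there.
	d := &websocket.Dialer{}
	conn, _, err := d.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	// Send one text message and read the reply.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("received: %s", msg)
}
```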
269
vendor/github.com/gorilla/websocket/client.go
generated
vendored
Normal file
|
@ -0,0 +1,269 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to the opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
acceptKey := computeAcceptKey(challengeKey)
|
||||
|
||||
c = newConn(netConn, false, readBufSize, writeBufSize)
|
||||
p := c.writeBuf[:0]
|
||||
p = append(p, "GET "...)
|
||||
p = append(p, u.RequestURI()...)
|
||||
p = append(p, " HTTP/1.1\r\nHost: "...)
|
||||
p = append(p, u.Host...)
|
||||
// "Upgrade" is capitalized for servers that do not use case insensitive
|
||||
// comparisons on header tokens.
|
||||
p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...)
|
||||
p = append(p, challengeKey...)
|
||||
p = append(p, "\r\n"...)
|
||||
for k, vs := range requestHeader {
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
p = append(p, v...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
if _, err := netConn.Write(p); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != acceptKey {
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
return c, resp, nil
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to a WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// Input and output buffer sizes. If the buffer size is zero, then a
|
||||
// default value of 4096 is used.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
// parseURL parses the URL. The url.Parse function is not used here because
|
||||
// url.Parse mangles the path.
|
||||
func parseURL(s string) (*url.URL, error) {
|
||||
// From the RFC:
|
||||
//
|
||||
// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
|
||||
// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
|
||||
//
|
||||
// We don't use the net/url parser here because the dialer interface does
|
||||
// not provide a way for applications to work around percent decoding in
|
||||
// the net/url parser.
|
||||
|
||||
var u url.URL
|
||||
switch {
|
||||
case strings.HasPrefix(s, "ws://"):
|
||||
u.Scheme = "ws"
|
||||
s = s[len("ws://"):]
|
||||
case strings.HasPrefix(s, "wss://"):
|
||||
u.Scheme = "wss"
|
||||
s = s[len("wss://"):]
|
||||
default:
|
||||
return nil, errMalformedURL
|
||||
}
|
||||
|
||||
u.Host = s
|
||||
u.Opaque = "/"
|
||||
if i := strings.Index(s, "/"); i >= 0 {
|
||||
u.Host = s[:i]
|
||||
u.Opaque = s[i:]
|
||||
}
|
||||
|
||||
if strings.Contains(u.Host, "@") {
|
||||
// WebSocket URIs do not contain user information.
|
||||
return nil, errMalformedURL
|
||||
}
|
||||
|
||||
return &u, nil
|
||||
}
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
if u.Scheme == "wss" {
|
||||
hostPort += ":443"
|
||||
} else {
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default zero values.
|
||||
var DefaultDialer *Dialer
|
||||
|
||||
// Dial creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
u, err := parseURL(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
|
||||
if d == nil {
|
||||
d = &Dialer{}
|
||||
}
|
||||
|
||||
var deadline time.Time
|
||||
if d.HandshakeTimeout != 0 {
|
||||
deadline = time.Now().Add(d.HandshakeTimeout)
|
||||
}
|
||||
|
||||
netDial := d.NetDial
|
||||
if netDial == nil {
|
||||
netDialer := &net.Dialer{Deadline: deadline}
|
||||
netDial = netDialer.Dial
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if err := netConn.SetDeadline(deadline); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if u.Scheme == "wss" {
|
||||
cfg := d.TLSClientConfig
|
||||
if cfg == nil {
|
||||
cfg = &tls.Config{ServerName: hostNoPort}
|
||||
} else if cfg.ServerName == "" {
|
||||
shallowCopy := *cfg
|
||||
cfg = &shallowCopy
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.Subprotocols) > 0 {
|
||||
h := http.Header{}
|
||||
for k, v := range requestHeader {
|
||||
h[k] = v
|
||||
}
|
||||
h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", "))
|
||||
requestHeader = h
|
||||
}
|
||||
|
||||
if len(requestHeader["Host"]) > 0 {
|
||||
// This can be used to supply a Host: header which is different from
|
||||
// the dial address.
|
||||
u.Host = requestHeader.Get("Host")
|
||||
|
||||
// Drop "Host" header
|
||||
h := http.Header{}
|
||||
for k, v := range requestHeader {
|
||||
if k == "Host" {
|
||||
continue
|
||||
}
|
||||
h[k] = v
|
||||
}
|
||||
requestHeader = h
|
||||
}
|
||||
|
||||
conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize)
|
||||
|
||||
if err != nil {
|
||||
if err == ErrBadHandshake {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
}
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
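As a hedged illustration of the Dialer fields defined in client.go (HandshakeTimeout, Subprotocols, TLSClientConfig) and of the non-nil *http.Response returned alongside ErrBadHandshake, a small sketch follows; the host, origin, subprotocol, and timeout values are placeholders.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

// dialExample opens a wss connection with illustrative options; every value
// here is a placeholder, not taken from this repository.
func dialExample() (*websocket.Conn, error) {
	d := &websocket.Dialer{
		HandshakeTimeout: 10 * time.Second,
		Subprotocols:     []string{"chat"},
		TLSClientConfig:  &tls.Config{ServerName: "example.com"},
	}
	header := http.Header{"Origin": {"https://example.com"}}
	conn, resp, err := d.Dial("wss://example.com/ws", header)
	if err != nil {
		// On ErrBadHandshake, resp is non-nil and carries part of the response body.
		log.Println("handshake failed:", err, resp)
		return nil, err
	}
	return conn, nil
}

func main() {
	if conn, err := dialExample(); err == nil {
		conn.Close()
	}
}
```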
825
vendor/github.com/gorilla/websocket/conn.go
generated
vendored
Normal file
|
@ -0,0 +1,825 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
|
||||
maxControlFramePayloadSize = 125
|
||||
finalBit = 1 << 7
|
||||
maskBit = 1 << 7
|
||||
writeWait = time.Second
|
||||
|
||||
defaultReadBufferSize = 4096
|
||||
defaultWriteBufferSize = 4096
|
||||
|
||||
continuationFrame = 0
|
||||
noFrame = -1
|
||||
)
|
||||
|
||||
// Close codes defined in RFC 6455, section 11.7.
|
||||
const (
|
||||
CloseNormalClosure = 1000
|
||||
CloseGoingAway = 1001
|
||||
CloseProtocolError = 1002
|
||||
CloseUnsupportedData = 1003
|
||||
CloseNoStatusReceived = 1005
|
||||
CloseAbnormalClosure = 1006
|
||||
CloseInvalidFramePayloadData = 1007
|
||||
ClosePolicyViolation = 1008
|
||||
CloseMessageTooBig = 1009
|
||||
CloseMandatoryExtension = 1010
|
||||
CloseInternalServerErr = 1011
|
||||
CloseTLSHandshake = 1015
|
||||
)
|
||||
|
||||
// The message types are defined in RFC 6455, section 11.8.
|
||||
const (
|
||||
// TextMessage denotes a text data message. The text message payload is
|
||||
// interpreted as UTF-8 encoded text data.
|
||||
TextMessage = 1
|
||||
|
||||
// BinaryMessage denotes a binary data message.
|
||||
BinaryMessage = 2
|
||||
|
||||
// CloseMessage denotes a close control message. The optional message
|
||||
// payload contains a numeric code and text. Use the FormatCloseMessage
|
||||
// function to format a close message payload.
|
||||
CloseMessage = 8
|
||||
|
||||
// PingMessage denotes a ping control message. The optional message payload
|
||||
// is UTF-8 encoded text.
|
||||
PingMessage = 9
|
||||
|
||||
// PongMessage denotes a pong control message. The optional message payload
|
||||
// is UTF-8 encoded text.
|
||||
PongMessage = 10
|
||||
)
|
||||
|
||||
// ErrCloseSent is returned when the application writes a message to the
|
||||
// connection after sending a close message.
|
||||
var ErrCloseSent = errors.New("websocket: close sent")
|
||||
|
||||
// ErrReadLimit is returned when reading a message that is larger than the
|
||||
// read limit set for the connection.
|
||||
var ErrReadLimit = errors.New("websocket: read limit exceeded")
|
||||
|
||||
// netError satisfies the net Error interface.
|
||||
type netError struct {
|
||||
msg string
|
||||
temporary bool
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *netError) Error() string { return e.msg }
|
||||
func (e *netError) Temporary() bool { return e.temporary }
|
||||
func (e *netError) Timeout() bool { return e.timeout }
|
||||
|
||||
// closeError represents a close frame.
|
||||
type closeError struct {
|
||||
code int
|
||||
text string
|
||||
}
|
||||
|
||||
func (e *closeError) Error() string {
|
||||
return "websocket: close " + strconv.Itoa(e.code) + " " + e.text
|
||||
}
|
||||
|
||||
var (
|
||||
errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true}
|
||||
errUnexpectedEOF = &closeError{code: CloseAbnormalClosure, text: io.ErrUnexpectedEOF.Error()}
|
||||
errBadWriteOpCode = errors.New("websocket: bad write message type")
|
||||
errWriteClosed = errors.New("websocket: write closed")
|
||||
errInvalidControlFrame = errors.New("websocket: invalid control frame")
|
||||
)
|
||||
|
||||
func hideTempErr(err error) error {
|
||||
if e, ok := err.(net.Error); ok && e.Temporary() {
|
||||
err = &netError{msg: e.Error(), timeout: e.Timeout()}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func isControl(frameType int) bool {
|
||||
return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
|
||||
}
|
||||
|
||||
func isData(frameType int) bool {
|
||||
return frameType == TextMessage || frameType == BinaryMessage
|
||||
}
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
return pos & 3
|
||||
}
|
||||
|
||||
func newMaskKey() [4]byte {
|
||||
n := rand.Uint32()
|
||||
return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
|
||||
}
|
||||
|
||||
// Conn represents a WebSocket connection.
|
||||
type Conn struct {
|
||||
conn net.Conn
|
||||
isServer bool
|
||||
subprotocol string
|
||||
|
||||
// Write fields
|
||||
mu chan bool // used as mutex to protect write to conn and closeSent
|
||||
closeSent bool // true if close message was sent
|
||||
|
||||
// Message writer fields.
|
||||
writeErr error
|
||||
writeBuf []byte // frame is constructed in this buffer.
|
||||
writePos int // end of data in writeBuf.
|
||||
writeFrameType int // type of the current frame.
|
||||
writeSeq int // incremented to invalidate message writers.
|
||||
writeDeadline time.Time
|
||||
|
||||
// Read fields
|
||||
readErr error
|
||||
br *bufio.Reader
|
||||
readRemaining int64 // bytes remaining in current frame.
|
||||
readFinal bool // true if the final frame of the current message has been read.
|
||||
readSeq int // incremented to invalidate message readers.
|
||||
readLength int64 // Message size.
|
||||
readLimit int64 // Maximum message size.
|
||||
readMaskPos int
|
||||
readMaskKey [4]byte
|
||||
handlePong func(string) error
|
||||
handlePing func(string) error
|
||||
}
|
||||
|
||||
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
|
||||
mu := make(chan bool, 1)
|
||||
mu <- true
|
||||
|
||||
if readBufferSize == 0 {
|
||||
readBufferSize = defaultReadBufferSize
|
||||
}
|
||||
if writeBufferSize == 0 {
|
||||
writeBufferSize = defaultWriteBufferSize
|
||||
}
|
||||
|
||||
c := &Conn{
|
||||
isServer: isServer,
|
||||
br: bufio.NewReaderSize(conn, readBufferSize),
|
||||
conn: conn,
|
||||
mu: mu,
|
||||
readFinal: true,
|
||||
writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize),
|
||||
writeFrameType: noFrame,
|
||||
writePos: maxFrameHeaderSize,
|
||||
}
|
||||
c.SetPingHandler(nil)
|
||||
c.SetPongHandler(nil)
|
||||
return c
|
||||
}
|
||||
|
||||
// Subprotocol returns the negotiated protocol for the connection.
|
||||
func (c *Conn) Subprotocol() string {
|
||||
return c.subprotocol
|
||||
}
|
||||
|
||||
// Close closes the underlying network connection without sending or waiting for a close frame.
|
||||
func (c *Conn) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// LocalAddr returns the local network address.
|
||||
func (c *Conn) LocalAddr() net.Addr {
|
||||
return c.conn.LocalAddr()
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
func (c *Conn) RemoteAddr() net.Addr {
|
||||
return c.conn.RemoteAddr()
|
||||
}
|
||||
|
||||
// Write methods
|
||||
|
||||
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
|
||||
<-c.mu
|
||||
defer func() { c.mu <- true }()
|
||||
|
||||
if c.closeSent {
|
||||
return ErrCloseSent
|
||||
} else if frameType == CloseMessage {
|
||||
c.closeSent = true
|
||||
}
|
||||
|
||||
c.conn.SetWriteDeadline(deadline)
|
||||
for _, buf := range bufs {
|
||||
if len(buf) > 0 {
|
||||
n, err := c.conn.Write(buf)
|
||||
if n != len(buf) {
|
||||
// Close on partial write.
|
||||
c.conn.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteControl writes a control message with the given deadline. The allowed
|
||||
// message types are CloseMessage, PingMessage and PongMessage.
|
||||
func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
|
||||
if !isControl(messageType) {
|
||||
return errBadWriteOpCode
|
||||
}
|
||||
if len(data) > maxControlFramePayloadSize {
|
||||
return errInvalidControlFrame
|
||||
}
|
||||
|
||||
b0 := byte(messageType) | finalBit
|
||||
b1 := byte(len(data))
|
||||
if !c.isServer {
|
||||
b1 |= maskBit
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
|
||||
buf = append(buf, b0, b1)
|
||||
|
||||
if c.isServer {
|
||||
buf = append(buf, data...)
|
||||
} else {
|
||||
key := newMaskKey()
|
||||
buf = append(buf, key[:]...)
|
||||
buf = append(buf, data...)
|
||||
maskBytes(key, 0, buf[6:])
|
||||
}
|
||||
|
||||
d := time.Hour * 1000
|
||||
if !deadline.IsZero() {
|
||||
d = deadline.Sub(time.Now())
|
||||
if d < 0 {
|
||||
return errWriteTimeout
|
||||
}
|
||||
}
|
||||
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-c.mu:
|
||||
timer.Stop()
|
||||
case <-timer.C:
|
||||
return errWriteTimeout
|
||||
}
|
||||
defer func() { c.mu <- true }()
|
||||
|
||||
if c.closeSent {
|
||||
return ErrCloseSent
|
||||
} else if messageType == CloseMessage {
|
||||
c.closeSent = true
|
||||
}
|
||||
|
||||
c.conn.SetWriteDeadline(deadline)
|
||||
n, err := c.conn.Write(buf)
|
||||
if n != 0 && n != len(buf) {
|
||||
c.conn.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NextWriter returns a writer for the next message to send. The writer's
|
||||
// Close method flushes the complete message to the network.
|
||||
//
|
||||
// There can be at most one open writer on a connection. NextWriter closes the
|
||||
// previous writer if the application has not already done so.
|
||||
//
|
||||
// The NextWriter method and the writers returned from the method cannot be
|
||||
// accessed by more than one goroutine at a time.
|
||||
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
|
||||
if c.writeErr != nil {
|
||||
return nil, c.writeErr
|
||||
}
|
||||
|
||||
if c.writeFrameType != noFrame {
|
||||
if err := c.flushFrame(true, nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !isControl(messageType) && !isData(messageType) {
|
||||
return nil, errBadWriteOpCode
|
||||
}
|
||||
|
||||
c.writeFrameType = messageType
|
||||
return messageWriter{c, c.writeSeq}, nil
|
||||
}
|
||||
|
||||
func (c *Conn) flushFrame(final bool, extra []byte) error {
|
||||
length := c.writePos - maxFrameHeaderSize + len(extra)
|
||||
|
||||
// Check for invalid control frames.
|
||||
if isControl(c.writeFrameType) &&
|
||||
(!final || length > maxControlFramePayloadSize) {
|
||||
c.writeSeq++
|
||||
c.writeFrameType = noFrame
|
||||
c.writePos = maxFrameHeaderSize
|
||||
return errInvalidControlFrame
|
||||
}
|
||||
|
||||
b0 := byte(c.writeFrameType)
|
||||
if final {
|
||||
b0 |= finalBit
|
||||
}
|
||||
b1 := byte(0)
|
||||
if !c.isServer {
|
||||
b1 |= maskBit
|
||||
}
|
||||
|
||||
// Assume that the frame starts at the beginning of c.writeBuf.
|
||||
framePos := 0
|
||||
if c.isServer {
|
||||
// Adjust up if mask not included in the header.
|
||||
framePos = 4
|
||||
}
|
||||
|
||||
switch {
|
||||
case length >= 65536:
|
||||
c.writeBuf[framePos] = b0
|
||||
c.writeBuf[framePos+1] = b1 | 127
|
||||
binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
|
||||
case length > 125:
|
||||
framePos += 6
|
||||
c.writeBuf[framePos] = b0
|
||||
c.writeBuf[framePos+1] = b1 | 126
|
||||
binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
|
||||
default:
|
||||
framePos += 8
|
||||
c.writeBuf[framePos] = b0
|
||||
c.writeBuf[framePos+1] = b1 | byte(length)
|
||||
}
|
||||
|
||||
if !c.isServer {
|
||||
key := newMaskKey()
|
||||
copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
|
||||
maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos])
|
||||
if len(extra) > 0 {
|
||||
c.writeErr = errors.New("websocket: internal error, extra used in client mode")
|
||||
return c.writeErr
|
||||
}
|
||||
}
|
||||
|
||||
// Write the buffers to the connection.
|
||||
c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra)
|
||||
|
||||
// Setup for next frame.
|
||||
c.writePos = maxFrameHeaderSize
|
||||
c.writeFrameType = continuationFrame
|
||||
if final {
|
||||
c.writeSeq++
|
||||
c.writeFrameType = noFrame
|
||||
}
|
||||
return c.writeErr
|
||||
}
|
||||
|
||||
type messageWriter struct {
|
||||
c *Conn
|
||||
seq int
|
||||
}
|
||||
|
||||
func (w messageWriter) err() error {
|
||||
c := w.c
|
||||
if c.writeSeq != w.seq {
|
||||
return errWriteClosed
|
||||
}
|
||||
if c.writeErr != nil {
|
||||
return c.writeErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w messageWriter) ncopy(max int) (int, error) {
|
||||
n := len(w.c.writeBuf) - w.c.writePos
|
||||
if n <= 0 {
|
||||
if err := w.c.flushFrame(false, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n = len(w.c.writeBuf) - w.c.writePos
|
||||
}
|
||||
if n > max {
|
||||
n = max
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w messageWriter) write(final bool, p []byte) (int, error) {
|
||||
if err := w.err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
|
||||
// Don't buffer large messages.
|
||||
err := w.c.flushFrame(final, p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
nn := len(p)
|
||||
for len(p) > 0 {
|
||||
n, err := w.ncopy(len(p))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
copy(w.c.writeBuf[w.c.writePos:], p[:n])
|
||||
w.c.writePos += n
|
||||
p = p[n:]
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (w messageWriter) Write(p []byte) (int, error) {
|
||||
return w.write(false, p)
|
||||
}
|
||||
|
||||
func (w messageWriter) WriteString(p string) (int, error) {
|
||||
if err := w.err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
nn := len(p)
|
||||
for len(p) > 0 {
|
||||
n, err := w.ncopy(len(p))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
copy(w.c.writeBuf[w.c.writePos:], p[:n])
|
||||
w.c.writePos += n
|
||||
p = p[n:]
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
|
||||
if err := w.err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for {
|
||||
if w.c.writePos == len(w.c.writeBuf) {
|
||||
err = w.c.flushFrame(false, nil)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
var n int
|
||||
n, err = r.Read(w.c.writeBuf[w.c.writePos:])
|
||||
w.c.writePos += n
|
||||
nn += int64(n)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return nn, err
|
||||
}
|
||||
|
||||
func (w messageWriter) Close() error {
|
||||
if err := w.err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.c.flushFrame(true, nil)
|
||||
}
|
||||
|
||||
// WriteMessage is a helper method for getting a writer using NextWriter,
|
||||
// writing the message and closing the writer.
|
||||
func (c *Conn) WriteMessage(messageType int, data []byte) error {
|
||||
wr, err := c.NextWriter(messageType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := wr.(messageWriter)
|
||||
if _, err := w.write(true, data); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.writeSeq == w.seq {
|
||||
if err := c.flushFrame(true, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetWriteDeadline sets the write deadline on the underlying network
|
||||
// connection. After a write has timed out, the websocket state is corrupt and
|
||||
// all future writes will return an error. A zero value for t means writes will
|
||||
// not time out.
|
||||
func (c *Conn) SetWriteDeadline(t time.Time) error {
|
||||
c.writeDeadline = t
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read methods
|
||||
|
||||
// readFull is like io.ReadFull except that io.EOF is never returned.
|
||||
func (c *Conn) readFull(p []byte) (err error) {
|
||||
var n int
|
||||
for n < len(p) && err == nil {
|
||||
var nn int
|
||||
nn, err = c.br.Read(p[n:])
|
||||
n += nn
|
||||
}
|
||||
if n == len(p) {
|
||||
err = nil
|
||||
} else if err == io.EOF {
|
||||
err = errUnexpectedEOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Conn) advanceFrame() (int, error) {
|
||||
|
||||
// 1. Skip remainder of previous frame.
|
||||
|
||||
if c.readRemaining > 0 {
|
||||
if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Read and parse first two bytes of frame header.
|
||||
|
||||
var b [8]byte
|
||||
if err := c.readFull(b[:2]); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
|
||||
final := b[0]&finalBit != 0
|
||||
frameType := int(b[0] & 0xf)
|
||||
reserved := int((b[0] >> 4) & 0x7)
|
||||
mask := b[1]&maskBit != 0
|
||||
c.readRemaining = int64(b[1] & 0x7f)
|
||||
|
||||
if reserved != 0 {
|
||||
return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved))
|
||||
}
|
||||
|
||||
switch frameType {
|
||||
case CloseMessage, PingMessage, PongMessage:
|
||||
if c.readRemaining > maxControlFramePayloadSize {
|
||||
return noFrame, c.handleProtocolError("control frame length > 125")
|
||||
}
|
||||
if !final {
|
||||
return noFrame, c.handleProtocolError("control frame not final")
|
||||
}
|
||||
case TextMessage, BinaryMessage:
|
||||
if !c.readFinal {
|
||||
return noFrame, c.handleProtocolError("message start before final message frame")
|
||||
}
|
||||
c.readFinal = final
|
||||
case continuationFrame:
|
||||
if c.readFinal {
|
||||
return noFrame, c.handleProtocolError("continuation after final message frame")
|
||||
}
|
||||
c.readFinal = final
|
||||
default:
|
||||
return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
|
||||
}
|
||||
|
||||
// 3. Read and parse frame length.
|
||||
|
||||
switch c.readRemaining {
|
||||
case 126:
|
||||
if err := c.readFull(b[:2]); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
c.readRemaining = int64(binary.BigEndian.Uint16(b[:2]))
|
||||
case 127:
|
||||
if err := c.readFull(b[:8]); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
c.readRemaining = int64(binary.BigEndian.Uint64(b[:8]))
|
||||
}
|
||||
|
||||
// 4. Handle frame masking.
|
||||
|
||||
if mask != c.isServer {
|
||||
return noFrame, c.handleProtocolError("incorrect mask flag")
|
||||
}
|
||||
|
||||
if mask {
|
||||
c.readMaskPos = 0
|
||||
if err := c.readFull(c.readMaskKey[:]); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
}
|
||||
|
||||
// 5. For text and binary messages, enforce read limit and return.
|
||||
|
||||
if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
|
||||
|
||||
c.readLength += c.readRemaining
|
||||
if c.readLimit > 0 && c.readLength > c.readLimit {
|
||||
c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
|
||||
return noFrame, ErrReadLimit
|
||||
}
|
||||
|
||||
return frameType, nil
|
||||
}
|
||||
|
||||
// 6. Read control frame payload.
|
||||
|
||||
var payload []byte
|
||||
if c.readRemaining > 0 {
|
||||
payload = make([]byte, c.readRemaining)
|
||||
c.readRemaining = 0
|
||||
if err := c.readFull(payload); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
if c.isServer {
|
||||
maskBytes(c.readMaskKey, 0, payload)
|
||||
}
|
||||
}
|
||||
|
||||
// 7. Process control frame payload.
|
||||
|
||||
switch frameType {
|
||||
case PongMessage:
|
||||
if err := c.handlePong(string(payload)); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
case PingMessage:
|
||||
if err := c.handlePing(string(payload)); err != nil {
|
||||
return noFrame, err
|
||||
}
|
||||
case CloseMessage:
|
||||
c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait))
|
||||
closeCode := CloseNoStatusReceived
|
||||
closeText := ""
|
||||
if len(payload) >= 2 {
|
||||
closeCode = int(binary.BigEndian.Uint16(payload))
|
||||
closeText = string(payload[2:])
|
||||
}
|
||||
switch closeCode {
|
||||
case CloseNormalClosure, CloseGoingAway:
|
||||
return noFrame, io.EOF
|
||||
default:
|
||||
return noFrame, &closeError{code: closeCode, text: closeText}
|
||||
}
|
||||
}
|
||||
|
||||
return frameType, nil
|
||||
}
|
||||
|
||||
func (c *Conn) handleProtocolError(message string) error {
|
||||
c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
|
||||
return errors.New("websocket: " + message)
|
||||
}
|
||||
|
||||
// NextReader returns the next data message received from the peer. The
|
||||
// returned messageType is either TextMessage or BinaryMessage.
|
||||
//
|
||||
// There can be at most one open reader on a connection. NextReader discards
|
||||
// the previous message if the application has not already consumed it.
|
||||
//
|
||||
// The NextReader method and the readers returned from the method cannot be
|
||||
// accessed by more than one goroutine at a time.
|
||||
func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
|
||||
|
||||
c.readSeq++
|
||||
c.readLength = 0
|
||||
|
||||
for c.readErr == nil {
|
||||
frameType, err := c.advanceFrame()
|
||||
if err != nil {
|
||||
c.readErr = hideTempErr(err)
|
||||
break
|
||||
}
|
||||
if frameType == TextMessage || frameType == BinaryMessage {
|
||||
return frameType, messageReader{c, c.readSeq}, nil
|
||||
}
|
||||
}
|
||||
return noFrame, nil, c.readErr
|
||||
}
|
||||
|
||||
type messageReader struct {
|
||||
c *Conn
|
||||
seq int
|
||||
}
|
||||
|
||||
func (r messageReader) Read(b []byte) (int, error) {
|
||||
|
||||
if r.seq != r.c.readSeq {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
for r.c.readErr == nil {
|
||||
|
||||
if r.c.readRemaining > 0 {
|
||||
if int64(len(b)) > r.c.readRemaining {
|
||||
b = b[:r.c.readRemaining]
|
||||
}
|
||||
n, err := r.c.br.Read(b)
|
||||
r.c.readErr = hideTempErr(err)
|
||||
if r.c.isServer {
|
||||
r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n])
|
||||
}
|
||||
r.c.readRemaining -= int64(n)
|
||||
return n, r.c.readErr
|
||||
}
|
||||
|
||||
if r.c.readFinal {
|
||||
r.c.readSeq++
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
frameType, err := r.c.advanceFrame()
|
||||
switch {
|
||||
case err != nil:
|
||||
r.c.readErr = hideTempErr(err)
|
||||
case frameType == TextMessage || frameType == BinaryMessage:
|
||||
r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
|
||||
}
|
||||
}
|
||||
|
||||
err := r.c.readErr
|
||||
if err == io.EOF && r.seq == r.c.readSeq {
|
||||
err = errUnexpectedEOF
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// ReadMessage is a helper method for getting a reader using NextReader and
|
||||
// reading from that reader to a buffer.
|
||||
func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
|
||||
var r io.Reader
|
||||
messageType, r, err = c.NextReader()
|
||||
if err != nil {
|
||||
return messageType, nil, err
|
||||
}
|
||||
p, err = ioutil.ReadAll(r)
|
||||
return messageType, p, err
|
||||
}
|
||||
|
||||
// SetReadDeadline sets the read deadline on the underlying network connection.
|
||||
// After a read has timed out, the websocket connection state is corrupt and
|
||||
// all future reads will return an error. A zero value for t means reads will
|
||||
// not time out.
|
||||
func (c *Conn) SetReadDeadline(t time.Time) error {
|
||||
return c.conn.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
// SetReadLimit sets the maximum size for a message read from the peer. If a
|
||||
// message exceeds the limit, the connection sends a close frame to the peer
|
||||
// and returns ErrReadLimit to the application.
|
||||
func (c *Conn) SetReadLimit(limit int64) {
|
||||
c.readLimit = limit
|
||||
}
|
||||
|
||||
// SetPingHandler sets the handler for ping messages received from the peer.
|
||||
// The default ping handler sends a pong to the peer.
|
||||
func (c *Conn) SetPingHandler(h func(string) error) {
|
||||
if h == nil {
|
||||
h = func(message string) error {
|
||||
c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
c.handlePing = h
|
||||
}
|
||||
|
||||
// SetPongHandler sets the handler for pong messages received from the peer.
|
||||
// The default pong handler does nothing.
|
||||
func (c *Conn) SetPongHandler(h func(string) error) {
|
||||
if h == nil {
|
||||
h = func(string) error { return nil }
|
||||
}
|
||||
c.handlePong = h
|
||||
}
|
||||
|
||||
// UnderlyingConn returns the internal net.Conn. This can be used to make further
|
||||
// modifications to connection specific flags.
|
||||
func (c *Conn) UnderlyingConn() net.Conn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
|
||||
func FormatCloseMessage(closeCode int, text string) []byte {
|
||||
buf := make([]byte, 2+len(text))
|
||||
binary.BigEndian.PutUint16(buf, uint16(closeCode))
|
||||
copy(buf[2:], text)
|
||||
return buf
|
||||
}
|
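A minimal sketch of sending a close frame with the WriteControl and FormatCloseMessage helpers defined in conn.go above; the one-second deadline, the package name, and the already-established conn are assumptions for illustration.

```go
package wsutil // hypothetical package name for this sketch

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

// closeGracefully sends a normal-closure close frame with a short write
// deadline, then drops the underlying connection. conn is assumed to be an
// established connection obtained elsewhere.
func closeGracefully(conn *websocket.Conn) {
	msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "bye")
	deadline := time.Now().Add(time.Second)
	if err := conn.WriteControl(websocket.CloseMessage, msg, deadline); err != nil && err != websocket.ErrCloseSent {
		log.Println("close:", err)
	}
	conn.Close()
}
```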
148
vendor/github.com/gorilla/websocket/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,148 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application uses
|
||||
// the Upgrade function from an Upgrader object with an HTTP request handler
|
||||
// to get a pointer to a Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// if err = conn.WriteMessage(messageType, p); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader
|
||||
// methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received ping and pong messages by invoking a callback
|
||||
// function set with SetPingHandler and SetPongHandler methods. These callback
|
||||
// functions can be invoked from the ReadMessage method, the NextReader method
|
||||
// or from a call to the data message reader returned from NextReader.
|
||||
//
|
||||
// Connections handle received close messages by returning an error from the
|
||||
// ReadMessage method, the NextReader method or from a call to the data message
|
||||
// reader returned from NextReader.
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections do not support concurrent calls to the write methods
|
||||
// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read
|
||||
// methods (NextReader, SetReadDeadline, ReadMessage). Connections do
|
||||
// support a concurrent reader and writer.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Read is Required
|
||||
//
|
||||
// The application must read the connection to process ping and close messages
|
||||
// sent from the peer. If the application is not otherwise interested in
|
||||
// messages from the peer, then the application should start a goroutine to read
|
||||
// and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and not equal to the
|
||||
// Host request header.
|
||||
//
|
||||
// An application can allow connections from any origin by specifying a
|
||||
// function that always returns true:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// CheckOrigin: func(r *http.Request) bool { return true },
|
||||
// }
|
||||
//
|
||||
// The deprecated Upgrade function does not enforce an origin policy. It's the
|
||||
// application's responsibility to check the Origin header before calling
|
||||
// Upgrade.
|
||||
package websocket
|
57
vendor/github.com/gorilla/websocket/json.go
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriteJSON is deprecated, use c.WriteJSON instead.
|
||||
func WriteJSON(c *Conn, v interface{}) error {
|
||||
return c.WriteJSON(v)
|
||||
}
|
||||
|
||||
// WriteJSON writes the JSON encoding of v to the connection.
|
||||
//
|
||||
// See the documentation for encoding/json Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (c *Conn) WriteJSON(v interface{}) error {
|
||||
w, err := c.NextWriter(TextMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err1 := json.NewEncoder(w).Encode(v)
|
||||
err2 := w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
// ReadJSON is deprecated, use c.ReadJSON instead.
|
||||
func ReadJSON(c *Conn, v interface{}) error {
|
||||
return c.ReadJSON(v)
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for the encoding/json Unmarshal function for details
|
||||
// about the conversion of JSON to a Go value.
|
||||
func (c *Conn) ReadJSON(v interface{}) error {
|
||||
_, r, err := c.NextReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.NewDecoder(r).Decode(v)
|
||||
if err == io.EOF {
|
||||
// Decode returns io.EOF when the message is empty or all whitespace.
|
||||
// Convert to io.ErrUnexpectedEOF so that application can distinguish
|
||||
// between an error reading the JSON value and the connection closing.
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
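A short sketch of using the ReadJSON/WriteJSON helpers above on an established connection; the Msg type, its field names, and the package name are hypothetical.

```go
package wsutil // hypothetical package name for this sketch

import "github.com/gorilla/websocket"

// Msg is a hypothetical JSON payload; any value encoding/json can handle works.
type Msg struct {
	Op   string `json:"op"`
	Body string `json:"body"`
}

// roundTrip writes one JSON message and reads one JSON reply on an
// already-established connection.
func roundTrip(conn *websocket.Conn) (Msg, error) {
	if err := conn.WriteJSON(Msg{Op: "echo", Body: "hi"}); err != nil {
		return Msg{}, err
	}
	var reply Msg
	// ReadJSON returns io.ErrUnexpectedEOF if the peer sent an empty message.
	err := conn.ReadJSON(&reply)
	return reply, err
}
```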
247
vendor/github.com/gorilla/websocket/server.go
generated
vendored
Normal file
|
@ -0,0 +1,247 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
|
||||
// size is zero, then a default value of 4096 is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is set, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client.
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, the host in the Origin header must not be set or
|
||||
// must match the host of the request.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return u.Host == r.Host
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
var (
|
||||
netConn net.Conn
|
||||
br *bufio.Reader
|
||||
err error
|
||||
)
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var rw *bufio.ReadWriter
|
||||
netConn, rw, err = h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
br = rw.Reader
|
||||
|
||||
if br.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
p := c.writeBuf[:0]
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-Websocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// This function is deprecated, use websocket.Upgrader instead.
|
||||
//
|
||||
// The application is responsible for checking the request origin before
|
||||
// calling Upgrade. An example implementation of the same origin policy is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", 403)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
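A minimal echo-server sketch using the Upgrader defined in server.go, along the lines of the example in doc.go; the buffer sizes, route, and listen address are illustrative assumptions.

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// Buffer sizes are illustrative; zero values would fall back to the 4096 default.
var upgrader = websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response at this point.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```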
44
vendor/github.com/gorilla/websocket/util.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains token.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
for _, v := range header[name] {
|
||||
for _, s := range strings.Split(v, ",") {
|
||||
if strings.EqualFold(value, strings.TrimSpace(s)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
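The accept-key computation in util.go can be checked in isolation against the worked example in RFC 6455. This standalone sketch reproduces computeAcceptKey's SHA-1-plus-base64 step using the RFC's sample challenge key.

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// Same GUID as keyGUID above; the challenge is the sample client key from RFC 6455.
	const guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
	challenge := "dGhlIHNhbXBsZSBub25jZQ=="
	h := sha1.New()
	h.Write([]byte(challenge))
	h.Write([]byte(guid))
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	// Prints s3pPLMBiTxaQ9kYGzzhZRbK+xOo=, matching the RFC's expected accept value.
}
```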
43
vendor/github.com/howeyc/gopass/pass.go
generated
vendored
|
@ -7,9 +7,14 @@ import (
|
|||
"os"
|
||||
)
|
||||
|
||||
var defaultGetCh = func() (byte, error) {
|
||||
type FdReader interface {
|
||||
io.Reader
|
||||
Fd() uintptr
|
||||
}
|
||||
|
||||
var defaultGetCh = func(r io.Reader) (byte, error) {
|
||||
buf := make([]byte, 1)
|
||||
if n, err := os.Stdin.Read(buf); n == 0 || err != nil {
|
||||
if n, err := r.Read(buf); n == 0 || err != nil {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -28,9 +33,10 @@ var (
|
|||
)
|
||||
|
||||
// getPasswd returns the input read from terminal.
|
||||
// If prompt is not empty, it will be output as a prompt to the user
|
||||
// If masked is true, typing will be matched by asterisks on the screen.
|
||||
// Otherwise, typing will echo nothing.
|
||||
func getPasswd(masked bool) ([]byte, error) {
|
||||
func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) {
|
||||
var err error
|
||||
var pass, bs, mask []byte
|
||||
if masked {
|
||||
|
@ -38,26 +44,33 @@ func getPasswd(masked bool) ([]byte, error) {
|
|||
mask = []byte("*")
|
||||
}
|
||||
|
||||
if isTerminal(os.Stdin.Fd()) {
|
||||
if oldState, err := makeRaw(os.Stdin.Fd()); err != nil {
|
||||
if isTerminal(r.Fd()) {
|
||||
if oldState, err := makeRaw(r.Fd()); err != nil {
|
||||
return pass, err
|
||||
} else {
|
||||
defer restore(os.Stdin.Fd(), oldState)
|
||||
defer func() {
|
||||
restore(r.Fd(), oldState)
|
||||
fmt.Fprintln(w)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
if prompt != "" {
|
||||
fmt.Fprint(w, prompt)
|
||||
}
|
||||
|
||||
// Track total bytes read, not just bytes in the password. This ensures any
|
||||
// errors that might flood the console with nil or -1 bytes infinitely are
|
||||
// capped.
|
||||
var counter int
|
||||
for counter = 0; counter <= maxLength; counter++ {
|
||||
if v, e := getch(); e != nil {
|
||||
if v, e := getch(r); e != nil {
|
||||
err = e
|
||||
break
|
||||
} else if v == 127 || v == 8 {
|
||||
if l := len(pass); l > 0 {
|
||||
pass = pass[:l-1]
|
||||
fmt.Print(string(bs))
|
||||
fmt.Fprint(w, string(bs))
|
||||
}
|
||||
} else if v == 13 || v == 10 {
|
||||
break
|
||||
|
@ -66,7 +79,7 @@ func getPasswd(masked bool) ([]byte, error) {
|
|||
break
|
||||
} else if v != 0 {
|
||||
pass = append(pass, v)
|
||||
fmt.Print(string(mask))
|
||||
fmt.Fprint(w, string(mask))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -74,18 +87,24 @@ func getPasswd(masked bool) ([]byte, error) {
|
|||
err = ErrMaxLengthExceeded
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
return pass, err
|
||||
}
|
||||
|
||||
// GetPasswd returns the password read from the terminal without echoing input.
|
||||
// The returned byte array does not include end-of-line characters.
|
||||
func GetPasswd() ([]byte, error) {
|
||||
return getPasswd(false)
|
||||
return getPasswd("", false, os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
// GetPasswdMasked returns the password read from the terminal, echoing asterisks.
|
||||
// The returned byte array does not include end-of-line characters.
|
||||
func GetPasswdMasked() ([]byte, error) {
|
||||
return getPasswd(true)
|
||||
return getPasswd("", true, os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
// GetPasswdPrompt prompts the user and returns the password read from the terminal.
|
||||
// If mask is true, then asterisks are echoed.
|
||||
// The returned byte array does not include end-of-line characters.
|
||||
func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) {
|
||||
return getPasswd(prompt, mask, r, w)
|
||||
}
|
||||
|
|
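The gopass change above threads an explicit reader and writer through getPasswd and exposes them via GetPasswdPrompt. A minimal sketch of calling the updated API follows; the prompt text is illustrative, and only lengths are printed so no secret is echoed.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/howeyc/gopass"
)

func main() {
	// Read a password with no echo at all (unchanged entry point).
	pass, err := gopass.GetPasswd()
	if err != nil {
		log.Fatal(err)
	}

	// Prompt explicitly and echo asterisks, passing stdin/stdout as the
	// FdReader and io.Writer introduced by this change.
	masked, err := gopass.GetPasswdPrompt("password: ", true, os.Stdin, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(len(pass), len(masked)) // lengths only; never print secrets
}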
59 vendor/github.com/juju/ratelimit/ratelimit.go generated vendored
@@ -2,7 +2,7 @@
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
// The ratelimit package provides an efficient token bucket implementation
|
||||
// Package ratelimit provides an efficient token bucket implementation
|
||||
// that can be used to limit the rate of arbitrary things.
|
||||
// See http://en.wikipedia.org/wiki/Token_bucket.
|
||||
package ratelimit
|
||||
|
@ -21,6 +21,7 @@ type Bucket struct {
|
|||
capacity int64
|
||||
quantum int64
|
||||
fillInterval time.Duration
|
||||
clock Clock
|
||||
|
||||
// The mutex guards the fields following it.
|
||||
mu sync.Mutex
|
||||
|
@ -33,12 +34,37 @@ type Bucket struct {
|
|||
availTick int64
|
||||
}
|
||||
|
||||
// Clock is used to inject testable fakes.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
Sleep(d time.Duration)
|
||||
}
|
||||
|
||||
// realClock implements Clock in terms of standard time functions.
|
||||
type realClock struct{}
|
||||
|
||||
// Now is identical to time.Now.
|
||||
func (realClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Sleep is identical to time.Sleep.
|
||||
func (realClock) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
// NewBucket returns a new token bucket that fills at the
|
||||
// rate of one token every fillInterval, up to the given
|
||||
// maximum capacity. Both arguments must be
|
||||
// positive. The bucket is initially full.
|
||||
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
|
||||
return NewBucketWithQuantum(fillInterval, capacity, 1)
|
||||
return NewBucketWithClock(fillInterval, capacity, realClock{})
|
||||
}
|
||||
|
||||
// NewBucketWithClock is identical to NewBucket but injects a testable clock
|
||||
// interface.
|
||||
func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket {
|
||||
return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock)
|
||||
}
|
||||
|
||||
// rateMargin specifes the allowed variance of actual
|
||||
|
@ -51,12 +77,18 @@ const rateMargin = 0.01
|
|||
// at high rates, the actual rate may be up to 1% different from the
|
||||
// specified rate.
|
||||
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
|
||||
return NewBucketWithRateAndClock(rate, capacity, realClock{})
|
||||
}
|
||||
|
||||
// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
|
||||
// testable clock interface.
|
||||
func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
|
||||
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
|
||||
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
|
||||
if fillInterval <= 0 {
|
||||
continue
|
||||
}
|
||||
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
|
||||
tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock)
|
||||
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
|
||||
return tb
|
||||
}
|
||||
|
@ -79,6 +111,12 @@ func nextQuantum(q int64) int64 {
|
|||
// the specification of the quantum size - quantum tokens
|
||||
// are added every fillInterval.
|
||||
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
|
||||
return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{})
|
||||
}
|
||||
|
||||
// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects
|
||||
// a testable clock interface.
|
||||
func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
|
||||
if fillInterval <= 0 {
|
||||
panic("token bucket fill interval is not > 0")
|
||||
}
|
||||
|
@ -89,7 +127,8 @@ func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *
|
|||
panic("token bucket quantum is not > 0")
|
||||
}
|
||||
return &Bucket{
|
||||
startTime: time.Now(),
|
||||
clock: clock,
|
||||
startTime: clock.Now(),
|
||||
capacity: capacity,
|
||||
quantum: quantum,
|
||||
avail: capacity,
|
||||
|
@ -101,7 +140,7 @@ func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *
|
|||
// available.
|
||||
func (tb *Bucket) Wait(count int64) {
|
||||
if d := tb.Take(count); d > 0 {
|
||||
time.Sleep(d)
|
||||
tb.clock.Sleep(d)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,7 +152,7 @@ func (tb *Bucket) Wait(count int64) {
|
|||
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
|
||||
d, ok := tb.TakeMaxDuration(count, maxWait)
|
||||
if d > 0 {
|
||||
time.Sleep(d)
|
||||
tb.clock.Sleep(d)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
@ -127,7 +166,7 @@ const infinityDuration time.Duration = 0x7fffffffffffffff
|
|||
// Note that if the request is irrevocable - there is no way to return
|
||||
// tokens to the bucket once this method commits us to taking them.
|
||||
func (tb *Bucket) Take(count int64) time.Duration {
|
||||
d, _ := tb.take(time.Now(), count, infinityDuration)
|
||||
d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
|
||||
return d
|
||||
}
|
||||
|
||||
|
@ -141,14 +180,14 @@ func (tb *Bucket) Take(count int64) time.Duration {
|
|||
// wait until the tokens are actually available, and reports
|
||||
// true.
|
||||
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
return tb.take(time.Now(), count, maxWait)
|
||||
return tb.take(tb.clock.Now(), count, maxWait)
|
||||
}
|
||||
|
||||
// TakeAvailable takes up to count immediately available tokens from the
|
||||
// bucket. It returns the number of tokens removed, or zero if there are
|
||||
// no available tokens. It does not block.
|
||||
func (tb *Bucket) TakeAvailable(count int64) int64 {
|
||||
return tb.takeAvailable(time.Now(), count)
|
||||
return tb.takeAvailable(tb.clock.Now(), count)
|
||||
}
|
||||
|
||||
// takeAvailable is the internal version of TakeAvailable - it takes the
|
||||
|
@ -178,7 +217,7 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
|
|||
// tokens could have changed in the meantime. This method is intended
|
||||
// primarily for metrics reporting and debugging.
|
||||
func (tb *Bucket) Available() int64 {
|
||||
return tb.available(time.Now())
|
||||
return tb.available(tb.clock.Now())
|
||||
}
|
||||
|
||||
// available is the internal version of available - it takes the current time as
|
||||
|
|
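The ratelimit change above threads an injectable Clock through every Bucket constructor so that tests can control time. A short sketch of both the default constructors and the new clock-injecting variants follows; the manualClock fake, rates, and capacities are illustrative, not part of the library.

package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

// manualClock is an illustrative fake that satisfies ratelimit.Clock.
type manualClock struct{ now time.Time }

func (c *manualClock) Now() time.Time        { return c.now }
func (c *manualClock) Sleep(d time.Duration) { c.now = c.now.Add(d) }

func main() {
	// Production use: roughly 100 tokens per second with a burst of 10.
	bucket := ratelimit.NewBucketWithRate(100, 10)
	bucket.Wait(1) // blocks (via the real clock) until a token is available

	// Test use: drive time by hand through the new *WithClock constructors.
	clk := &manualClock{now: time.Now()}
	testBucket := ratelimit.NewBucketWithClock(time.Second, 5, clk)
	fmt.Println(testBucket.TakeAvailable(5)) // 5: the bucket starts full
	fmt.Println(testBucket.TakeAvailable(1)) // 0: no refill until clk advances
}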
7 vendor/github.com/mitchellh/mapstructure/.travis.yml generated vendored Normal file
@@ -0,0 +1,7 @@
language: go

go:
- 1.4

script:
- go test
78 vendor/github.com/mitchellh/mapstructure/decode_hooks.go generated vendored
@@ -1,11 +1,59 @@
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
|
||||
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []interface{}{f1, f2}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
for _, raw := range potential {
|
||||
pt := reflect.ValueOf(raw).Type()
|
||||
if vt.ConvertibleTo(pt) {
|
||||
return v.Convert(pt).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeHookExec executes the given decode hook. This should be used
|
||||
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Type, to reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
// Build our arguments that reflect expects
|
||||
argVals := make([]reflect.Value, 3)
|
||||
argVals[0] = reflect.ValueOf(from)
|
||||
argVals[1] = reflect.ValueOf(to)
|
||||
argVals[2] = reflect.ValueOf(data)
|
||||
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from, to, data)
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), data)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
|
||||
|
||||
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||
// automatically composes multiple DecodeHookFuncs.
|
||||
//
|
||||
|
@ -13,18 +61,21 @@ import (
|
|||
// previous transformation.
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
var err error
|
||||
for _, f1 := range fs {
|
||||
data, err = f1(f, t, data)
|
||||
data, err = DecodeHookExec(f1, f, t, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify the from kind to be correct with the new data
|
||||
f = getKind(reflect.ValueOf(data))
|
||||
f = nil
|
||||
if val := reflect.ValueOf(data); val.IsValid() {
|
||||
f = val.Type()
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
|
@ -51,6 +102,25 @@ func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
|||
}
|
||||
}
|
||||
|
||||
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Duration.
|
||||
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Duration(5)) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.ParseDuration(data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
func WeaklyTypedHook(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
|
|
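DecodeHookExec and the new type-based hooks above are normally consumed through a DecoderConfig. A minimal sketch combining StringToTimeDurationHookFunc with a decoder follows; the Config struct, field names, and input map are illustrative assumptions.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Name    string
	Timeout time.Duration
}

func main() {
	input := map[string]interface{}{
		"name":    "worker",
		"timeout": "2s", // string form, converted by the duration hook
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
		Result:     &cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}

	fmt.Println(cfg.Name, cfg.Timeout) // worker 2s
}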
18 vendor/github.com/mitchellh/mapstructure/error.go generated vendored
@@ -1,7 +1,9 @@
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -17,11 +19,27 @@ func (e *Error) Error() string {
|
|||
points[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
sort.Strings(points)
|
||||
return fmt.Sprintf(
|
||||
"%d error(s) decoding:\n\n%s",
|
||||
len(e.Errors), strings.Join(points, "\n"))
|
||||
}
|
||||
|
||||
// WrappedErrors implements the errwrap.Wrapper interface to make this
|
||||
// return value more useful with the errwrap and go-multierror libraries.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]error, len(e.Errors))
|
||||
for i, e := range e.Errors {
|
||||
result[i] = errors.New(e)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func appendErrors(errors []string, err error) []string {
|
||||
switch e := err.(type) {
|
||||
case *Error:
|
||||
|
|
191 vendor/github.com/mitchellh/mapstructure/mapstructure.go generated vendored
@@ -1,5 +1,5 @@
// The mapstructure package exposes functionality to convert an
|
||||
// abitrary map[string]interface{} into a native Go structure.
|
||||
// arbitrary map[string]interface{} into a native Go structure.
|
||||
//
|
||||
// The Go structure can be arbitrarily complex, containing slices,
|
||||
// other structs, etc. and the decoder will properly decode nested
|
||||
|
@ -8,6 +8,7 @@
|
|||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
@ -19,10 +20,20 @@ import (
|
|||
// DecodeHookFunc is the callback function that can be used for
|
||||
// data transformations. See "DecodeHook" in the DecoderConfig
|
||||
// struct.
|
||||
type DecodeHookFunc func(
|
||||
from reflect.Kind,
|
||||
to reflect.Kind,
|
||||
data interface{}) (interface{}, error)
|
||||
//
|
||||
// The type should be DecodeHookFuncType or DecodeHookFuncKind.
|
||||
// Either is accepted. Types are a superset of Kinds (Types can return
|
||||
// Kinds) and are generally a richer thing to use, but Kinds are simpler
|
||||
// if you only need those.
|
||||
//
|
||||
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
|
||||
// we started with Kinds and then realized Types were the better solution,
|
||||
// but have a promise to not break backwards compat so we now support
|
||||
// both.
|
||||
type DecodeHookFunc interface{}
|
||||
|
||||
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
|
||||
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
|
||||
|
||||
// DecoderConfig is the configuration that is used to create a new decoder
|
||||
// and allows customization of various aspects of decoding.
|
||||
|
@ -40,6 +51,11 @@ type DecoderConfig struct {
|
|||
// (extra keys).
|
||||
ErrorUnused bool
|
||||
|
||||
// ZeroFields, if set to true, will zero fields before writing them.
|
||||
// For example, a map will be emptied before decoded values are put in
|
||||
// it. If this is false, a map will be merged.
|
||||
ZeroFields bool
|
||||
|
||||
// If WeaklyTypedInput is true, the decoder will make the following
|
||||
// "weak" conversions:
|
||||
//
|
||||
|
@ -51,6 +67,11 @@ type DecoderConfig struct {
|
|||
// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
|
||||
// FALSE, false, False. Anything else is an error)
|
||||
// - empty array = empty map and vice versa
|
||||
// - negative numbers to overflowed uint values (base 10)
|
||||
// - slice of maps to a merged map
|
||||
// - single values are converted to slices if required. Each
|
||||
// element is weakly decoded. For example: "4" can become []int{4}
|
||||
// if the target type is an int slice.
|
||||
//
|
||||
WeaklyTypedInput bool
|
||||
|
||||
|
@ -180,9 +201,11 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
|||
if d.config.DecodeHook != nil {
|
||||
// We have a DecodeHook, so let's pre-process the data.
|
||||
var err error
|
||||
data, err = d.config.DecodeHook(getKind(dataVal), getKind(val), data)
|
||||
data, err = DecodeHookExec(
|
||||
d.config.DecodeHook,
|
||||
dataVal.Type(), val.Type(), data)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error decoding '%s': %s", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -209,6 +232,8 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
|||
err = d.decodePtr(name, data, val)
|
||||
case reflect.Slice:
|
||||
err = d.decodeSlice(name, data, val)
|
||||
case reflect.Func:
|
||||
err = d.decodeFunc(name, data, val)
|
||||
default:
|
||||
// If we reached this point then we weren't able to decode it
|
||||
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
|
||||
|
@ -227,6 +252,10 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
|||
// value to "data" of that type.
|
||||
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
if !dataVal.IsValid() {
|
||||
dataVal = reflect.Zero(val.Type())
|
||||
}
|
||||
|
||||
dataValType := dataVal.Type()
|
||||
if !dataValType.AssignableTo(val.Type()) {
|
||||
return fmt.Errorf(
|
||||
|
@ -283,6 +312,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
|
|||
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
|
@ -304,6 +334,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
|
|||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetInt(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
|
@ -319,11 +357,21 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
|
|||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
val.SetUint(uint64(dataVal.Int()))
|
||||
i := dataVal.Int()
|
||||
if i < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %d overflows uint",
|
||||
name, i)
|
||||
}
|
||||
val.SetUint(uint64(i))
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetUint(dataVal.Uint())
|
||||
case dataKind == reflect.Float32:
|
||||
val.SetUint(uint64(dataVal.Float()))
|
||||
f := dataVal.Float()
|
||||
if f < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %f overflows uint",
|
||||
name, f)
|
||||
}
|
||||
val.SetUint(uint64(f))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetUint(1)
|
||||
|
@ -380,6 +428,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
|
|||
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
|
@ -401,6 +450,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
|
|||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Float64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetFloat(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
|
@ -415,22 +472,43 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
|
|||
valKeyType := valType.Key()
|
||||
valElemType := valType.Elem()
|
||||
|
||||
// By default we overwrite keys in the current map
|
||||
valMap := val
|
||||
|
||||
// If the map is nil or we're purposely zeroing fields, make a new map
|
||||
if valMap.IsNil() || d.config.ZeroFields {
|
||||
// Make a new map to hold our result
|
||||
mapType := reflect.MapOf(valKeyType, valElemType)
|
||||
valMap := reflect.MakeMap(mapType)
|
||||
valMap = reflect.MakeMap(mapType)
|
||||
}
|
||||
|
||||
// Check input type
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
if dataVal.Kind() != reflect.Map {
|
||||
// Accept empty array/slice instead of an empty map in weakly typed mode
|
||||
if d.config.WeaklyTypedInput &&
|
||||
(dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) &&
|
||||
dataVal.Len() == 0 {
|
||||
// In weak mode, we accept a slice of maps as an input...
|
||||
if d.config.WeaklyTypedInput {
|
||||
switch dataVal.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Special case for BC reasons (covered by tests)
|
||||
if dataVal.Len() == 0 {
|
||||
val.Set(valMap)
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||
}
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
err := d.decode(
|
||||
fmt.Sprintf("%s[%d]", name, i),
|
||||
dataVal.Index(i).Interface(), val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||
}
|
||||
|
||||
// Accumulate errors
|
||||
|
@ -473,7 +551,12 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
|
|||
// into that. Then set the value of the pointer to this type.
|
||||
valType := val.Type()
|
||||
valElemType := valType.Elem()
|
||||
realVal := reflect.New(valElemType)
|
||||
|
||||
realVal := val
|
||||
if realVal.IsNil() || d.config.ZeroFields {
|
||||
realVal = reflect.New(valElemType)
|
||||
}
|
||||
|
||||
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -482,6 +565,19 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
if val.Type() != dataVal.Type() {
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
dataValKind := dataVal.Kind()
|
||||
|
@ -489,26 +585,44 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
|||
valElemType := valType.Elem()
|
||||
sliceType := reflect.SliceOf(valElemType)
|
||||
|
||||
valSlice := val
|
||||
if valSlice.IsNil() || d.config.ZeroFields {
|
||||
// Check input type
|
||||
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||
// Accept empty map instead of array/slice in weakly typed mode
|
||||
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
|
||||
if d.config.WeaklyTypedInput {
|
||||
switch {
|
||||
// Empty maps turn into empty slices
|
||||
case dataValKind == reflect.Map:
|
||||
if dataVal.Len() == 0 {
|
||||
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf(
|
||||
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||
}
|
||||
|
||||
// All other types we try to convert to the slice type
|
||||
// and "lift" it into it. i.e. a string becomes a string slice.
|
||||
default:
|
||||
// Just re-try this function with data as a slice.
|
||||
return d.decodeSlice(name, []interface{}{data}, val)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf(
|
||||
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||
|
||||
}
|
||||
|
||||
// Make a new slice to hold our result, same size as the original data.
|
||||
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||
}
|
||||
|
||||
// Accumulate any errors
|
||||
errors := make([]string, 0)
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
currentData := dataVal.Index(i).Interface()
|
||||
for valSlice.Len() <= i {
|
||||
valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
|
||||
}
|
||||
currentField := valSlice.Index(i)
|
||||
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
|
@ -530,6 +644,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
|||
|
||||
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
|
||||
// If the type of the value to write to and the data match directly,
|
||||
// then we just set it directly instead of recursing into the structure.
|
||||
if dataVal.Type() == val.Type() {
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
dataValKind := dataVal.Kind()
|
||||
if dataValKind != reflect.Map {
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
|
||||
|
@ -565,19 +687,12 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
|||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
|
||||
if fieldType.Anonymous {
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
if fieldKind != reflect.Struct {
|
||||
errors = appendErrors(errors,
|
||||
fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
|
||||
continue
|
||||
}
|
||||
|
||||
// We have an embedded field. We "squash" the fields down
|
||||
// if specified in the tag.
|
||||
// If "squash" is specified in the tag, we squash the field down.
|
||||
squash := false
|
||||
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
|
||||
for _, tag := range tagParts[1:] {
|
||||
|
@ -588,9 +703,13 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
|||
}
|
||||
|
||||
if squash {
|
||||
if fieldKind != reflect.Struct {
|
||||
errors = appendErrors(errors,
|
||||
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
|
||||
} else {
|
||||
structs = append(structs, val.FieldByName(fieldType.Name))
|
||||
continue
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Normal struct field, store it away
|
||||
|
@ -612,7 +731,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
|||
if !rawMapVal.IsValid() {
|
||||
// Do a slower search by iterating over each key and
|
||||
// doing case-insensitive search.
|
||||
for dataValKey, _ := range dataValKeys {
|
||||
for dataValKey := range dataValKeys {
|
||||
mK, ok := dataValKey.Interface().(string)
|
||||
if !ok {
|
||||
// Not a string key
|
||||
|
@ -660,7 +779,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
|||
|
||||
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
|
||||
keys := make([]string, 0, len(dataValKeysUnused))
|
||||
for rawKey, _ := range dataValKeysUnused {
|
||||
for rawKey := range dataValKeysUnused {
|
||||
keys = append(keys, rawKey.(string))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
@ -675,7 +794,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
|||
|
||||
// Add the unused keys to the list of unused keys if we're tracking metadata
|
||||
if d.config.Metadata != nil {
|
||||
for rawKey, _ := range dataValKeysUnused {
|
||||
for rawKey := range dataValKeysUnused {
|
||||
key := rawKey.(string)
|
||||
if name != "" {
|
||||
key = fmt.Sprintf("%s.%s", name, key)
|
||||
|
|
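The WeaklyTypedInput conversions documented earlier in this file's diff, including the new single-value-to-slice lifting added above, can be exercised with a plain decoder. A small sketch under assumed struct and field names:

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Options struct {
	Replicas uint
	Tags     []string
}

func main() {
	// Weakly typed input: a quoted number and a single value where a
	// slice is expected, both handled by the conversions listed above.
	input := map[string]interface{}{
		"replicas": "3",
		"tags":     "canary", // lifted to []string{"canary"}
	}

	var opts Options
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           &opts,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", opts) // {Replicas:3 Tags:[canary]}
}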
1 vendor/github.com/prometheus/client_golang/prometheus/doc.go generated vendored
@@ -26,6 +26,7 @@
// package main
|
||||
//
|
||||
// import (
|
||||
// "log"
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
|
|
7 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go generated vendored
@@ -20,8 +20,11 @@ import (
"net/http"
|
||||
)
|
||||
|
||||
func newDelegator(w http.ResponseWriter) delegator {
|
||||
d := &responseWriterDelegator{ResponseWriter: w}
|
||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||
d := &responseWriterDelegator{
|
||||
ResponseWriter: w,
|
||||
observeWriteHeader: observeWriteHeaderFunc,
|
||||
}
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
|
|
7 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go generated vendored
@@ -22,8 +22,11 @@ import (
|
||||
// newDelegator handles the four different methods of upgrading a
|
||||
// http.ResponseWriter to delegator.
|
||||
func newDelegator(w http.ResponseWriter) delegator {
|
||||
d := &responseWriterDelegator{ResponseWriter: w}
|
||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||
d := &responseWriterDelegator{
|
||||
ResponseWriter: w,
|
||||
observeWriteHeader: observeWriteHeaderFunc,
|
||||
}
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
|
|
40 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go generated vendored
@ -63,7 +63,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
|
|||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w)
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
|
||||
|
@ -96,7 +96,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
|
|||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w)
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
counter.With(labels(code, method, r.Method, d.Status())).Inc()
|
||||
})
|
||||
|
@ -108,6 +108,34 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
|
|||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
|
||||
// http.Handler to observe with the provided ObserverVec the request duration
|
||||
// until the response headers are written. The ObserverVec must have zero, one,
|
||||
// or two labels. The only allowed label names are "code" and "method". The
|
||||
// function panics if any other instance labels are provided. The Observe
|
||||
// method of the Observer in the ObserverVec is called with the request
|
||||
// duration in seconds. Partitioning happens by HTTP status code and/or HTTP
|
||||
// method if the respective instance label names are present in the
|
||||
// ObserverVec. For unpartitioned observations, use an ObserverVec with zero
|
||||
// labels. Note that partitioning of Histograms is expensive and should be used
|
||||
// judiciously.
|
||||
//
|
||||
// If the wrapped Handler panics before calling WriteHeader, no value is
|
||||
// reported.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w, func(status int) {
|
||||
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
next.ServeHTTP(d, r)
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerRequestSize is a middleware that wraps the provided
|
||||
// http.Handler to observe the request size with the provided ObserverVec.
|
||||
// The ObserverVec must have zero, one, or two labels. The only allowed label
|
||||
|
@ -129,7 +157,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
|
|||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w)
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
|
||||
|
@ -162,7 +190,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
|
|||
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
|
||||
code, method := checkLabels(obs)
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w)
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
|
||||
})
|
||||
|
@ -422,6 +450,7 @@ type responseWriterDelegator struct {
|
|||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
observeWriteHeader func(int)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Status() int {
|
||||
|
@ -436,6 +465,9 @@ func (r *responseWriterDelegator) WriteHeader(code int) {
|
|||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
if r.observeWriteHeader != nil {
|
||||
r.observeWriteHeader(code)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
|
|
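InstrumentHandlerTimeToWriteHeader, added above, wires its observation through the new observeWriteHeader callback on the delegator. A brief sketch of registering it around an ordinary handler follows; the metric name, buckets, and port are illustrative assumptions.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	ttwh := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_time_to_write_header_seconds",
			Help:    "Time until response headers are written.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"}, // the only label names the wrapper allows
	)
	prometheus.MustRegister(ttwh)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Observe time-to-first-header for every request to "/".
	http.Handle("/", promhttp.InstrumentHandlerTimeToWriteHeader(ttwh, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}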
13 vendor/github.com/prometheus/client_model/AUTHORS.md generated vendored Normal file
@@ -0,0 +1,13 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.

Maintainers of this repository:

* Björn Rabenstein <beorn@soundcloud.com>

The following individuals have contributed code to this repository
(listed in alphabetical order):

* Björn Rabenstein <beorn@soundcloud.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>
1516 vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json generated vendored Normal file
File diff suppressed because it is too large
5036 vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go generated vendored Normal file
File diff suppressed because it is too large
29691 vendor/google.golang.org/api/compute/v0.alpha/compute-api.json generated vendored Normal file
File diff suppressed because it is too large
91682 vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go generated vendored Normal file
File diff suppressed because it is too large
0 vendor/github.com/go-openapi/analysis/LICENSE → vendor/k8s.io/api/LICENSE generated vendored
@ -10,17 +10,13 @@ load(
|
|||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"defaults.go",
|
||||
"doc.go",
|
||||
"generated.pb.go",
|
||||
"helpers.go",
|
||||
"register.go",
|
||||
"types.generated.go",
|
||||
"types.go",
|
||||
"types_swagger_doc_generated.go",
|
||||
"zz_generated.conversion.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
"zz_generated.defaults.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
|
@ -31,6 +27,5 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/apis/rbac:go_default_library",
|
||||
],
|
||||
)
|
|
@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func addConversionFuncs(scheme *runtime.Scheme) error {
|
||||
// Add non-generated conversion functions
|
||||
return scheme.AddConversionFuncs()
|
||||
}
|
||||
// Package v1alpha1 is the v1alpha1 version of the API.
|
||||
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
|
||||
// InitializerConfiguration and ExternalAdmissionHookConfiguration is for the
|
||||
// new dynamic admission controller configuration.
|
||||
// +groupName=admissionregistration.k8s.io
|
||||
package v1alpha1
|
File diff suppressed because it is too large
203 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto generated vendored Normal file
@@ -0,0 +1,203 @@
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
|
||||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.api.admissionregistration.v1alpha1;
|
||||
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1alpha1";
|
||||
|
||||
// AdmissionHookClientConfig contains the information to make a TLS
|
||||
// connection with the webhook
|
||||
message AdmissionHookClientConfig {
|
||||
// Service is a reference to the service for this webhook. If there is only
|
||||
// one port open for the service, that port will be used. If there are multiple
|
||||
// ports open, port 443 will be used if it is open, otherwise it is an error.
|
||||
// Required
|
||||
optional ServiceReference service = 1;
|
||||
|
||||
// CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate.
|
||||
// Required
|
||||
optional bytes caBundle = 2;
|
||||
}
|
||||
|
||||
// ExternalAdmissionHook describes an external admission webhook and the
|
||||
// resources and operations it applies to.
|
||||
message ExternalAdmissionHook {
|
||||
// The name of the external admission webhook.
|
||||
// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
|
||||
// "imagepolicy" is the name of the webhook, and kubernetes.io is the name
|
||||
// of the organization.
|
||||
// Required.
|
||||
optional string name = 1;
|
||||
|
||||
// ClientConfig defines how to communicate with the hook.
|
||||
// Required
|
||||
optional AdmissionHookClientConfig clientConfig = 2;
|
||||
|
||||
// Rules describes what operations on what resources/subresources the webhook cares about.
|
||||
// The webhook cares about an operation if it matches _any_ Rule.
|
||||
repeated RuleWithOperations rules = 3;
|
||||
|
||||
// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
|
||||
// allowed values are Ignore or Fail. Defaults to Ignore.
|
||||
// +optional
|
||||
optional string failurePolicy = 4;
|
||||
}
|
||||
|
||||
// ExternalAdmissionHookConfiguration describes the configuration of initializers.
|
||||
message ExternalAdmissionHookConfiguration {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// ExternalAdmissionHooks is a list of external admission webhooks and the
|
||||
// affected resources and operations.
|
||||
// +optional
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
repeated ExternalAdmissionHook externalAdmissionHooks = 2;
|
||||
}
|
||||
|
||||
// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.
|
||||
message ExternalAdmissionHookConfigurationList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of ExternalAdmissionHookConfiguration.
|
||||
repeated ExternalAdmissionHookConfiguration items = 2;
|
||||
}
|
||||
|
||||
// Initializer describes the name and the failure policy of an initializer, and
|
||||
// what resources it applies to.
|
||||
message Initializer {
|
||||
// Name is the identifier of the initializer. It will be added to the
|
||||
// object that needs to be initialized.
|
||||
// Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
|
||||
// "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
|
||||
// of the organization.
|
||||
// Required
|
||||
optional string name = 1;
|
||||
|
||||
// Rules describes what resources/subresources the initializer cares about.
|
||||
// The initializer cares about an operation if it matches _any_ Rule.
|
||||
// Rule.Resources must not include subresources.
|
||||
repeated Rule rules = 2;
|
||||
|
||||
// FailurePolicy defines what happens if the responsible initializer controller
|
||||
// fails to takes action. Allowed values are Ignore, or Fail. If "Ignore" is
|
||||
// set, initializer is removed from the initializers list of an object if
|
||||
// the timeout is reached; If "Fail" is set, admissionregistration returns timeout error
|
||||
// if the timeout is reached.
|
||||
optional string failurePolicy = 3;
|
||||
}
|
||||
|
||||
// InitializerConfiguration describes the configuration of initializers.
|
||||
message InitializerConfiguration {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Initializers is a list of resources and their default initializers
|
||||
// Order-sensitive.
|
||||
// When merging multiple InitializerConfigurations, we sort the initializers
|
||||
// from different InitializerConfigurations by the name of the
|
||||
// InitializerConfigurations; the order of the initializers from the same
|
||||
// InitializerConfiguration is preserved.
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
// +optional
|
||||
repeated Initializer initializers = 2;
|
||||
}
|
||||
|
||||
// InitializerConfigurationList is a list of InitializerConfiguration.
|
||||
message InitializerConfigurationList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of InitializerConfiguration.
|
||||
repeated InitializerConfiguration items = 2;
|
||||
}
|
||||
|
||||
// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended
|
||||
// to make sure that all the tuple expansions are valid.
|
||||
message Rule {
|
||||
// APIGroups is the API groups the resources belong to. '*' is all groups.
|
||||
// If '*' is present, the length of the slice must be one.
|
||||
// Required.
|
||||
repeated string apiGroups = 1;
|
||||
|
||||
// APIVersions is the API versions the resources belong to. '*' is all versions.
|
||||
// If '*' is present, the length of the slice must be one.
|
||||
// Required.
|
||||
repeated string apiVersions = 2;
|
||||
|
||||
// Resources is a list of resources this rule applies to.
|
||||
//
|
||||
// For example:
|
||||
// 'pods' means pods.
|
||||
// 'pods/log' means the log subresource of pods.
|
||||
// '*' means all resources, but not subresources.
|
||||
// 'pods/*' means all subresources of pods.
|
||||
// '*/scale' means all scale subresources.
|
||||
// '*/*' means all resources and their subresources.
|
||||
//
|
||||
// If wildcard is present, the validation rule will ensure resources do not
|
||||
// overlap with each other.
|
||||
//
|
||||
// Depending on the enclosing object, subresources might not be allowed.
|
||||
// Required.
|
||||
repeated string resources = 3;
|
||||
}
|
||||
|
||||
// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
|
||||
// sure that all the tuple expansions are valid.
|
||||
message RuleWithOperations {
|
||||
// Operations is the operations the admission hook cares about - CREATE, UPDATE, or *
|
||||
// for all operations.
|
||||
// If '*' is present, the length of the slice must be one.
|
||||
// Required.
|
||||
repeated string operations = 1;
|
||||
|
||||
// Rule is embedded, it describes other criteria of the rule, like
|
||||
// APIGroups, APIVersions, Resources, etc.
|
||||
optional Rule rule = 2;
|
||||
}
|
||||
|
||||
// ServiceReference holds a reference to Service.legacy.k8s.io
|
||||
message ServiceReference {
|
||||
// Namespace is the namespace of the service
|
||||
// Required
|
||||
optional string namespace = 1;
|
||||
|
||||
// Name is the name of the service
|
||||
// Required
|
||||
optional string name = 2;
|
||||
}
|
||||
|
53 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go generated vendored Normal file
@@ -0,0 +1,53 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

const GroupName = "admissionregistration.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)

// Adds the list of known types to scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&InitializerConfiguration{},
		&InitializerConfigurationList{},
		&ExternalAdmissionHookConfiguration{},
		&ExternalAdmissionHookConfigurationList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
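register.go above follows the standard scheme-registration pattern. A minimal sketch of how a consumer would pull these types into a runtime.Scheme follows; the import alias and the kind being instantiated are illustrative.

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := admissionv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The group/version/kinds registered by addKnownTypes are now resolvable.
	gvk := admissionv1alpha1.SchemeGroupVersion.WithKind("InitializerConfiguration")
	obj, err := scheme.New(gvk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *v1alpha1.InitializerConfiguration
}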
File diff suppressed because it is too large
220 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go generated vendored Normal file
@@ -0,0 +1,220 @@
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient=true
|
||||
// +nonNamespaced=true
|
||||
|
||||
// InitializerConfiguration describes the configuration of initializers.
|
||||
type InitializerConfiguration struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Initializers is a list of resources and their default initializers
|
||||
// Order-sensitive.
|
||||
// When merging multiple InitializerConfigurations, we sort the initializers
|
||||
// from different InitializerConfigurations by the name of the
|
||||
// InitializerConfigurations; the order of the initializers from the same
|
||||
// InitializerConfiguration is preserved.
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
// +optional
|
||||
Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"`
|
||||
}
|
||||
|
||||
// InitializerConfigurationList is a list of InitializerConfiguration.
|
||||
type InitializerConfigurationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// List of InitializerConfiguration.
|
||||
Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// Initializer describes the name and the failure policy of an initializer, and
|
||||
// what resources it applies to.
|
||||
type Initializer struct {
|
||||
// Name is the identifier of the initializer. It will be added to the
|
||||
// object that needs to be initialized.
|
||||
// Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
|
||||
// "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
|
||||
// of the organization.
|
||||
// Required
|
||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||
|
||||
// Rules describes what resources/subresources the initializer cares about.
|
||||
// The initializer cares about an operation if it matches _any_ Rule.
|
||||
// Rule.Resources must not include subresources.
|
||||
Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
|
||||
|
||||
// FailurePolicy defines what happens if the responsible initializer controller
|
||||
// fails to takes action. Allowed values are Ignore, or Fail. If "Ignore" is
|
||||
// set, initializer is removed from the initializers list of an object if
|
||||
// the timeout is reached; If "Fail" is set, admissionregistration returns timeout error
|
||||
// if the timeout is reached.
|
||||
FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,3,opt,name=failurePolicy,casttype=FailurePolicyType"`
|
||||
}
|
||||
|
||||
// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended
|
||||
// to make sure that all the tuple expansions are valid.
|
||||
type Rule struct {
|
||||
// APIGroups is the API groups the resources belong to. '*' is all groups.
|
||||
// If '*' is present, the length of the slice must be one.
|
||||
// Required.
|
||||
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"`
|
||||
|
||||
// APIVersions is the API versions the resources belong to. '*' is all versions.
|
||||
// If '*' is present, the length of the slice must be one.
|
||||
// Required.
|
||||
APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"`
|
||||
|
||||
// Resources is a list of resources this rule applies to.
|
||||
//
|
||||
// For example:
|
||||
// 'pods' means pods.
|
||||
// 'pods/log' means the log subresource of pods.
|
||||
// '*' means all resources, but not subresources.
|
||||
// 'pods/*' means all subresources of pods.
|
||||
// '*/scale' means all scale subresources.
|
||||
// '*/*' means all resources and their subresources.
|
||||
//
|
||||
// If wildcard is present, the validation rule will ensure resources do not
|
||||
// overlap with each other.
|
||||
//
|
||||
// Depending on the enclosing object, subresources might not be allowed.
|
||||
// Required.
|
||||
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
|
||||
}
|
||||
|
||||
type FailurePolicyType string
|
||||
|
||||
const (
|
||||
// Ignore means the initializer is removed from the initializers list of an
|
||||
// object if the initializer times out.
|
||||
Ignore FailurePolicyType = "Ignore"
|
||||
// For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the
|
||||
// extensible admission feature is beta.
|
||||
Fail FailurePolicyType = "Fail"
|
||||
)
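
// Editor's note: the function below is an illustrative sketch and is not part
// of the generated API. It shows how the types above fit together: an
// InitializerConfiguration registering a hypothetical "podimage.example.com"
// initializer for core-group pods with the Ignore failure policy. The object
// name and initializer name are assumptions made for the example.
func examplePodInitializerConfiguration() *InitializerConfiguration {
	ignore := Ignore // for 1.7 only "Ignore" is allowed, per the constants above
	return &InitializerConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-initializers"},
		Initializers: []Initializer{{
			Name:          "podimage.example.com",
			FailurePolicy: &ignore,
			Rules: []Rule{{
				APIGroups:   []string{""},
				APIVersions: []string{"v1"},
				Resources:   []string{"pods"},
			}},
		}},
	}
}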
|
||||
|
||||
// +genclient=true
|
||||
// +nonNamespaced=true
|
||||
|
||||
// ExternalAdmissionHookConfiguration describes the configuration of external admission webhooks.
|
||||
type ExternalAdmissionHookConfiguration struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// ExternalAdmissionHooks is a list of external admission webhooks and the
|
||||
// affected resources and operations.
|
||||
// +optional
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
ExternalAdmissionHooks []ExternalAdmissionHook `json:"externalAdmissionHooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=externalAdmissionHooks"`
|
||||
}
|
||||
|
||||
// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.
|
||||
type ExternalAdmissionHookConfigurationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of ExternalAdmissionHookConfiguration.
|
||||
Items []ExternalAdmissionHookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// ExternalAdmissionHook describes an external admission webhook and the
|
||||
// resources and operations it applies to.
|
||||
type ExternalAdmissionHook struct {
|
||||
// The name of the external admission webhook.
|
||||
// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
|
||||
// "imagepolicy" is the name of the webhook, and kubernetes.io is the name
|
||||
// of the organization.
|
||||
// Required.
|
||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||
|
||||
// ClientConfig defines how to communicate with the hook.
|
||||
// Required
|
||||
ClientConfig AdmissionHookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"`
|
||||
|
||||
// Rules describes what operations on what resources/subresources the webhook cares about.
|
||||
// The webhook cares about an operation if it matches _any_ Rule.
|
||||
Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`
|
||||
|
||||
// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
|
||||
// allowed values are Ignore or Fail. Defaults to Ignore.
|
||||
// +optional
|
||||
FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"`
|
||||
}
|
||||
|
||||
// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
|
||||
// sure that all the tuple expansions are valid.
|
||||
type RuleWithOperations struct {
|
||||
// Operations is the operations the admission hook cares about - CREATE, UPDATE, or *
|
||||
// for all operations.
|
||||
// If '*' is present, the length of the slice must be one.
|
||||
// Required.
|
||||
Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"`
|
||||
// Rule is embedded, it describes other criteria of the rule, like
|
||||
// APIGroups, APIVersions, Resources, etc.
|
||||
Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"`
|
||||
}
|
||||
|
||||
type OperationType string
|
||||
|
||||
// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go.
|
||||
const (
|
||||
OperationAll OperationType = "*"
|
||||
Create OperationType = "CREATE"
|
||||
Update OperationType = "UPDATE"
|
||||
Delete OperationType = "DELETE"
|
||||
Connect OperationType = "CONNECT"
|
||||
)
|
||||
|
||||
// AdmissionHookClientConfig contains the information to make a TLS
|
||||
// connection with the webhook
|
||||
type AdmissionHookClientConfig struct {
|
||||
// Service is a reference to the service for this webhook. If there is only
|
||||
// one port open for the service, that port will be used. If there are multiple
|
||||
// ports open, port 443 will be used if it is open, otherwise it is an error.
|
||||
// Required
|
||||
Service ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"`
|
||||
// CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate.
|
||||
// Required
|
||||
CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"`
|
||||
}
|
||||
|
||||
// ServiceReference holds a reference to Service.legacy.k8s.io
|
||||
type ServiceReference struct {
|
||||
// Namespace is the namespace of the service
|
||||
// Required
|
||||
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
|
||||
// Name is the name of the service
|
||||
// Required
|
||||
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
|
||||
}
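
// Editor's note: the function below is an illustrative sketch and is not part
// of the generated API. It shows how an ExternalAdmissionHookConfiguration
// wires a webhook to a ServiceReference and a RuleWithOperations: a
// hypothetical "imagepolicy.example.com" hook intercepting pod creation. The
// names, namespace, and CA bundle are placeholders.
func exampleWebhookConfiguration(caPEM []byte) *ExternalAdmissionHookConfiguration {
	failurePolicy := Ignore // unrecognized errors from the endpoint are ignored
	return &ExternalAdmissionHookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-admission-hooks"},
		ExternalAdmissionHooks: []ExternalAdmissionHook{{
			Name: "imagepolicy.example.com",
			ClientConfig: AdmissionHookClientConfig{
				Service:  ServiceReference{Namespace: "kube-system", Name: "image-policy-webhook"},
				CABundle: caPEM,
			},
			Rules: []RuleWithOperations{{
				Operations: []OperationType{Create},
				Rule: Rule{
					APIGroups:   []string{""},
					APIVersions: []string{"v1"},
					Resources:   []string{"pods"},
				},
			}},
			FailurePolicy: &failurePolicy,
		}},
	}
}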
|
133
vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
generated
vendored
Normal file
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
// This file contains a collection of methods that can be used from go-restful to
|
||||
// generate Swagger API documentation for its models. Please read this PR for more
|
||||
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
|
||||
//
|
||||
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
|
||||
// they are on one line! For multiple line or blocks that you want to ignore use ---.
|
||||
// Any context after a --- is ignored.
|
||||
//
|
||||
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE
|
||||
var map_AdmissionHookClientConfig = map[string]string{
|
||||
"": "AdmissionHookClientConfig contains the information to make a TLS connection with the webhook",
|
||||
"service": "Service is a reference to the service for this webhook. If there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error. Required",
|
||||
"caBundle": "CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. Required",
|
||||
}
|
||||
|
||||
func (AdmissionHookClientConfig) SwaggerDoc() map[string]string {
|
||||
return map_AdmissionHookClientConfig
|
||||
}
|
||||
|
||||
var map_ExternalAdmissionHook = map[string]string{
|
||||
"": "ExternalAdmissionHook describes an external admission webhook and the resources and operations it applies to.",
|
||||
"name": "The name of the external admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
|
||||
"clientConfig": "ClientConfig defines how to communicate with the hook. Required",
|
||||
"rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.",
|
||||
"failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.",
|
||||
}
|
||||
|
||||
func (ExternalAdmissionHook) SwaggerDoc() map[string]string {
|
||||
return map_ExternalAdmissionHook
|
||||
}
|
||||
|
||||
var map_ExternalAdmissionHookConfiguration = map[string]string{
|
||||
"": "ExternalAdmissionHookConfiguration describes the configuration of initializers.",
|
||||
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
|
||||
"externalAdmissionHooks": "ExternalAdmissionHooks is a list of external admission webhooks and the affected resources and operations.",
|
||||
}
|
||||
|
||||
func (ExternalAdmissionHookConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_ExternalAdmissionHookConfiguration
|
||||
}
|
||||
|
||||
var map_ExternalAdmissionHookConfigurationList = map[string]string{
|
||||
"": "ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.",
|
||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"items": "List of ExternalAdmissionHookConfiguration.",
|
||||
}
|
||||
|
||||
func (ExternalAdmissionHookConfigurationList) SwaggerDoc() map[string]string {
|
||||
return map_ExternalAdmissionHookConfigurationList
|
||||
}
|
||||
|
||||
var map_Initializer = map[string]string{
|
||||
"": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.",
|
||||
"name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required",
|
||||
"rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.",
|
||||
"failurePolicy": "FailurePolicy defines what happens if the responsible initializer controller fails to takes action. Allowed values are Ignore, or Fail. If \"Ignore\" is set, initializer is removed from the initializers list of an object if the timeout is reached; If \"Fail\" is set, admissionregistration returns timeout error if the timeout is reached.",
|
||||
}
|
||||
|
||||
func (Initializer) SwaggerDoc() map[string]string {
|
||||
return map_Initializer
|
||||
}
|
||||
|
||||
var map_InitializerConfiguration = map[string]string{
|
||||
"": "InitializerConfiguration describes the configuration of initializers.",
|
||||
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
|
||||
"initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.",
|
||||
}
|
||||
|
||||
func (InitializerConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_InitializerConfiguration
|
||||
}
|
||||
|
||||
var map_InitializerConfigurationList = map[string]string{
|
||||
"": "InitializerConfigurationList is a list of InitializerConfiguration.",
|
||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"items": "List of InitializerConfiguration.",
|
||||
}
|
||||
|
||||
func (InitializerConfigurationList) SwaggerDoc() map[string]string {
|
||||
return map_InitializerConfigurationList
|
||||
}
|
||||
|
||||
var map_Rule = map[string]string{
|
||||
"": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.",
|
||||
"apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
|
||||
"apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.",
|
||||
"resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.",
|
||||
}
|
||||
|
||||
func (Rule) SwaggerDoc() map[string]string {
|
||||
return map_Rule
|
||||
}
|
||||
|
||||
var map_RuleWithOperations = map[string]string{
|
||||
"": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.",
|
||||
"operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.",
|
||||
}
|
||||
|
||||
func (RuleWithOperations) SwaggerDoc() map[string]string {
|
||||
return map_RuleWithOperations
|
||||
}
|
||||
|
||||
var map_ServiceReference = map[string]string{
|
||||
"": "ServiceReference holds a reference to Service.legacy.k8s.io",
|
||||
"namespace": "Namespace is the namespace of the service Required",
|
||||
"name": "Name is the name of the service Required",
|
||||
}
|
||||
|
||||
func (ServiceReference) SwaggerDoc() map[string]string {
|
||||
return map_ServiceReference
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
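
// Editor's note: the function below is an illustrative sketch and is not part
// of the generated code. It shows how a caller (go-restful, or any other
// consumer) can look up the generated per-field documentation; the chosen type
// and field are arbitrary examples.
func exampleFieldDoc() string {
	docs := (ExternalAdmissionHook{}).SwaggerDoc()
	return docs["failurePolicy"] // description string for the failurePolicy field
}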
|
254
vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
generated
vendored
Normal file
|
@ -0,0 +1,254 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(RegisterDeepCopies)
|
||||
}
|
||||
|
||||
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
||||
// to allow building arbitrary schemes.
|
||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedDeepCopyFuncs(
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_AdmissionHookClientConfig, InType: reflect.TypeOf(&AdmissionHookClientConfig{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ExternalAdmissionHook, InType: reflect.TypeOf(&ExternalAdmissionHook{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ExternalAdmissionHookConfiguration, InType: reflect.TypeOf(&ExternalAdmissionHookConfiguration{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ExternalAdmissionHookConfigurationList, InType: reflect.TypeOf(&ExternalAdmissionHookConfigurationList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Initializer, InType: reflect.TypeOf(&Initializer{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_InitializerConfiguration, InType: reflect.TypeOf(&InitializerConfiguration{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_InitializerConfigurationList, InType: reflect.TypeOf(&InitializerConfigurationList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Rule, InType: reflect.TypeOf(&Rule{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RuleWithOperations, InType: reflect.TypeOf(&RuleWithOperations{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ServiceReference, InType: reflect.TypeOf(&ServiceReference{})},
|
||||
)
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_AdmissionHookClientConfig is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_AdmissionHookClientConfig(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*AdmissionHookClientConfig)
|
||||
out := out.(*AdmissionHookClientConfig)
|
||||
*out = *in
|
||||
if in.CABundle != nil {
|
||||
in, out := &in.CABundle, &out.CABundle
|
||||
*out = make([]byte, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_ExternalAdmissionHook is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_ExternalAdmissionHook(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*ExternalAdmissionHook)
|
||||
out := out.(*ExternalAdmissionHook)
|
||||
*out = *in
|
||||
if err := DeepCopy_v1alpha1_AdmissionHookClientConfig(&in.ClientConfig, &out.ClientConfig, c); err != nil {
|
||||
return err
|
||||
}
|
||||
if in.Rules != nil {
|
||||
in, out := &in.Rules, &out.Rules
|
||||
*out = make([]RuleWithOperations, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1alpha1_RuleWithOperations(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if in.FailurePolicy != nil {
|
||||
in, out := &in.FailurePolicy, &out.FailurePolicy
|
||||
*out = new(FailurePolicyType)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_ExternalAdmissionHookConfiguration is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_ExternalAdmissionHookConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*ExternalAdmissionHookConfiguration)
|
||||
out := out.(*ExternalAdmissionHookConfiguration)
|
||||
*out = *in
|
||||
if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil {
|
||||
return err
|
||||
} else {
|
||||
out.ObjectMeta = *newVal.(*v1.ObjectMeta)
|
||||
}
|
||||
if in.ExternalAdmissionHooks != nil {
|
||||
in, out := &in.ExternalAdmissionHooks, &out.ExternalAdmissionHooks
|
||||
*out = make([]ExternalAdmissionHook, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1alpha1_ExternalAdmissionHook(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_ExternalAdmissionHookConfigurationList is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_ExternalAdmissionHookConfigurationList(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*ExternalAdmissionHookConfigurationList)
|
||||
out := out.(*ExternalAdmissionHookConfigurationList)
|
||||
*out = *in
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ExternalAdmissionHookConfiguration, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1alpha1_ExternalAdmissionHookConfiguration(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_Initializer is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_Initializer(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*Initializer)
|
||||
out := out.(*Initializer)
|
||||
*out = *in
|
||||
if in.Rules != nil {
|
||||
in, out := &in.Rules, &out.Rules
|
||||
*out = make([]Rule, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1alpha1_Rule(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if in.FailurePolicy != nil {
|
||||
in, out := &in.FailurePolicy, &out.FailurePolicy
|
||||
*out = new(FailurePolicyType)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_InitializerConfiguration is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_InitializerConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*InitializerConfiguration)
|
||||
out := out.(*InitializerConfiguration)
|
||||
*out = *in
|
||||
if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil {
|
||||
return err
|
||||
} else {
|
||||
out.ObjectMeta = *newVal.(*v1.ObjectMeta)
|
||||
}
|
||||
if in.Initializers != nil {
|
||||
in, out := &in.Initializers, &out.Initializers
|
||||
*out = make([]Initializer, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1alpha1_Initializer(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_InitializerConfigurationList is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_InitializerConfigurationList(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*InitializerConfigurationList)
|
||||
out := out.(*InitializerConfigurationList)
|
||||
*out = *in
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]InitializerConfiguration, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1alpha1_InitializerConfiguration(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_Rule is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_Rule(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*Rule)
|
||||
out := out.(*Rule)
|
||||
*out = *in
|
||||
if in.APIGroups != nil {
|
||||
in, out := &in.APIGroups, &out.APIGroups
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.APIVersions != nil {
|
||||
in, out := &in.APIVersions, &out.APIVersions
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_RuleWithOperations is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_RuleWithOperations(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*RuleWithOperations)
|
||||
out := out.(*RuleWithOperations)
|
||||
*out = *in
|
||||
if in.Operations != nil {
|
||||
in, out := &in.Operations, &out.Operations
|
||||
*out = make([]OperationType, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if err := DeepCopy_v1alpha1_Rule(&in.Rule, &out.Rule, c); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1alpha1_ServiceReference is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1alpha1_ServiceReference(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*ServiceReference)
|
||||
out := out.(*ServiceReference)
|
||||
*out = *in
|
||||
return nil
|
||||
}
|
||||
}
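
// Editor's note: the function below is an illustrative sketch and is not part
// of the generated code. It shows how one of the generated deep-copy functions
// can be invoked directly; conversion.NewCloner is assumed to be available in
// the vendored k8s.io/apimachinery/pkg/conversion package at this revision.
func exampleRuleDeepCopy() (*Rule, error) {
	in := &Rule{
		APIGroups:   []string{"apps"},
		APIVersions: []string{"v1beta1"},
		Resources:   []string{"deployments"},
	}
	out := &Rule{}
	if err := DeepCopy_v1alpha1_Rule(in, out, conversion.NewCloner()); err != nil {
		return nil, err
	}
	return out, nil // out now holds slices that are independent of in
}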
|
|
@ -16,21 +16,20 @@ go_library(
|
|||
"types.generated.go",
|
||||
"types.go",
|
||||
"types_swagger_doc_generated.go",
|
||||
"zz_generated.conversion.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
"zz_generated.defaults.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
|
||||
"//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
|
||||
"//vendor/github.com/ugorji/go/codec:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/apis/policy:go_default_library",
|
||||
],
|
||||
)
|
|
@ -14,4 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
package v1beta1
|
File diff suppressed because it is too large
|
@ -19,19 +19,50 @@ limitations under the License.
|
|||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.kubernetes.pkg.apis.apps.v1beta1;
|
||||
package k8s.io.api.apps.v1beta1;
|
||||
|
||||
import "k8s.io/api/core/v1/generated.proto";
|
||||
import "k8s.io/api/policy/v1beta1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
||||
import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
|
||||
import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
|
||||
import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1beta1";
|
||||
|
||||
// ControllerRevision implements an immutable snapshot of state data. Clients
|
||||
// are responsible for serializing and deserializing the objects that contain
|
||||
// their internal state.
|
||||
// Once a ControllerRevision has been successfully created, it can not be updated.
|
||||
// The API Server will fail validation of all requests that attempt to mutate
|
||||
// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
|
||||
// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
|
||||
// it may be subject to name and representation changes in future releases, and clients should not
|
||||
// depend on its stability. It is primarily for internal use by controllers.
|
||||
message ControllerRevision {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Data is the serialized representation of the state.
|
||||
optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
|
||||
|
||||
// Revision indicates the revision of the state represented by Data.
|
||||
optional int64 revision = 3;
|
||||
}
|
||||
|
||||
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
|
||||
message ControllerRevisionList {
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// Items is the list of ControllerRevisions
|
||||
repeated ControllerRevision items = 2;
|
||||
}
|
||||
|
||||
// Deployment enables declarative updates for Pods and ReplicaSets.
|
||||
message Deployment {
|
||||
// Standard object metadata.
|
||||
|
@ -104,7 +135,7 @@ message DeploymentSpec {
|
|||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
|
||||
|
||||
// Template describes the pods that will be created.
|
||||
optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
|
||||
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
|
||||
|
||||
// The deployment strategy to use to replace existing pods with new ones.
|
||||
// +optional
|
||||
|
@ -170,6 +201,12 @@ message DeploymentStatus {
|
|||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
repeated DeploymentCondition conditions = 6;
|
||||
|
||||
// Count of hash collisions for the Deployment. The Deployment controller uses this
|
||||
// field as a collision avoidance mechanism when it needs to create the name for the
|
||||
// newest ReplicaSet.
|
||||
// +optional
|
||||
optional int64 collisionCount = 8;
|
||||
}
|
||||
|
||||
// DeploymentStrategy describes how to replace existing pods with new ones.
|
||||
|
@ -188,7 +225,7 @@ message DeploymentStrategy {
|
|||
}
|
||||
|
||||
message RollbackConfig {
|
||||
// The revision to rollback to. If set to 0, rollbck to the last revision.
|
||||
// The revision to rollback to. If set to 0, rollback to the last revision.
|
||||
// +optional
|
||||
optional int64 revision = 1;
|
||||
}
|
||||
|
@ -223,17 +260,24 @@ message RollingUpdateDeployment {
|
|||
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
|
||||
}
|
||||
|
||||
// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
|
||||
message RollingUpdateStatefulSetStrategy {
|
||||
// Partition indicates the ordinal at which the StatefulSet should be
|
||||
// partitioned.
|
||||
optional int32 partition = 1;
|
||||
}
|
||||
|
||||
// Scale represents a scaling request for a resource.
|
||||
message Scale {
|
||||
// Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
|
||||
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
|
||||
// +optional
|
||||
optional ScaleSpec spec = 2;
|
||||
|
||||
// current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
|
||||
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
|
||||
// +optional
|
||||
optional ScaleStatus status = 3;
|
||||
}
|
||||
|
@ -294,7 +338,7 @@ message StatefulSetList {
|
|||
|
||||
// A StatefulSetSpec is the specification of a StatefulSet.
|
||||
message StatefulSetSpec {
|
||||
// Replicas is the desired number of replicas of the given Template.
|
||||
// replicas is the desired number of replicas of the given Template.
|
||||
// These are replicas in the sense that they are instantiations of the
|
||||
// same Template, but individual replicas also have a consistent identity.
|
||||
// If unspecified, defaults to 1.
|
||||
|
@ -302,19 +346,19 @@ message StatefulSetSpec {
|
|||
// +optional
|
||||
optional int32 replicas = 1;
|
||||
|
||||
// Selector is a label query over pods that should match the replica count.
|
||||
// selector is a label query over pods that should match the replica count.
|
||||
// If empty, defaulted to labels on the pod template.
|
||||
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
|
||||
|
||||
// Template is the object that describes the pod that will be created if
|
||||
// template is the object that describes the pod that will be created if
|
||||
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
|
||||
// will fulfill this Template, but have a unique identity from the rest
|
||||
// of the StatefulSet.
|
||||
optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
|
||||
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
|
||||
|
||||
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
// volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
// The StatefulSet controller is responsible for mapping network identities to
|
||||
// claims in a way that maintains the identity of a pod. Every claim in
|
||||
// this list must have at least one matching (by name) volumeMount in one
|
||||
|
@ -322,23 +366,76 @@ message StatefulSetSpec {
|
|||
// any volumes in the template, with the same name.
|
||||
// TODO: Define the behavior if a claim already exists with the same name.
|
||||
// +optional
|
||||
repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
|
||||
repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
|
||||
|
||||
// ServiceName is the name of the service that governs this StatefulSet.
|
||||
// serviceName is the name of the service that governs this StatefulSet.
|
||||
// This service must exist before the StatefulSet, and is responsible for
|
||||
// the network identity of the set. Pods get DNS/hostnames that follow the
|
||||
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
|
||||
// where "pod-specific-string" is managed by the StatefulSet controller.
|
||||
optional string serviceName = 5;
|
||||
|
||||
// podManagementPolicy controls how pods are created during initial scale up,
|
||||
// when replacing pods on nodes, or when scaling down. The default policy is
|
||||
// `OrderedReady`, where pods are created in increasing order (pod-0, then
|
||||
// pod-1, etc) and the controller will wait until each pod is ready before
|
||||
// continuing. When scaling down, the pods are removed in the opposite order.
|
||||
// The alternative policy is `Parallel` which will create pods in parallel
|
||||
// to match the desired scale without waiting, and on scale down will delete
|
||||
// all pods at once.
|
||||
// +optional
|
||||
optional string podManagementPolicy = 6;
|
||||
|
||||
// updateStrategy indicates the StatefulSetUpdateStrategy that will be
|
||||
// employed to update Pods in the StatefulSet when a revision is made to
|
||||
// Template.
|
||||
optional StatefulSetUpdateStrategy updateStrategy = 7;
|
||||
|
||||
// revisionHistoryLimit is the maximum number of revisions that will
|
||||
// be maintained in the StatefulSet's revision history. The revision history
|
||||
// consists of all revisions not represented by a currently applied
|
||||
// StatefulSetSpec version. The default value is 10.
|
||||
optional int32 revisionHistoryLimit = 8;
|
||||
}
|
||||
|
||||
// StatefulSetStatus represents the current state of a StatefulSet.
|
||||
message StatefulSetStatus {
|
||||
// most recent generation observed by this StatefulSet.
|
||||
// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
|
||||
// StatefulSet's generation, which is updated on mutation by the API Server.
|
||||
// +optional
|
||||
optional int64 observedGeneration = 1;
|
||||
|
||||
// Replicas is the number of actual replicas.
|
||||
// replicas is the number of Pods created by the StatefulSet controller.
|
||||
optional int32 replicas = 2;
|
||||
|
||||
// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
|
||||
optional int32 readyReplicas = 3;
|
||||
|
||||
// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
|
||||
// indicated by currentRevision.
|
||||
optional int32 currentReplicas = 4;
|
||||
|
||||
// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
|
||||
// indicated by updateRevision.
|
||||
optional int32 updatedReplicas = 5;
|
||||
|
||||
// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
|
||||
// sequence [0,currentReplicas).
|
||||
optional string currentRevision = 6;
|
||||
|
||||
// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
|
||||
// [replicas-updatedReplicas,replicas)
|
||||
optional string updateRevision = 7;
|
||||
}
|
||||
|
||||
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
|
||||
// controller will use to perform updates. It includes any additional parameters
|
||||
// necessary to perform the update for the indicated strategy.
|
||||
message StatefulSetUpdateStrategy {
|
||||
// Type indicates the type of the StatefulSetUpdateStrategy.
|
||||
optional string type = 1;
|
||||
|
||||
// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
|
||||
optional RollingUpdateStatefulSetStrategy rollingUpdate = 2;
|
||||
}
|
||||
|
|
@ -36,18 +36,11 @@ func Resource(resource string) schema.GroupResource {
|
|||
var (
|
||||
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
|
||||
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs, addConversionFuncs)
|
||||
}
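
// Editor's note: the function below is an illustrative sketch and is not part
// of the vendored file. It shows the intended consumer entry point: build a
// runtime.Scheme and register this group's types through AddToScheme rather
// than calling the individual registration helpers.
func exampleNewScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}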
|
||||
|
||||
// Adds the list of known types to api.Scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
|
@ -57,6 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
|||
&Scale{},
|
||||
&StatefulSet{},
|
||||
&StatefulSetList{},
|
||||
&ControllerRevision{},
|
||||
&ControllerRevisionList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
File diff suppressed because it is too large
|
@ -17,14 +17,17 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// StatefulSetInitAnnotation if present, and set to false, indicates that a Pod's readiness should be ignored.
|
||||
StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized"
|
||||
ControllerRevisionHashLabelKey = "controller-revision-hash"
|
||||
StatefulSetRevisionLabel = ControllerRevisionHashLabelKey
|
||||
)
|
||||
|
||||
// ScaleSpec describes the attributes of a scale subresource
|
||||
|
@ -59,15 +62,15 @@ type ScaleStatus struct {
|
|||
// Scale represents a scaling request for a resource.
|
||||
type Scale struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
|
||||
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
|
||||
// +optional
|
||||
Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
|
||||
// current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
|
||||
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
|
||||
// +optional
|
||||
Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
@ -95,9 +98,60 @@ type StatefulSet struct {
|
|||
Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// PodManagementPolicyType defines the policy for creating pods under a stateful set.
|
||||
type PodManagementPolicyType string
|
||||
|
||||
const (
|
||||
// OrderedReadyPodManagement will create pods in strictly increasing order on
|
||||
// scale up and strictly decreasing order on scale down, progressing only when
|
||||
// the previous pod is ready or terminated. At most one pod will be changed
|
||||
// at any time.
|
||||
OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
|
||||
// ParallelPodManagement will create and delete pods as soon as the stateful set
|
||||
// replica count is changed, and will not wait for pods to be ready or complete
|
||||
// termination.
|
||||
ParallelPodManagement = "Parallel"
|
||||
)
|
||||
|
||||
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
|
||||
// controller will use to perform updates. It includes any additional parameters
|
||||
// necessary to perform the update for the indicated strategy.
|
||||
type StatefulSetUpdateStrategy struct {
|
||||
// Type indicates the type of the StatefulSetUpdateStrategy.
|
||||
Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"`
|
||||
// RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
|
||||
RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
|
||||
}
|
||||
|
||||
// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
|
||||
// all possible update strategies for the StatefulSet controller.
|
||||
type StatefulSetUpdateStrategyType string
|
||||
|
||||
const (
|
||||
// RollingUpdateStatefulSetStrategyType indicates that update will be
|
||||
// applied to all Pods in the StatefulSet with respect to the StatefulSet
|
||||
// ordering constraints. When a scale operation is performed with this
|
||||
// strategy, new Pods will be created from the specification version indicated
|
||||
// by the StatefulSet's updateRevision.
|
||||
RollingUpdateStatefulSetStrategyType = "RollingUpdate"
|
||||
// OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
|
||||
// tracking and ordered rolling restarts are disabled. Pods are recreated
|
||||
// from the StatefulSetSpec when they are manually deleted. When a scale
|
||||
// operation is performed with this strategy,specification version indicated
|
||||
// by the StatefulSet's currentRevision.
|
||||
OnDeleteStatefulSetStrategyType = "OnDelete"
|
||||
)
|
||||
|
||||
// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
|
||||
type RollingUpdateStatefulSetStrategy struct {
|
||||
// Partition indicates the ordinal at which the StatefulSet should be
|
||||
// partitioned.
|
||||
Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"`
|
||||
}
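
// Editor's note: the function below is an illustrative sketch and is not part
// of the generated API. It shows how the partitioned rolling update described
// above is expressed with these types; the partition value 2 is an arbitrary
// example (pods with ordinal >= 2 are updated, lower ordinals keep the current
// revision).
func exampleRollingUpdateStrategy() StatefulSetUpdateStrategy {
	partition := int32(2)
	return StatefulSetUpdateStrategy{
		Type:          RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &RollingUpdateStatefulSetStrategy{Partition: &partition},
	}
}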
|
||||
|
||||
// A StatefulSetSpec is the specification of a StatefulSet.
|
||||
type StatefulSetSpec struct {
|
||||
// Replicas is the desired number of replicas of the given Template.
|
||||
// replicas is the desired number of replicas of the given Template.
|
||||
// These are replicas in the sense that they are instantiations of the
|
||||
// same Template, but individual replicas also have a consistent identity.
|
||||
// If unspecified, defaults to 1.
|
||||
|
@ -105,19 +159,19 @@ type StatefulSetSpec struct {
|
|||
// +optional
|
||||
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
|
||||
|
||||
// Selector is a label query over pods that should match the replica count.
|
||||
// selector is a label query over pods that should match the replica count.
|
||||
// If empty, defaulted to labels on the pod template.
|
||||
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
|
||||
// +optional
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
|
||||
|
||||
// Template is the object that describes the pod that will be created if
|
||||
// template is the object that describes the pod that will be created if
|
||||
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
|
||||
// will fulfill this Template, but have a unique identity from the rest
|
||||
// of the StatefulSet.
|
||||
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"`
|
||||
|
||||
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
// volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
// The StatefulSet controller is responsible for mapping network identities to
|
||||
// claims in a way that maintains the identity of a pod. Every claim in
|
||||
// this list must have at least one matching (by name) volumeMount in one
|
||||
|
@ -127,22 +181,64 @@ type StatefulSetSpec struct {
|
|||
// +optional
|
||||
VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"`
|
||||
|
||||
// ServiceName is the name of the service that governs this StatefulSet.
|
||||
// serviceName is the name of the service that governs this StatefulSet.
|
||||
// This service must exist before the StatefulSet, and is responsible for
|
||||
// the network identity of the set. Pods get DNS/hostnames that follow the
|
||||
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
|
||||
// where "pod-specific-string" is managed by the StatefulSet controller.
|
||||
ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"`
|
||||
|
||||
// podManagementPolicy controls how pods are created during initial scale up,
|
||||
// when replacing pods on nodes, or when scaling down. The default policy is
|
||||
// `OrderedReady`, where pods are created in increasing order (pod-0, then
|
||||
// pod-1, etc) and the controller will wait until each pod is ready before
|
||||
// continuing. When scaling down, the pods are removed in the opposite order.
|
||||
// The alternative policy is `Parallel` which will create pods in parallel
|
||||
// to match the desired scale without waiting, and on scale down will delete
|
||||
// all pods at once.
|
||||
// +optional
|
||||
PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"`
|
||||
|
||||
// updateStrategy indicates the StatefulSetUpdateStrategy that will be
|
||||
// employed to update Pods in the StatefulSet when a revision is made to
|
||||
// Template.
|
||||
UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"`
|
||||
|
||||
// revisionHistoryLimit is the maximum number of revisions that will
|
||||
// be maintained in the StatefulSet's revision history. The revision history
|
||||
// consists of all revisions not represented by a currently applied
|
||||
// StatefulSetSpec version. The default value is 10.
|
||||
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
|
||||
}
|
||||
|
||||
// StatefulSetStatus represents the current state of a StatefulSet.
|
||||
type StatefulSetStatus struct {
|
||||
// most recent generation observed by this StatefulSet.
|
||||
// observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
|
||||
// StatefulSet's generation, which is updated on mutation by the API Server.
|
||||
// +optional
|
||||
ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
|
||||
|
||||
// Replicas is the number of actual replicas.
|
||||
// replicas is the number of Pods created by the StatefulSet controller.
|
||||
Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"`
|
||||
|
||||
// readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
|
||||
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"`
|
||||
|
||||
// currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
|
||||
// indicated by currentRevision.
|
||||
CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"`
|
||||
|
||||
// updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
|
||||
// indicated by updateRevision.
|
||||
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"`
|
||||
|
||||
// currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
|
||||
// sequence [0,currentReplicas).
|
||||
CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"`
|
||||
|
||||
// updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
|
||||
// [replicas-updatedReplicas,replicas)
|
||||
UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"`
|
||||
}
|
||||
|
||||
// StatefulSetList is a collection of StatefulSets.
|
||||
|
@ -233,7 +329,7 @@ type DeploymentRollback struct {
|
|||
}
|
||||
|
||||
type RollbackConfig struct {
|
||||
// The revision to rollback to. If set to 0, rollbck to the last revision.
|
||||
// The revision to rollback to. If set to 0, rollback to the last revision.
|
||||
// +optional
|
||||
Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"`
|
||||
}
|
||||
|
@ -330,6 +426,12 @@ type DeploymentStatus struct {
|
|||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
|
||||
|
||||
// Count of hash collisions for the Deployment. The Deployment controller uses this
|
||||
// field as a collision avoidance mechanism when it needs to create the name for the
|
||||
// newest ReplicaSet.
|
||||
// +optional
|
||||
CollisionCount *int64 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"`
|
||||
}
|
||||
|
||||
type DeploymentConditionType string
|
||||
|
@ -354,7 +456,7 @@ type DeploymentCondition struct {
|
|||
// Type of deployment condition.
|
||||
Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
|
||||
// Status of the condition, one of True, False, Unknown.
|
||||
Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
|
||||
Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
|
||||
// The last time this condition was updated.
|
||||
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
|
||||
// Last time the condition transitioned from one status to another.
|
||||
|
@ -375,3 +477,40 @@ type DeploymentList struct {
|
|||
// Items is the list of Deployments.
|
||||
Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient=true
|
||||
|
||||
// ControllerRevision implements an immutable snapshot of state data. Clients
|
||||
// are responsible for serializing and deserializing the objects that contain
|
||||
// their internal state.
|
||||
// Once a ControllerRevision has been successfully created, it can not be updated.
|
||||
// The API Server will fail validation of all requests that attempt to mutate
|
||||
// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
|
||||
// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
|
||||
// it may be subject to name and representation changes in future releases, and clients should not
|
||||
// depend on its stability. It is primarily for internal use by controllers.
|
||||
type ControllerRevision struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Data is the serialized representation of the state.
|
||||
Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
|
||||
|
||||
// Revision indicates the revision of the state represented by Data.
|
||||
Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"`
|
||||
}
|
||||
|
||||
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
|
||||
type ControllerRevisionList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is the list of ControllerRevisions
|
||||
Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
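
// Editor's note: the function below is an illustrative sketch and is not part
// of the vendored API. It shows how a controller might snapshot serialized
// state in a ControllerRevision; the object name, payload, and revision number
// are arbitrary examples.
func exampleControllerRevision() ControllerRevision {
	return ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{Name: "web-6b7f5d8c9"},
		Data:       runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"metadata":{"labels":{"app":"web"}}}}}`)},
		Revision:   3,
	}
}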
|
|
@ -27,6 +27,27 @@ package v1beta1
|
|||
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE
|
||||
var map_ControllerRevision = map[string]string{
|
||||
"": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"data": "Data is the serialized representation of the state.",
|
||||
"revision": "Revision indicates the revision of the state represented by Data.",
|
||||
}
|
||||
|
||||
func (ControllerRevision) SwaggerDoc() map[string]string {
|
||||
return map_ControllerRevision
|
||||
}
|
||||
|
||||
var map_ControllerRevisionList = map[string]string{
|
||||
"": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
|
||||
"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
"items": "Items is the list of ControllerRevisions",
|
||||
}
|
||||
|
||||
func (ControllerRevisionList) SwaggerDoc() map[string]string {
|
||||
return map_ControllerRevisionList
|
||||
}
|
||||
|
||||
var map_Deployment = map[string]string{
|
||||
"": "Deployment enables declarative updates for Pods and ReplicaSets.",
|
||||
"metadata": "Standard object metadata.",
|
||||
|
@@ -99,6 +120,7 @@ var map_DeploymentStatus = map[string]string{
|
|||
"availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
|
||||
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment.",
|
||||
"conditions": "Represents the latest available observations of a deployment's current state.",
|
||||
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
|
||||
}
|
||||
|
||||
func (DeploymentStatus) SwaggerDoc() map[string]string {
|
||||
|
@@ -116,7 +138,7 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_RollbackConfig = map[string]string{
|
||||
"revision": "The revision to rollback to. If set to 0, rollbck to the last revision.",
|
||||
"revision": "The revision to rollback to. If set to 0, rollback to the last revision.",
|
||||
}
|
||||
|
||||
func (RollbackConfig) SwaggerDoc() map[string]string {
|
||||
|
@@ -133,11 +155,20 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
|
|||
return map_RollingUpdateDeployment
|
||||
}
|
||||
|
||||
var map_RollingUpdateStatefulSetStrategy = map[string]string{
|
||||
"": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
|
||||
"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.",
|
||||
}
|
||||
|
||||
func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
|
||||
return map_RollingUpdateStatefulSetStrategy
|
||||
}
|
||||
|
||||
var map_Scale = map[string]string{
|
||||
"": "Scale represents a scaling request for a resource.",
|
||||
"metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.",
|
||||
"spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
|
||||
"status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.",
|
||||
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
|
||||
"spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
|
||||
"status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
|
||||
}
|
||||
|
||||
func (Scale) SwaggerDoc() map[string]string {
|
||||
|
@@ -184,11 +215,14 @@ func (StatefulSetList) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_StatefulSetSpec = map[string]string{
|
||||
"": "A StatefulSetSpec is the specification of a StatefulSet.",
|
||||
"replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
|
||||
"selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
|
||||
"template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
|
||||
"volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
|
||||
"serviceName": "ServiceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
|
||||
"replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
|
||||
"selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
|
||||
"template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
|
||||
"volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
|
||||
"serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
|
||||
"podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
|
||||
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
|
||||
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
|
||||
}
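Taken together, these descriptions spell out the StatefulSetSpec contract: a selector that matches the pod template's labels, a governing headless Service named by serviceName, and an update strategy drawn from the strategy types documented further down. A minimal sketch of a spec that satisfies those fields; the names, image, and vendored import paths are illustrative assumptions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" // assumed vendored path
)

func main() {
	replicas := int32(3)
	labels := map[string]string{"app": "web"}
	spec := appsv1beta1.StatefulSetSpec{
		Replicas:    &replicas,
		Selector:    &metav1.LabelSelector{MatchLabels: labels},
		ServiceName: "web", // the governing headless Service, assumed to exist already
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "web", Image: "nginx:1.13"}},
			},
		},
		UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{
			Type: appsv1beta1.RollingUpdateStatefulSetStrategyType,
		},
	}
	fmt.Println(spec.ServiceName, *spec.Replicas)
}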
|
||||
|
||||
func (StatefulSetSpec) SwaggerDoc() map[string]string {
|
||||
|
@@ -197,12 +231,27 @@ func (StatefulSetSpec) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_StatefulSetStatus = map[string]string{
|
||||
"": "StatefulSetStatus represents the current state of a StatefulSet.",
|
||||
"observedGeneration": "most recent generation observed by this StatefulSet.",
|
||||
"replicas": "Replicas is the number of actual replicas.",
|
||||
"observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
|
||||
"replicas": "replicas is the number of Pods created by the StatefulSet controller.",
|
||||
"readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
|
||||
"currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
|
||||
"updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
|
||||
"currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
|
||||
"updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
|
||||
}
|
||||
|
||||
func (StatefulSetStatus) SwaggerDoc() map[string]string {
|
||||
return map_StatefulSetStatus
|
||||
}
|
||||
|
||||
var map_StatefulSetUpdateStrategy = map[string]string{
|
||||
"": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
|
||||
"type": "Type indicates the type of the StatefulSetUpdateStrategy.",
|
||||
"rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
|
||||
}
|
||||
|
||||
func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string {
|
||||
return map_StatefulSetUpdateStrategy
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
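The maps above are what the generated SwaggerDoc methods return; documentation tooling reads them to attach these descriptions to the published API spec. A small sketch of consuming one of the maps directly, under the same assumed import path as before:

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" // assumed vendored path
)

func main() {
	// SwaggerDoc returns the field-name -> description map generated above;
	// the empty key "" holds the description of the type itself.
	docs := (appsv1beta1.ControllerRevision{}).SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["revision"])
}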
|
|
@@ -21,11 +21,11 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
intstr "k8s.io/apimachinery/pkg/util/intstr"
|
||||
api_v1 "k8s.io/client-go/pkg/api/v1"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
|
@@ -37,6 +37,8 @@ func init() {
|
|||
// to allow building arbitrary schemes.
|
||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedDeepCopyFuncs(
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ControllerRevision, InType: reflect.TypeOf(&ControllerRevision{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ControllerRevisionList, InType: reflect.TypeOf(&ControllerRevisionList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})},
|
||||
|
@@ -46,6 +48,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
|||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateStatefulSetStrategy, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})},
|
||||
|
@@ -53,9 +56,49 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
|||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetStatus, InType: reflect.TypeOf(&StatefulSetStatus{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetUpdateStrategy, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})},
|
||||
)
|
||||
}
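RegisterDeepCopies hands the generated DeepCopy_v1beta1_* functions to a runtime.Scheme, so objects registered there can be copied without sharing mutable state. A minimal usage sketch, assuming the same vendored package path as the surrounding file:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" // assumed vendored path
)

func main() {
	scheme := runtime.NewScheme()
	// Installs every generated deep-copy function listed above into the scheme.
	if err := appsv1beta1.RegisterDeepCopies(scheme); err != nil {
		fmt.Println("registering deep-copy functions failed:", err)
		return
	}
	fmt.Println("deep-copy functions registered")
}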
|
||||
|
||||
// DeepCopy_v1beta1_ControllerRevision is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1beta1_ControllerRevision(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*ControllerRevision)
|
||||
out := out.(*ControllerRevision)
|
||||
*out = *in
|
||||
if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil {
|
||||
return err
|
||||
} else {
|
||||
out.ObjectMeta = *newVal.(*v1.ObjectMeta)
|
||||
}
|
||||
if newVal, err := c.DeepCopy(&in.Data); err != nil {
|
||||
return err
|
||||
} else {
|
||||
out.Data = *newVal.(*runtime.RawExtension)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1beta1_ControllerRevisionList is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1beta1_ControllerRevisionList(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*ControllerRevisionList)
|
||||
out := out.(*ControllerRevisionList)
|
||||
*out = *in
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ControllerRevision, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1beta1_ControllerRevision(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1beta1_Deployment is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
|
@@ -144,7 +187,7 @@ func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *convers
|
|||
*out = newVal.(*v1.LabelSelector)
|
||||
}
|
||||
}
|
||||
if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil {
|
||||
if err := core_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil {
|
||||
|
@@ -184,6 +227,11 @@ func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conve
|
|||
}
|
||||
}
|
||||
}
|
||||
if in.CollisionCount != nil {
|
||||
in, out := &in.CollisionCount, &out.CollisionCount
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@@ -235,6 +283,21 @@ func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c
|
|||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1beta1_RollingUpdateStatefulSetStrategy is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1beta1_RollingUpdateStatefulSetStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*RollingUpdateStatefulSetStrategy)
|
||||
out := out.(*RollingUpdateStatefulSetStrategy)
|
||||
*out = *in
|
||||
if in.Partition != nil {
|
||||
in, out := &in.Partition, &out.Partition
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1beta1_Scale is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
|
@@ -339,18 +402,26 @@ func DeepCopy_v1beta1_StatefulSetSpec(in interface{}, out interface{}, c *conver
|
|||
*out = newVal.(*v1.LabelSelector)
|
||||
}
|
||||
}
|
||||
if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil {
|
||||
if err := core_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil {
|
||||
return err
|
||||
}
|
||||
if in.VolumeClaimTemplates != nil {
|
||||
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
|
||||
*out = make([]api_v1.PersistentVolumeClaim, len(*in))
|
||||
*out = make([]core_v1.PersistentVolumeClaim, len(*in))
|
||||
for i := range *in {
|
||||
if err := api_v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
if err := core_v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := DeepCopy_v1beta1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil {
|
||||
return err
|
||||
}
|
||||
if in.RevisionHistoryLimit != nil {
|
||||
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@@ -369,3 +440,20 @@ func DeepCopy_v1beta1_StatefulSetStatus(in interface{}, out interface{}, c *conv
|
|||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy_v1beta1_StatefulSetUpdateStrategy is an autogenerated deepcopy function.
|
||||
func DeepCopy_v1beta1_StatefulSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*StatefulSetUpdateStrategy)
|
||||
out := out.(*StatefulSetUpdateStrategy)
|
||||
*out = *in
|
||||
if in.RollingUpdate != nil {
|
||||
in, out := &in.RollingUpdate, &out.RollingUpdate
|
||||
*out = new(RollingUpdateStatefulSetStrategy)
|
||||
if err := DeepCopy_v1beta1_RollingUpdateStatefulSetStrategy(*in, *out, c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
|
@@ -15,9 +15,7 @@ go_library(
|
|||
"register.go",
|
||||
"types.go",
|
||||
"types_swagger_doc_generated.go",
|
||||
"zz_generated.conversion.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
"zz_generated.defaults.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
|
@@ -27,6 +25,5 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/apis/storage:go_default_library",
|
||||
],
|
||||
)
|
|
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +groupName=authentication.k8s.io
|
||||
// +k8s:openapi-gen=true
|
||||
package v1
|
|
@@ -15,14 +15,14 @@ limitations under the License.
|
|||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1 is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto
|
||||
k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto
|
||||
|
||||
It has these top-level messages:
|
||||
ExtraValue
|
||||
|
@@ -37,9 +37,10 @@ import proto "github.com/gogo/protobuf/proto"
|
|||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
||||
|
||||
import io "io"
|
||||
|
||||
|
@@ -75,11 +76,11 @@ func (*UserInfo) ProtoMessage() {}
|
|||
func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.ExtraValue")
|
||||
proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReview")
|
||||
proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewSpec")
|
||||
proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewStatus")
|
||||
proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.UserInfo")
|
||||
proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue")
|
||||
proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview")
|
||||
proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec")
|
||||
proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus")
|
||||
proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo")
|
||||
}
|
||||
func (m ExtraValue) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
|
@@ -255,10 +256,15 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
|
|||
}
|
||||
}
|
||||
if len(m.Extra) > 0 {
|
||||
keysForExtra := make([]string, 0, len(m.Extra))
|
||||
for k := range m.Extra {
|
||||
keysForExtra = append(keysForExtra, string(k))
|
||||
}
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
|
||||
for _, k := range keysForExtra {
|
||||
dAtA[i] = 0x22
|
||||
i++
|
||||
v := m.Extra[k]
|
||||
v := m.Extra[string(k)]
|
||||
msgSize := 0
|
||||
if (&v) != nil {
|
||||
msgSize = (&v).Size()
|
||||
|
@@ -1247,50 +1253,50 @@ var (
|
|||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("k8s.io/client-go/pkg/apis/authentication/v1/generated.proto", fileDescriptorGenerated)
|
||||
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated)
|
||||
}
|
||||
|
||||
var fileDescriptorGenerated = []byte{
|
||||
// 655 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c,
|
||||
0x14, 0xb5, 0xf3, 0x53, 0x25, 0x93, 0xaf, 0x1f, 0x65, 0x24, 0xa4, 0x28, 0x12, 0x4e, 0x14, 0x58,
|
||||
0x74, 0x51, 0xc6, 0xa4, 0xa0, 0x52, 0x15, 0x10, 0xaa, 0x45, 0x85, 0xba, 0x00, 0xa4, 0x81, 0x22,
|
||||
0xc4, 0x06, 0x26, 0xce, 0xad, 0x63, 0x52, 0xff, 0x68, 0x3c, 0x36, 0xed, 0xae, 0x8f, 0xc0, 0x92,
|
||||
0x25, 0xaf, 0xc1, 0x1b, 0x74, 0x47, 0x77, 0xb0, 0x40, 0x15, 0x0d, 0x2f, 0x82, 0x66, 0x3c, 0xd4,
|
||||
0x2e, 0x69, 0x85, 0xda, 0xdd, 0xcc, 0x99, 0x7b, 0xce, 0xbd, 0xe7, 0xde, 0xb9, 0xe8, 0xc1, 0x64,
|
||||
0x35, 0x21, 0x7e, 0x64, 0x4f, 0xd2, 0x21, 0xf0, 0x10, 0x04, 0x24, 0x76, 0x3c, 0xf1, 0x6c, 0x16,
|
||||
0xfb, 0x89, 0xcd, 0x52, 0x31, 0x86, 0x50, 0xf8, 0x2e, 0x13, 0x7e, 0x14, 0xda, 0xd9, 0xc0, 0xf6,
|
||||
0x20, 0x04, 0xce, 0x04, 0x8c, 0x48, 0xcc, 0x23, 0x11, 0xe1, 0xa5, 0x9c, 0x4d, 0x0a, 0x36, 0x89,
|
||||
0x27, 0x1e, 0x91, 0x6c, 0x72, 0x9a, 0x4d, 0xb2, 0x41, 0xe7, 0x96, 0xe7, 0x8b, 0x71, 0x3a, 0x24,
|
||||
0x6e, 0x14, 0xd8, 0x5e, 0xe4, 0x45, 0xb6, 0x12, 0x19, 0xa6, 0xdb, 0xea, 0xa6, 0x2e, 0xea, 0x94,
|
||||
0x8b, 0x77, 0xee, 0xea, 0xd2, 0x58, 0xec, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0xbd, 0xa2, 0xb8,
|
||||
0x00, 0x04, 0x3b, 0xa3, 0xa4, 0x8e, 0x7d, 0x1e, 0x8b, 0xa7, 0xa1, 0xf0, 0x03, 0x98, 0x21, 0xac,
|
||||
0xfc, 0x8b, 0x90, 0xb8, 0x63, 0x08, 0xd8, 0x0c, 0xef, 0xce, 0x79, 0xbc, 0x54, 0xf8, 0x3b, 0xb6,
|
||||
0x1f, 0x8a, 0x44, 0xf0, 0x19, 0x52, 0xc9, 0x53, 0x02, 0x3c, 0x03, 0x5e, 0x18, 0x82, 0x5d, 0x16,
|
||||
0xc4, 0x3b, 0x70, 0x86, 0xa7, 0xfe, 0x3d, 0x84, 0x36, 0x76, 0x05, 0x67, 0xaf, 0xd8, 0x4e, 0x0a,
|
||||
0xb8, 0x8b, 0xea, 0xbe, 0x80, 0x20, 0x69, 0x9b, 0xbd, 0xea, 0x62, 0xd3, 0x69, 0x4e, 0x8f, 0xba,
|
||||
0xf5, 0x4d, 0x09, 0xd0, 0x1c, 0x5f, 0x6b, 0x7c, 0xfa, 0xdc, 0x35, 0xf6, 0x7f, 0xf4, 0x8c, 0xfe,
|
||||
0x97, 0x0a, 0x6a, 0xbd, 0x8c, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x80, 0xdf, 0xa1, 0x86, 0xec,
|
||||
0xdb, 0x88, 0x09, 0xd6, 0x36, 0x7b, 0xe6, 0x62, 0x6b, 0xf9, 0x36, 0xd1, 0x23, 0x2c, 0xdb, 0x28,
|
||||
0x86, 0x28, 0xa3, 0x49, 0x36, 0x20, 0xcf, 0x87, 0xef, 0xc1, 0x15, 0x4f, 0x41, 0x30, 0x07, 0x1f,
|
||||
0x1c, 0x75, 0x8d, 0xe9, 0x51, 0x17, 0x15, 0x18, 0x3d, 0x51, 0xc5, 0x6f, 0x51, 0x2d, 0x89, 0xc1,
|
||||
0x6d, 0x57, 0x94, 0xfa, 0x43, 0x72, 0x91, 0x0f, 0x42, 0x4a, 0xa5, 0xbe, 0x88, 0xc1, 0x75, 0xfe,
|
||||
0xd3, 0xa9, 0x6a, 0xf2, 0x46, 0x95, 0x30, 0xf6, 0xd0, 0x5c, 0x22, 0x98, 0x48, 0x93, 0x76, 0x55,
|
||||
0xa5, 0x78, 0x74, 0xf9, 0x14, 0x4a, 0xc6, 0xf9, 0x5f, 0x27, 0x99, 0xcb, 0xef, 0x54, 0xcb, 0xf7,
|
||||
0x57, 0xd0, 0x95, 0xbf, 0xea, 0xc1, 0x37, 0x50, 0x5d, 0x48, 0x48, 0xf5, 0xae, 0xe9, 0xcc, 0x6b,
|
||||
0x66, 0x3d, 0x8f, 0xcb, 0xdf, 0xfa, 0x5f, 0x4d, 0x74, 0x75, 0x26, 0x0b, 0xbe, 0x8f, 0xe6, 0x4b,
|
||||
0xc5, 0xc0, 0x48, 0x49, 0x34, 0x9c, 0x6b, 0x5a, 0x62, 0x7e, 0xbd, 0xfc, 0x48, 0x4f, 0xc7, 0xe2,
|
||||
0xd7, 0xa8, 0x96, 0x26, 0xc0, 0x75, 0x53, 0x57, 0x2e, 0xe6, 0x78, 0x2b, 0x01, 0xbe, 0x19, 0x6e,
|
||||
0x47, 0x45, 0x37, 0x25, 0x42, 0x95, 0xa2, 0x74, 0x04, 0x9c, 0x47, 0x5c, 0x35, 0xb3, 0xe4, 0x68,
|
||||
0x43, 0x82, 0x34, 0x7f, 0xeb, 0x7f, 0xab, 0xa0, 0xc6, 0x1f, 0x15, 0xbc, 0x84, 0x1a, 0x92, 0x19,
|
||||
0xb2, 0x00, 0x74, 0x1b, 0x16, 0x34, 0x49, 0xc5, 0x48, 0x9c, 0x9e, 0x44, 0xe0, 0xeb, 0xa8, 0x9a,
|
||||
0xfa, 0x23, 0x55, 0x78, 0xd3, 0x69, 0xe9, 0xc0, 0xea, 0xd6, 0xe6, 0x63, 0x2a, 0x71, 0xdc, 0x47,
|
||||
0x73, 0x1e, 0x8f, 0xd2, 0x58, 0x0e, 0x53, 0xfe, 0x65, 0x24, 0xe7, 0xf0, 0x44, 0x21, 0x54, 0xbf,
|
||||
0xe0, 0x6d, 0x54, 0x07, 0xf9, 0xf9, 0xdb, 0xb5, 0x5e, 0x75, 0xb1, 0xb5, 0xbc, 0x7e, 0x39, 0xf7,
|
||||
0x44, 0x2d, 0xd0, 0x46, 0x28, 0xf8, 0x5e, 0xc9, 0xa5, 0xc4, 0x68, 0x2e, 0xdf, 0xe1, 0x7a, 0xc9,
|
||||
0x54, 0x0c, 0x5e, 0x40, 0xd5, 0x09, 0xec, 0xe5, 0x0e, 0xa9, 0x3c, 0xe2, 0x67, 0xa8, 0x9e, 0xc9,
|
||||
0xfd, 0xd3, 0x53, 0x58, 0xbd, 0x58, 0x1d, 0xc5, 0xfe, 0xd2, 0x5c, 0x66, 0xad, 0xb2, 0x6a, 0x3a,
|
||||
0x37, 0x0f, 0x8e, 0x2d, 0xe3, 0xf0, 0xd8, 0x32, 0xbe, 0x1f, 0x5b, 0xc6, 0xfe, 0xd4, 0x32, 0x0f,
|
||||
0xa6, 0x96, 0x79, 0x38, 0xb5, 0xcc, 0x9f, 0x53, 0xcb, 0xfc, 0xf8, 0xcb, 0x32, 0xde, 0x54, 0xb2,
|
||||
0xc1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x6b, 0x11, 0x20, 0xa4, 0x05, 0x00, 0x00,
|
||||
// 642 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xb5, 0xf3, 0x51, 0x25, 0x1b, 0x0a, 0x65, 0x25, 0xa4, 0x28, 0x02, 0x27, 0x0a, 0x12, 0xca,
|
||||
0x81, 0xae, 0x49, 0x41, 0xa5, 0x2a, 0x12, 0x12, 0x16, 0x11, 0xf4, 0x80, 0x2a, 0x2d, 0xb4, 0x48,
|
||||
0x9c, 0xd8, 0x38, 0x53, 0xc7, 0xa4, 0xfe, 0xd0, 0x7a, 0x6d, 0xe8, 0xad, 0x3f, 0x81, 0x23, 0xdc,
|
||||
0xf8, 0x17, 0x1c, 0xb9, 0xf6, 0xd8, 0x63, 0x0f, 0xa8, 0xa2, 0xe6, 0x8f, 0xa0, 0x5d, 0x2f, 0x4d,
|
||||
0xda, 0xd2, 0xd0, 0x9b, 0xf7, 0xcd, 0x7b, 0x6f, 0x67, 0x9e, 0x77, 0xd0, 0x60, 0xb2, 0x96, 0x10,
|
||||
0x3f, 0xb2, 0x27, 0xe9, 0x10, 0x78, 0x08, 0x02, 0x12, 0x3b, 0x83, 0x70, 0x14, 0x71, 0x5b, 0x17,
|
||||
0x58, 0xec, 0xdb, 0x2c, 0x15, 0x63, 0x08, 0x85, 0xef, 0x32, 0xe1, 0x47, 0xa1, 0x9d, 0xf5, 0x6d,
|
||||
0x0f, 0x42, 0xe0, 0x4c, 0xc0, 0x88, 0xc4, 0x3c, 0x12, 0x11, 0xbe, 0x5d, 0xb0, 0x09, 0x8b, 0x7d,
|
||||
0x72, 0x96, 0x4d, 0xb2, 0x7e, 0x6b, 0xd9, 0xf3, 0xc5, 0x38, 0x1d, 0x12, 0x37, 0x0a, 0x6c, 0x2f,
|
||||
0xf2, 0x22, 0x5b, 0x89, 0x86, 0xe9, 0x8e, 0x3a, 0xa9, 0x83, 0xfa, 0x2a, 0xcc, 0x5a, 0x8f, 0xa6,
|
||||
0x57, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0x3d, 0x3b, 0x9e, 0x78, 0x12, 0x48, 0xec, 0x00, 0x04,
|
||||
0xfb, 0x47, 0x0b, 0x2d, 0xfb, 0x32, 0x15, 0x4f, 0x43, 0xe1, 0x07, 0x70, 0x41, 0xb0, 0xfa, 0x3f,
|
||||
0x41, 0xe2, 0x8e, 0x21, 0x60, 0x17, 0x74, 0x0f, 0x2f, 0xd3, 0xa5, 0xc2, 0xdf, 0xb5, 0xfd, 0x50,
|
||||
0x24, 0x82, 0x9f, 0x17, 0x75, 0x1f, 0x23, 0x34, 0xf8, 0x24, 0x38, 0xdb, 0x66, 0xbb, 0x29, 0xe0,
|
||||
0x36, 0xaa, 0xfa, 0x02, 0x82, 0xa4, 0x69, 0x76, 0xca, 0xbd, 0xba, 0x53, 0xcf, 0x8f, 0xdb, 0xd5,
|
||||
0x0d, 0x09, 0xd0, 0x02, 0x5f, 0xaf, 0x7d, 0xf9, 0xd6, 0x36, 0xf6, 0x7f, 0x76, 0x8c, 0xee, 0xd7,
|
||||
0x12, 0x6a, 0xbc, 0x89, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x88, 0xdf, 0xa3, 0x9a, 0x4c, 0x60,
|
||||
0xc4, 0x04, 0x6b, 0x9a, 0x1d, 0xb3, 0xd7, 0x58, 0x79, 0x40, 0xa6, 0xe1, 0x9f, 0x36, 0x44, 0xe2,
|
||||
0x89, 0x27, 0x81, 0x84, 0x48, 0x36, 0xc9, 0xfa, 0x64, 0x73, 0xf8, 0x01, 0x5c, 0xf1, 0x0a, 0x04,
|
||||
0x73, 0xf0, 0xc1, 0x71, 0xdb, 0xc8, 0x8f, 0xdb, 0x68, 0x8a, 0xd1, 0x53, 0x57, 0xbc, 0x89, 0x2a,
|
||||
0x49, 0x0c, 0x6e, 0xb3, 0xa4, 0xdc, 0x97, 0xc9, 0xbc, 0x5f, 0x4b, 0x66, 0x5a, 0x7b, 0x1d, 0x83,
|
||||
0xeb, 0x5c, 0xd3, 0xd6, 0x15, 0x79, 0xa2, 0xca, 0x08, 0xbf, 0x45, 0x0b, 0x89, 0x60, 0x22, 0x4d,
|
||||
0x9a, 0x65, 0x65, 0x69, 0x5f, 0xdd, 0x52, 0xc9, 0x9c, 0xeb, 0xda, 0x74, 0xa1, 0x38, 0x53, 0x6d,
|
||||
0xd7, 0x5d, 0x45, 0x37, 0xce, 0xdd, 0x8f, 0xef, 0xa2, 0xaa, 0x90, 0x90, 0xca, 0xa6, 0xee, 0x2c,
|
||||
0x6a, 0x65, 0xb5, 0xe0, 0x15, 0xb5, 0xee, 0x0f, 0x13, 0xdd, 0xbc, 0x70, 0x0b, 0x7e, 0x82, 0x16,
|
||||
0x67, 0x9a, 0x81, 0x91, 0xb2, 0xa8, 0x39, 0xb7, 0xb4, 0xc5, 0xe2, 0xb3, 0xd9, 0x22, 0x3d, 0xcb,
|
||||
0xc5, 0x2f, 0x51, 0x25, 0x4d, 0x80, 0xeb, 0xd0, 0xee, 0xcd, 0x9f, 0x70, 0x2b, 0x01, 0xbe, 0x11,
|
||||
0xee, 0x44, 0xd3, 0xb4, 0x24, 0x42, 0x95, 0x83, 0x9c, 0x00, 0x38, 0x8f, 0xb8, 0x0a, 0x6b, 0x66,
|
||||
0x82, 0x81, 0x04, 0x69, 0x51, 0xeb, 0x7e, 0x2f, 0xa1, 0xda, 0x5f, 0x17, 0x7c, 0x1f, 0xd5, 0xa4,
|
||||
0x32, 0x64, 0x01, 0xe8, 0xb1, 0x97, 0xb4, 0x48, 0x71, 0x24, 0x4e, 0x4f, 0x19, 0xf8, 0x0e, 0x2a,
|
||||
0xa7, 0xfe, 0x48, 0x35, 0x5a, 0x77, 0x1a, 0x9a, 0x58, 0xde, 0xda, 0x78, 0x4e, 0x25, 0x8e, 0xbb,
|
||||
0x68, 0xc1, 0xe3, 0x51, 0x1a, 0xcb, 0x9f, 0x25, 0xdf, 0x26, 0x92, 0xb9, 0xbf, 0x50, 0x08, 0xd5,
|
||||
0x15, 0xbc, 0x8d, 0xaa, 0x20, 0x1f, 0x73, 0xb3, 0xd2, 0x29, 0xf7, 0x1a, 0x2b, 0xfd, 0xab, 0x4d,
|
||||
0x4b, 0xd4, 0x02, 0x0c, 0x42, 0xc1, 0xf7, 0x66, 0xa6, 0x92, 0x18, 0x2d, 0xec, 0x5a, 0x43, 0xbd,
|
||||
0x24, 0x8a, 0x83, 0x97, 0x50, 0x79, 0x02, 0x7b, 0xc5, 0x44, 0x54, 0x7e, 0xe2, 0xa7, 0xa8, 0x9a,
|
||||
0xc9, 0xfd, 0xd1, 0x29, 0xf7, 0xe6, 0xdf, 0x3b, 0xdd, 0x37, 0x5a, 0xc8, 0xd6, 0x4b, 0x6b, 0xa6,
|
||||
0xd3, 0x3b, 0x38, 0xb1, 0x8c, 0xc3, 0x13, 0xcb, 0x38, 0x3a, 0xb1, 0x8c, 0xfd, 0xdc, 0x32, 0x0f,
|
||||
0x72, 0xcb, 0x3c, 0xcc, 0x2d, 0xf3, 0x28, 0xb7, 0xcc, 0x5f, 0xb9, 0x65, 0x7e, 0xfe, 0x6d, 0x19,
|
||||
0xef, 0x4a, 0x59, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x3a, 0x3c, 0x31, 0x1b, 0x05,
|
||||
0x00, 0x00,
|
||||
}
|
|
@@ -19,13 +19,12 @@ limitations under the License.
|
|||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.kubernetes.pkg.apis.authentication.v1;
|
||||
package k8s.io.api.authentication.v1;
|
||||
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
||||
import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1";
|
|
@@ -36,18 +36,11 @@ func Resource(resource string) schema.GroupResource {
|
|||
var (
|
||||
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
|
||||
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs, addConversionFuncs)
|
||||
}
|
||||
|
||||
// Adds the list of known types to api.Scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
|
@@ -16,9 +16,7 @@ go_library(
|
|||
"types.generated.go",
|
||||
"types.go",
|
||||
"types_swagger_doc_generated.go",
|
||||
"zz_generated.conversion.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
"zz_generated.defaults.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
|
@@ -30,6 +28,5 @@ go_library(
|
|||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/pkg/apis/storage:go_default_library",
|
||||
],
|
||||
)
|
|
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package,register
|
||||
// +groupName=authentication.k8s.io
|
||||
// +k8s:openapi-gen=true
|
||||
package v1beta1
|