Update godeps
parent 423433bc5f
commit 701c5a0e30
482 changed files with 86915 additions and 19741 deletions
737 Godeps/Godeps.json generated
File diff suppressed because it is too large
13 vendor/bitbucket.org/ww/goautoneg/Makefile generated vendored
@@ -1,13 +0,0 @@
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

format:
	gofmt -w *.go

docs:
	gomake clean
	godoc ${TARG} > README.txt
20 vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
202 vendor/github.com/coreos/go-oidc/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
5 vendor/github.com/coreos/go-oidc/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
7 vendor/github.com/coreos/go-oidc/http/client.go generated vendored Normal file
@@ -0,0 +1,7 @@
package http

import "net/http"

type Client interface {
	Do(*http.Request) (*http.Response, error)
}
159 vendor/github.com/coreos/go-oidc/http/http.go generated vendored Normal file
@@ -0,0 +1,159 @@
package http

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/coreos/pkg/capnslog"
)

var (
	log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
)

func WriteError(w http.ResponseWriter, code int, msg string) {
	e := struct {
		Error string `json:"error"`
	}{
		Error: msg,
	}
	b, err := json.Marshal(e)
	if err != nil {
		log.Errorf("Failed marshaling %#v to JSON: %v", e, err)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	w.Write(b)
}

// BasicAuth parses a username and password from the request's
// Authorization header. This was pulled from golang master:
// https://codereview.appspot.com/76540043
func BasicAuth(r *http.Request) (username, password string, ok bool) {
	auth := r.Header.Get("Authorization")
	if auth == "" {
		return
	}

	if !strings.HasPrefix(auth, "Basic ") {
		return
	}
	c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
	if err != nil {
		return
	}
	cs := string(c)
	s := strings.IndexByte(cs, ':')
	if s < 0 {
		return
	}
	return cs[:s], cs[s+1:], true
}

func cacheControlMaxAge(hdr string) (time.Duration, bool, error) {
	for _, field := range strings.Split(hdr, ",") {
		parts := strings.SplitN(strings.TrimSpace(field), "=", 2)
		k := strings.ToLower(strings.TrimSpace(parts[0]))
		if k != "max-age" {
			continue
		}

		if len(parts) == 1 {
			return 0, false, errors.New("max-age has no value")
		}

		v := strings.TrimSpace(parts[1])
		if v == "" {
			return 0, false, errors.New("max-age has empty value")
		}

		age, err := strconv.Atoi(v)
		if err != nil {
			return 0, false, err
		}

		if age <= 0 {
			return 0, false, nil
		}

		return time.Duration(age) * time.Second, true, nil
	}

	return 0, false, nil
}

func expires(date, expires string) (time.Duration, bool, error) {
	if date == "" || expires == "" {
		return 0, false, nil
	}

	te, err := time.Parse(time.RFC1123, expires)
	if err != nil {
		return 0, false, err
	}

	td, err := time.Parse(time.RFC1123, date)
	if err != nil {
		return 0, false, err
	}

	ttl := te.Sub(td)

	// headers indicate data already expired, caller should not
	// have to care about this case
	if ttl <= 0 {
		return 0, false, nil
	}

	return ttl, true, nil
}

func Cacheable(hdr http.Header) (time.Duration, bool, error) {
	ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control"))
	if err != nil || ok {
		return ttl, ok, err
	}

	return expires(hdr.Get("Date"), hdr.Get("Expires"))
}

// MergeQuery appends additional query values to an existing URL.
func MergeQuery(u url.URL, q url.Values) url.URL {
	uv := u.Query()
	for k, vs := range q {
		for _, v := range vs {
			uv.Add(k, v)
		}
	}
	u.RawQuery = uv.Encode()
	return u
}

// NewResourceLocation appends a resource id to the end of the requested URL path.
func NewResourceLocation(reqURL *url.URL, id string) string {
	var u url.URL
	u = *reqURL
	u.Path = path.Join(u.Path, id)
	u.RawQuery = ""
	u.Fragment = ""
	return u.String()
}

// CopyRequest returns a clone of the provided *http.Request.
// The returned object is a shallow copy of the struct and a
// deep copy of its Header field.
func CopyRequest(r *http.Request) *http.Request {
	r2 := *r
	r2.Header = make(http.Header)
	for k, s := range r.Header {
		r2.Header[k] = s
	}
	return &r2
}
14 vendor/github.com/coreos/go-oidc/http/middleware.go generated vendored Normal file
@@ -0,0 +1,14 @@
package http

import (
	"net/http"
)

type LoggingMiddleware struct {
	Next http.Handler
}

func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	log.Infof("HTTP %s %v", r.Method, r.URL)
	l.Next.ServeHTTP(w, r)
}
29 vendor/github.com/coreos/go-oidc/http/url.go generated vendored Normal file
@@ -0,0 +1,29 @@
package http

import (
	"errors"
	"net/url"
)

// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty
// since `url.Parse("")` does not return an error. Must contian a scheme and a host.
func ParseNonEmptyURL(u string) (*url.URL, error) {
	if u == "" {
		return nil, errors.New("url is empty")
	}

	ur, err := url.Parse(u)
	if err != nil {
		return nil, err
	}

	if ur.Scheme == "" {
		return nil, errors.New("url scheme is empty")
	}

	if ur.Host == "" {
		return nil, errors.New("url host is empty")
	}

	return ur, nil
}
126 vendor/github.com/coreos/go-oidc/jose/claims.go generated vendored Normal file
@@ -0,0 +1,126 @@
package jose

import (
	"encoding/json"
	"fmt"
	"math"
	"time"
)

type Claims map[string]interface{}

func (c Claims) Add(name string, value interface{}) {
	c[name] = value
}

func (c Claims) StringClaim(name string) (string, bool, error) {
	cl, ok := c[name]
	if !ok {
		return "", false, nil
	}

	v, ok := cl.(string)
	if !ok {
		return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
	}

	return v, true, nil
}

func (c Claims) StringsClaim(name string) ([]string, bool, error) {
	cl, ok := c[name]
	if !ok {
		return nil, false, nil
	}

	if v, ok := cl.([]string); ok {
		return v, true, nil
	}

	// When unmarshaled, []string will become []interface{}.
	if v, ok := cl.([]interface{}); ok {
		var ret []string
		for _, vv := range v {
			str, ok := vv.(string)
			if !ok {
				return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
			}
			ret = append(ret, str)
		}
		return ret, true, nil
	}

	return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
}

func (c Claims) Int64Claim(name string) (int64, bool, error) {
	cl, ok := c[name]
	if !ok {
		return 0, false, nil
	}

	v, ok := cl.(int64)
	if !ok {
		vf, ok := cl.(float64)
		if !ok {
			return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name)
		}
		v = int64(vf)
	}

	return v, true, nil
}

func (c Claims) Float64Claim(name string) (float64, bool, error) {
	cl, ok := c[name]
	if !ok {
		return 0, false, nil
	}

	v, ok := cl.(float64)
	if !ok {
		vi, ok := cl.(int64)
		if !ok {
			return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
		}
		v = float64(vi)
	}

	return v, true, nil
}

func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
	v, ok, err := c.Float64Claim(name)
	if !ok || err != nil {
		return time.Time{}, ok, err
	}

	s := math.Trunc(v)
	ns := (v - s) * math.Pow(10, 9)
	return time.Unix(int64(s), int64(ns)).UTC(), true, nil
}

func decodeClaims(payload []byte) (Claims, error) {
	var c Claims
	if err := json.Unmarshal(payload, &c); err != nil {
		return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err)
	}
	return c, nil
}

func marshalClaims(c Claims) ([]byte, error) {
	b, err := json.Marshal(c)
	if err != nil {
		return nil, err
	}
	return b, nil
}

func encodeClaims(c Claims) (string, error) {
	b, err := marshalClaims(c)
	if err != nil {
		return "", err
	}

	return encodeSegment(b), nil
}
112 vendor/github.com/coreos/go-oidc/jose/jose.go generated vendored Normal file
@@ -0,0 +1,112 @@
package jose

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

const (
	HeaderMediaType = "typ"
	HeaderKeyAlgorithm = "alg"
	HeaderKeyID = "kid"
)

const (
	// Encryption Algorithm Header Parameter Values for JWS
	// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
	AlgHS256 = "HS256"
	AlgHS384 = "HS384"
	AlgHS512 = "HS512"
	AlgRS256 = "RS256"
	AlgRS384 = "RS384"
	AlgRS512 = "RS512"
	AlgES256 = "ES256"
	AlgES384 = "ES384"
	AlgES512 = "ES512"
	AlgPS256 = "PS256"
	AlgPS384 = "PS384"
	AlgPS512 = "PS512"
	AlgNone = "none"
)

const (
	// Algorithm Header Parameter Values for JWE
	// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
	AlgRSA15 = "RSA1_5"
	AlgRSAOAEP = "RSA-OAEP"
	AlgRSAOAEP256 = "RSA-OAEP-256"
	AlgA128KW = "A128KW"
	AlgA192KW = "A192KW"
	AlgA256KW = "A256KW"
	AlgDir = "dir"
	AlgECDHES = "ECDH-ES"
	AlgECDHESA128KW = "ECDH-ES+A128KW"
	AlgECDHESA192KW = "ECDH-ES+A192KW"
	AlgECDHESA256KW = "ECDH-ES+A256KW"
	AlgA128GCMKW = "A128GCMKW"
	AlgA192GCMKW = "A192GCMKW"
	AlgA256GCMKW = "A256GCMKW"
	AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
	AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
	AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
)

const (
	// Encryption Algorithm Header Parameter Values for JWE
	// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
	EncA128CBCHS256 = "A128CBC-HS256"
	EncA128CBCHS384 = "A128CBC-HS384"
	EncA256CBCHS512 = "A256CBC-HS512"
	EncA128GCM = "A128GCM"
	EncA192GCM = "A192GCM"
	EncA256GCM = "A256GCM"
)

type JOSEHeader map[string]string

func (j JOSEHeader) Validate() error {
	if _, exists := j[HeaderKeyAlgorithm]; !exists {
		return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm)
	}

	return nil
}

func decodeHeader(seg string) (JOSEHeader, error) {
	b, err := decodeSegment(seg)
	if err != nil {
		return nil, err
	}

	var h JOSEHeader
	err = json.Unmarshal(b, &h)
	if err != nil {
		return nil, err
	}

	return h, nil
}

func encodeHeader(h JOSEHeader) (string, error) {
	b, err := json.Marshal(h)
	if err != nil {
		return "", err
	}

	return encodeSegment(b), nil
}

// Decode JWT specific base64url encoding with padding stripped
func decodeSegment(seg string) ([]byte, error) {
	if l := len(seg) % 4; l != 0 {
		seg += strings.Repeat("=", 4-l)
	}
	return base64.URLEncoding.DecodeString(seg)
}

// Encode JWT specific base64url encoding with padding stripped
func encodeSegment(seg []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}
135 vendor/github.com/coreos/go-oidc/jose/jwk.go generated vendored Normal file
@@ -0,0 +1,135 @@
package jose

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"encoding/json"
	"math/big"
	"strings"
)

// JSON Web Key
// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5
type JWK struct {
	ID string
	Type string
	Alg string
	Use string
	Exponent int
	Modulus *big.Int
	Secret []byte
}

type jwkJSON struct {
	ID string `json:"kid"`
	Type string `json:"kty"`
	Alg string `json:"alg"`
	Use string `json:"use"`
	Exponent string `json:"e"`
	Modulus string `json:"n"`
}

func (j *JWK) MarshalJSON() ([]byte, error) {
	t := jwkJSON{
		ID: j.ID,
		Type: j.Type,
		Alg: j.Alg,
		Use: j.Use,
		Exponent: encodeExponent(j.Exponent),
		Modulus: encodeModulus(j.Modulus),
	}

	return json.Marshal(&t)
}

func (j *JWK) UnmarshalJSON(data []byte) error {
	var t jwkJSON
	err := json.Unmarshal(data, &t)
	if err != nil {
		return err
	}

	e, err := decodeExponent(t.Exponent)
	if err != nil {
		return err
	}

	n, err := decodeModulus(t.Modulus)
	if err != nil {
		return err
	}

	j.ID = t.ID
	j.Type = t.Type
	j.Alg = t.Alg
	j.Use = t.Use
	j.Exponent = e
	j.Modulus = n

	return nil
}

type JWKSet struct {
	Keys []JWK `json:"keys"`
}

func decodeExponent(e string) (int, error) {
	decE, err := decodeBase64URLPaddingOptional(e)
	if err != nil {
		return 0, err
	}
	var eBytes []byte
	if len(decE) < 8 {
		eBytes = make([]byte, 8-len(decE), 8)
		eBytes = append(eBytes, decE...)
	} else {
		eBytes = decE
	}
	eReader := bytes.NewReader(eBytes)
	var E uint64
	err = binary.Read(eReader, binary.BigEndian, &E)
	if err != nil {
		return 0, err
	}
	return int(E), nil
}

func encodeExponent(e int) string {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(e))
	var idx int
	for ; idx < 8; idx++ {
		if b[idx] != 0x0 {
			break
		}
	}
	return base64.URLEncoding.EncodeToString(b[idx:])
}

// Turns a URL encoded modulus of a key into a big int.
func decodeModulus(n string) (*big.Int, error) {
	decN, err := decodeBase64URLPaddingOptional(n)
	if err != nil {
		return nil, err
	}
	N := big.NewInt(0)
	N.SetBytes(decN)
	return N, nil
}

func encodeModulus(n *big.Int) string {
	return base64.URLEncoding.EncodeToString(n.Bytes())
}

// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
// The stdlib version currently doesn't handle this.
// We can get rid of this is if this bug:
// https://github.com/golang/go/issues/4237
// ever closes.
func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
	if m := len(e) % 4; m != 0 {
		e += strings.Repeat("=", 4-m)
	}
	return base64.URLEncoding.DecodeString(e)
}
51 vendor/github.com/coreos/go-oidc/jose/jws.go generated vendored Normal file
@@ -0,0 +1,51 @@
package jose

import (
	"fmt"
	"strings"
)

type JWS struct {
	RawHeader string
	Header JOSEHeader
	RawPayload string
	Payload []byte
	Signature []byte
}

// Given a raw encoded JWS token parses it and verifies the structure.
func ParseJWS(raw string) (JWS, error) {
	parts := strings.Split(raw, ".")
	if len(parts) != 3 {
		return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
	}

	rawSig := parts[2]
	jws := JWS{
		RawHeader: parts[0],
		RawPayload: parts[1],
	}

	header, err := decodeHeader(jws.RawHeader)
	if err != nil {
		return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
	}
	if err = header.Validate(); err != nil {
		return JWS{}, fmt.Errorf("malformed JWS, %s", err)
	}
	jws.Header = header

	payload, err := decodeSegment(jws.RawPayload)
	if err != nil {
		return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
	}
	jws.Payload = payload

	sig, err := decodeSegment(rawSig)
	if err != nil {
		return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
	}
	jws.Signature = sig

	return jws, nil
}
82 vendor/github.com/coreos/go-oidc/jose/jwt.go generated vendored Normal file
@@ -0,0 +1,82 @@
package jose

import "strings"

type JWT JWS

func ParseJWT(token string) (jwt JWT, err error) {
	jws, err := ParseJWS(token)
	if err != nil {
		return
	}

	return JWT(jws), nil
}

func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
	jwt = JWT{}

	jwt.Header = header
	jwt.Header[HeaderMediaType] = "JWT"

	claimBytes, err := marshalClaims(claims)
	if err != nil {
		return
	}
	jwt.Payload = claimBytes

	eh, err := encodeHeader(header)
	if err != nil {
		return
	}
	jwt.RawHeader = eh

	ec, err := encodeClaims(claims)
	if err != nil {
		return
	}
	jwt.RawPayload = ec

	return
}

func (j *JWT) KeyID() (string, bool) {
	kID, ok := j.Header[HeaderKeyID]
	return kID, ok
}

func (j *JWT) Claims() (Claims, error) {
	return decodeClaims(j.Payload)
}

// Encoded data part of the token which may be signed.
func (j *JWT) Data() string {
	return strings.Join([]string{j.RawHeader, j.RawPayload}, ".")
}

// Full encoded JWT token string in format: header.claims.signature
func (j *JWT) Encode() string {
	d := j.Data()
	s := encodeSegment(j.Signature)
	return strings.Join([]string{d, s}, ".")
}

func NewSignedJWT(claims Claims, s Signer) (*JWT, error) {
	header := JOSEHeader{
		HeaderKeyAlgorithm: s.Alg(),
		HeaderKeyID: s.ID(),
	}

	jwt, err := NewJWT(header, claims)
	if err != nil {
		return nil, err
	}

	sig, err := s.Sign([]byte(jwt.Data()))
	if err != nil {
		return nil, err
	}
	jwt.Signature = sig

	return &jwt, nil
}
24 vendor/github.com/coreos/go-oidc/jose/sig.go generated vendored Executable file
@@ -0,0 +1,24 @@
package jose

import (
	"fmt"
)

type Verifier interface {
	ID() string
	Alg() string
	Verify(sig []byte, data []byte) error
}

type Signer interface {
	Verifier
	Sign(data []byte) (sig []byte, err error)
}

func NewVerifier(jwk JWK) (Verifier, error) {
	if jwk.Type != "RSA" {
		return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
	}

	return NewVerifierRSA(jwk)
}
67 vendor/github.com/coreos/go-oidc/jose/sig_hmac.go generated vendored Executable file
@@ -0,0 +1,67 @@
package jose

import (
	"bytes"
	"crypto"
	"crypto/hmac"
	_ "crypto/sha256"
	"errors"
	"fmt"
)

type VerifierHMAC struct {
	KeyID string
	Hash crypto.Hash
	Secret []byte
}

type SignerHMAC struct {
	VerifierHMAC
}

func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
	if jwk.Alg != "" && jwk.Alg != "HS256" {
		return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
	}

	v := VerifierHMAC{
		KeyID: jwk.ID,
		Secret: jwk.Secret,
		Hash: crypto.SHA256,
	}

	return &v, nil
}

func (v *VerifierHMAC) ID() string {
	return v.KeyID
}

func (v *VerifierHMAC) Alg() string {
	return "HS256"
}

func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
	h := hmac.New(v.Hash.New, v.Secret)
	h.Write(data)
	if !bytes.Equal(sig, h.Sum(nil)) {
		return errors.New("invalid hmac signature")
	}
	return nil
}

func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
	return &SignerHMAC{
		VerifierHMAC: VerifierHMAC{
			KeyID: kid,
			Secret: secret,
			Hash: crypto.SHA256,
		},
	}
}

func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
	h := hmac.New(s.Hash.New, s.Secret)
	h.Write(data)
	return h.Sum(nil), nil
}
67 vendor/github.com/coreos/go-oidc/jose/sig_rsa.go generated vendored Executable file
@@ -0,0 +1,67 @@
package jose

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

type VerifierRSA struct {
	KeyID string
	Hash crypto.Hash
	PublicKey rsa.PublicKey
}

type SignerRSA struct {
	PrivateKey rsa.PrivateKey
	VerifierRSA
}

func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
	if jwk.Alg != "" && jwk.Alg != "RS256" {
		return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
	}

	v := VerifierRSA{
		KeyID: jwk.ID,
		PublicKey: rsa.PublicKey{
			N: jwk.Modulus,
			E: jwk.Exponent,
		},
		Hash: crypto.SHA256,
	}

	return &v, nil
}

func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA {
	return &SignerRSA{
		PrivateKey: key,
		VerifierRSA: VerifierRSA{
			KeyID: kid,
			PublicKey: key.PublicKey,
			Hash: crypto.SHA256,
		},
	}
}

func (v *VerifierRSA) ID() string {
	return v.KeyID
}

func (v *VerifierRSA) Alg() string {
	return "RS256"
}

func (v *VerifierRSA) Verify(sig []byte, data []byte) error {
	h := v.Hash.New()
	h.Write(data)
	return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig)
}

func (s *SignerRSA) Sign(data []byte) ([]byte, error) {
	h := s.Hash.New()
	h.Write(data)
	return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil))
}
153 vendor/github.com/coreos/go-oidc/key/key.go generated vendored Normal file
@@ -0,0 +1,153 @@
package key

import (
	"crypto/rand"
	"crypto/rsa"
	"encoding/base64"
	"encoding/json"
	"math/big"
	"time"

	"github.com/coreos/go-oidc/jose"
)

func NewPublicKey(jwk jose.JWK) *PublicKey {
	return &PublicKey{jwk: jwk}
}

type PublicKey struct {
	jwk jose.JWK
}

func (k *PublicKey) MarshalJSON() ([]byte, error) {
	return json.Marshal(&k.jwk)
}

func (k *PublicKey) UnmarshalJSON(data []byte) error {
	var jwk jose.JWK
	if err := json.Unmarshal(data, &jwk); err != nil {
		return err
	}
	k.jwk = jwk
	return nil
}

func (k *PublicKey) ID() string {
	return k.jwk.ID
}

func (k *PublicKey) Verifier() (jose.Verifier, error) {
	return jose.NewVerifierRSA(k.jwk)
}

type PrivateKey struct {
	KeyID string
	PrivateKey *rsa.PrivateKey
}

func (k *PrivateKey) ID() string {
	return k.KeyID
}

func (k *PrivateKey) Signer() jose.Signer {
	return jose.NewSignerRSA(k.ID(), *k.PrivateKey)
}

func (k *PrivateKey) JWK() jose.JWK {
	return jose.JWK{
		ID: k.KeyID,
		Type: "RSA",
		Alg: "RS256",
		Use: "sig",
		Exponent: k.PrivateKey.PublicKey.E,
		Modulus: k.PrivateKey.PublicKey.N,
	}
}

type KeySet interface {
	ExpiresAt() time.Time
}

type PublicKeySet struct {
	keys []PublicKey
	index map[string]*PublicKey
	expiresAt time.Time
}

func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet {
	keys := make([]PublicKey, len(jwks))
	index := make(map[string]*PublicKey)
	for i, jwk := range jwks {
		keys[i] = *NewPublicKey(jwk)
		index[keys[i].ID()] = &keys[i]
	}
	return &PublicKeySet{
		keys: keys,
		index: index,
		expiresAt: exp,
	}
}

func (s *PublicKeySet) ExpiresAt() time.Time {
	return s.expiresAt
}

func (s *PublicKeySet) Keys() []PublicKey {
	return s.keys
}

func (s *PublicKeySet) Key(id string) *PublicKey {
	return s.index[id]
}

type PrivateKeySet struct {
	keys []*PrivateKey
	ActiveKeyID string
	expiresAt time.Time
}

func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet {
	return &PrivateKeySet{
		keys: keys,
		ActiveKeyID: keys[0].ID(),
		expiresAt: exp.UTC(),
	}
}

func (s *PrivateKeySet) Keys() []*PrivateKey {
	return s.keys
}

func (s *PrivateKeySet) ExpiresAt() time.Time {
	return s.expiresAt
}

func (s *PrivateKeySet) Active() *PrivateKey {
	for i, k := range s.keys {
		if k.ID() == s.ActiveKeyID {
			return s.keys[i]
		}
	}

	return nil
}

type GeneratePrivateKeyFunc func() (*PrivateKey, error)

func GeneratePrivateKey() (*PrivateKey, error) {
	pk, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}

	k := PrivateKey{
		KeyID: base64BigInt(pk.PublicKey.N),
		PrivateKey: pk,
	}

	return &k, nil
}

func base64BigInt(b *big.Int) string {
	return base64.URLEncoding.EncodeToString(b.Bytes())
}
99 vendor/github.com/coreos/go-oidc/key/manager.go generated vendored Normal file
@@ -0,0 +1,99 @@
package key

import (
	"errors"
	"time"

	"github.com/jonboulle/clockwork"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/pkg/health"
)

type PrivateKeyManager interface {
	ExpiresAt() time.Time
	Signer() (jose.Signer, error)
	JWKs() ([]jose.JWK, error)
	PublicKeys() ([]PublicKey, error)

	WritableKeySetRepo
	health.Checkable
}

func NewPrivateKeyManager() PrivateKeyManager {
	return &privateKeyManager{
		clock: clockwork.NewRealClock(),
	}
}

type privateKeyManager struct {
	keySet *PrivateKeySet
	clock clockwork.Clock
}

func (m *privateKeyManager) ExpiresAt() time.Time {
	if m.keySet == nil {
		return m.clock.Now().UTC()
	}

	return m.keySet.ExpiresAt()
}

func (m *privateKeyManager) Signer() (jose.Signer, error) {
	if err := m.Healthy(); err != nil {
		return nil, err
	}

	return m.keySet.Active().Signer(), nil
}

func (m *privateKeyManager) JWKs() ([]jose.JWK, error) {
	if err := m.Healthy(); err != nil {
		return nil, err
	}

	keys := m.keySet.Keys()
	jwks := make([]jose.JWK, len(keys))
	for i, k := range keys {
		jwks[i] = k.JWK()
	}
	return jwks, nil
}

func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) {
	jwks, err := m.JWKs()
	if err != nil {
		return nil, err
	}
	keys := make([]PublicKey, len(jwks))
	for i, jwk := range jwks {
		keys[i] = *NewPublicKey(jwk)
	}
	return keys, nil
}

func (m *privateKeyManager) Healthy() error {
	if m.keySet == nil {
		return errors.New("private key manager uninitialized")
	}

	if len(m.keySet.Keys()) == 0 {
		return errors.New("private key manager zero keys")
	}

	if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) {
		return errors.New("private key manager keys expired")
	}

	return nil
}

func (m *privateKeyManager) Set(keySet KeySet) error {
	privKeySet, ok := keySet.(*PrivateKeySet)
	if !ok {
		return errors.New("unable to cast to PrivateKeySet")
	}

	m.keySet = privKeySet
	return nil
}
55 vendor/github.com/coreos/go-oidc/key/repo.go generated vendored Normal file
@@ -0,0 +1,55 @@
package key

import (
	"errors"
	"sync"
)

var ErrorNoKeys = errors.New("no keys found")

type WritableKeySetRepo interface {
	Set(KeySet) error
}

type ReadableKeySetRepo interface {
	Get() (KeySet, error)
}

type PrivateKeySetRepo interface {
	WritableKeySetRepo
	ReadableKeySetRepo
}

func NewPrivateKeySetRepo() PrivateKeySetRepo {
	return &memPrivateKeySetRepo{}
}

type memPrivateKeySetRepo struct {
	mu sync.RWMutex
	pks PrivateKeySet
}

func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
	pks, ok := ks.(*PrivateKeySet)
	if !ok {
		return errors.New("unable to cast to PrivateKeySet")
	} else if pks == nil {
		return errors.New("nil KeySet")
	}

	r.mu.Lock()
	defer r.mu.Unlock()

	r.pks = *pks
	return nil
}

func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()

	if r.pks.keys == nil {
		return nil, ErrorNoKeys
	}
	return KeySet(&r.pks), nil
}
165 vendor/github.com/coreos/go-oidc/key/rotate.go generated vendored Normal file
@@ -0,0 +1,165 @@
package key

import (
	"errors"
	"time"

	"github.com/coreos/pkg/capnslog"
	ptime "github.com/coreos/pkg/timeutil"
	"github.com/jonboulle/clockwork"
)

var (
	log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "key")

	ErrorPrivateKeysExpired = errors.New("private keys have expired")
)

func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
	return &PrivateKeyRotator{
		repo: repo,
		ttl: ttl,

		keep: 2,
		generateKey: GeneratePrivateKey,
		clock: clockwork.NewRealClock(),
	}
}

type PrivateKeyRotator struct {
	repo PrivateKeySetRepo
	generateKey GeneratePrivateKeyFunc
	clock clockwork.Clock
	keep int
	ttl time.Duration
}

func (r *PrivateKeyRotator) expiresAt() time.Time {
	return r.clock.Now().UTC().Add(r.ttl)
}

func (r *PrivateKeyRotator) Healthy() error {
	pks, err := r.privateKeySet()
	if err != nil {
		return err
	}

	if r.clock.Now().After(pks.ExpiresAt()) {
		return ErrorPrivateKeysExpired
	}

	return nil
}

func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
	ks, err := r.repo.Get()
	if err != nil {
		return nil, err
	}

	pks, ok := ks.(*PrivateKeySet)
	if !ok {
		return nil, errors.New("unable to cast to PrivateKeySet")
	}
	return pks, nil
}

func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
	pks, err := r.privateKeySet()
	if err == ErrorNoKeys {
		log.Infof("No keys in private key set; must rotate immediately")
		return 0, nil
	}
	if err != nil {
		return 0, err
	}

	now := r.clock.Now()

	// Ideally, we want to rotate after half the TTL has elapsed.
	idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)

	// If we are past the ideal rotation time, rotate immediatly.
	return max(0, idealRotationTime.Sub(now)), nil
}

func max(a, b time.Duration) time.Duration {
	if a > b {
		return a
	}
	return b
}

func (r *PrivateKeyRotator) Run() chan struct{} {
	attempt := func() {
		k, err := r.generateKey()
		if err != nil {
			log.Errorf("Failed generating signing key: %v", err)
			return
		}

		exp := r.expiresAt()
		if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil {
			log.Errorf("Failed key rotation: %v", err)
			return
		}

		log.Infof("Rotated signing keys: id=%s expiresAt=%s", k.ID(), exp)
	}

	stop := make(chan struct{})
	go func() {
		for {
			var nextRotation time.Duration
			var sleep time.Duration
			var err error
			for {
				if nextRotation, err = r.nextRotation(); err == nil {
					break
				}
				sleep = ptime.ExpBackoff(sleep, time.Minute)
				log.Errorf("error getting nextRotation, retrying in %v: %v", sleep, err)
				time.Sleep(sleep)
			}

			log.Infof("will rotate keys in %v", nextRotation)
			select {
			case <-r.clock.After(nextRotation):
				attempt()
			case <-stop:
				return
			}
		}
	}()

	return stop
}

func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error {
	ks, err := repo.Get()
	if err != nil && err != ErrorNoKeys {
		return err
	}

	var keys []*PrivateKey
	if ks != nil {
		pks, ok := ks.(*PrivateKeySet)
		if !ok {
			return errors.New("unable to cast to PrivateKeySet")
		}
		keys = pks.Keys()
	}

	keys = append([]*PrivateKey{k}, keys...)
	if l := len(keys); l > keep {
		keys = keys[0:keep]
	}

	nks := PrivateKeySet{
		keys: keys,
		ActiveKeyID: k.ID(),
		expiresAt: exp,
	}

	return repo.Set(KeySet(&nks))
}
91 vendor/github.com/coreos/go-oidc/key/sync.go generated vendored Normal file
@@ -0,0 +1,91 @@
package key

import (
	"errors"
	"time"

	"github.com/jonboulle/clockwork"

	"github.com/coreos/pkg/timeutil"
)

func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
	return &KeySetSyncer{
		readable: r,
		writable: w,
		clock: clockwork.NewRealClock(),
	}
}

type KeySetSyncer struct {
	readable ReadableKeySetRepo
	writable WritableKeySetRepo
	clock clockwork.Clock
}

func (s *KeySetSyncer) Run() chan struct{} {
	stop := make(chan struct{})
	go func() {
		var failing bool
		var next time.Duration
		for {
			exp, err := syncKeySet(s.readable, s.writable, s.clock)
			if err != nil || exp == 0 {
				if !failing {
					failing = true
					next = time.Second
				} else {
					next = timeutil.ExpBackoff(next, time.Minute)
				}
				if exp == 0 {
					log.Errorf("Synced to already expired key set, retrying in %v: %v", next, err)

				} else {
					log.Errorf("Failed syncing key set, retrying in %v: %v", next, err)
				}
			} else {
				failing = false
				next = exp / 2
				log.Infof("Synced key set, checking again in %v", next)
			}

			select {
			case <-s.clock.After(next):
				continue
			case <-stop:
				return
			}
		}
	}()

	return stop
}

func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
	return syncKeySet(r, w, clockwork.NewRealClock())
}

// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire.
// If keyset has already expired, returns a zero duration.
func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
	var ks KeySet
	ks, err = r.Get()
	if err != nil {
		return
	}

	if ks == nil {
		err = errors.New("no source KeySet")
		return
	}

	if err = w.Set(ks); err != nil {
		return
	}

	now := clock.Now()
	if ks.ExpiresAt().After(now) {
		exp = ks.ExpiresAt().Sub(now)
	}
	return
}
29 vendor/github.com/coreos/go-oidc/oauth2/error.go generated vendored Normal file
@@ -0,0 +1,29 @@
package oauth2

const (
	ErrorAccessDenied = "access_denied"
	ErrorInvalidClient = "invalid_client"
	ErrorInvalidGrant = "invalid_grant"
	ErrorInvalidRequest = "invalid_request"
	ErrorServerError = "server_error"
	ErrorUnauthorizedClient = "unauthorized_client"
	ErrorUnsupportedGrantType = "unsupported_grant_type"
	ErrorUnsupportedResponseType = "unsupported_response_type"
)

type Error struct {
	Type string `json:"error"`
	Description string `json:"error_description,omitempty"`
	State string `json:"state,omitempty"`
}

func (e *Error) Error() string {
	if e.Description != "" {
		return e.Type + ": " + e.Description
	}
	return e.Type
}

func NewError(typ string) *Error {
	return &Error{Type: typ}
}
416
vendor/github.com/coreos/go-oidc/oauth2/oauth2.go
generated
vendored
Normal file
416
vendor/github.com/coreos/go-oidc/oauth2/oauth2.go
generated
vendored
Normal file
|
@ -0,0 +1,416 @@
|
|||
package oauth2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
)
|
||||
|
||||
// ResponseTypesEqual compares two response_type values. If either
|
||||
// contains a space, it is treated as an unordered list. For example,
|
||||
// comparing "code id_token" and "id_token code" would evaluate to true.
|
||||
func ResponseTypesEqual(r1, r2 string) bool {
|
||||
if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
|
||||
// fast route, no split needed
|
||||
return r1 == r2
|
||||
}
|
||||
|
||||
// split, sort, and compare
|
||||
r1Fields := strings.Fields(r1)
|
||||
r2Fields := strings.Fields(r2)
|
||||
if len(r1Fields) != len(r2Fields) {
|
||||
return false
|
||||
}
|
||||
sort.Strings(r1Fields)
|
||||
sort.Strings(r2Fields)
|
||||
for i, r1Field := range r1Fields {
|
||||
if r1Field != r2Fields[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
// OAuth2.0 response types registered by OIDC.
|
||||
//
|
||||
// See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
|
||||
ResponseTypeCode = "code"
|
||||
ResponseTypeCodeIDToken = "code id_token"
|
||||
ResponseTypeCodeIDTokenToken = "code id_token token"
|
||||
ResponseTypeIDToken = "id_token"
|
||||
ResponseTypeIDTokenToken = "id_token token"
|
||||
ResponseTypeToken = "token"
|
||||
ResponseTypeNone = "none"
|
||||
)
|
||||
|
||||
const (
|
||||
GrantTypeAuthCode = "authorization_code"
|
||||
GrantTypeClientCreds = "client_credentials"
|
||||
GrantTypeUserCreds = "password"
|
||||
GrantTypeImplicit = "implicit"
|
||||
GrantTypeRefreshToken = "refresh_token"
|
||||
|
||||
AuthMethodClientSecretPost = "client_secret_post"
|
||||
AuthMethodClientSecretBasic = "client_secret_basic"
|
||||
AuthMethodClientSecretJWT = "client_secret_jwt"
|
||||
AuthMethodPrivateKeyJWT = "private_key_jwt"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Credentials ClientCredentials
|
||||
Scope []string
|
||||
RedirectURL string
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
|
||||
// Must be one of the AuthMethodXXX methods above. Right now, only
|
||||
// AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported.
|
||||
AuthMethod string
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
hc phttp.Client
|
||||
creds ClientCredentials
|
||||
scope []string
|
||||
authURL *url.URL
|
||||
redirectURL *url.URL
|
||||
tokenURL *url.URL
|
||||
authMethod string
|
||||
}
|
||||
|
||||
type ClientCredentials struct {
|
||||
ID string
|
||||
Secret string
|
||||
}
|
||||
|
||||
func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
|
||||
if len(cfg.Credentials.ID) == 0 {
|
||||
err = errors.New("missing client id")
|
||||
return
|
||||
}
|
||||
|
||||
if len(cfg.Credentials.Secret) == 0 {
|
||||
err = errors.New("missing client secret")
|
||||
return
|
||||
}
|
||||
|
||||
if cfg.AuthMethod == "" {
|
||||
cfg.AuthMethod = AuthMethodClientSecretBasic
|
||||
} else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic {
|
||||
err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod)
|
||||
return
|
||||
}
|
||||
|
||||
au, err := phttp.ParseNonEmptyURL(cfg.AuthURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Allow empty redirect URL in the case where the client
|
||||
// only needs to verify a given token.
|
||||
ru, err := url.Parse(cfg.RedirectURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c = &Client{
|
||||
creds: cfg.Credentials,
|
||||
scope: cfg.Scope,
|
||||
redirectURL: ru,
|
||||
authURL: au,
|
||||
tokenURL: tu,
|
||||
hc: hc,
|
||||
authMethod: cfg.AuthMethod,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// HttpClient returns the embedded HTTP client.
|
||||
func (c *Client) HttpClient() phttp.Client {
|
||||
return c.hc
|
||||
}
|
||||
|
||||
// AuthCodeURL generates the URL for the initial redirect to the OAuth provider.
|
||||
func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
|
||||
v := c.commonURLValues()
|
||||
v.Set("state", state)
|
||||
if strings.ToLower(accessType) == "offline" {
|
||||
v.Set("access_type", "offline")
|
||||
}
|
||||
|
||||
if prompt != "" {
|
||||
v.Set("prompt", prompt)
|
||||
}
|
||||
v.Set("response_type", "code")
|
||||
|
||||
q := v.Encode()
|
||||
u := *c.authURL
|
||||
if u.RawQuery == "" {
|
||||
u.RawQuery = q
|
||||
} else {
|
||||
u.RawQuery += "&" + q
|
||||
}
|
||||
return u.String()
|
||||
}
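// Usage sketch (illustrative only, not part of the vendored source): building
// the initial redirect URL. The endpoint and credential values below are
// placeholders; *http.Client is assumed to satisfy phttp.Client, as the oidc
// package's NewClient fallback also relies on.
//
//	cfg := oauth2.Config{
//		Credentials: oauth2.ClientCredentials{ID: "my-client", Secret: "my-secret"},
//		AuthURL:     "https://provider.example.com/authorize",
//		TokenURL:    "https://provider.example.com/token",
//		RedirectURL: "https://app.example.com/callback",
//		Scope:       []string{"openid", "email"},
//	}
//	cl, err := oauth2.NewClient(http.DefaultClient, cfg)
//	if err != nil {
//		// handle configuration error
//	}
//	loginURL := cl.AuthCodeURL("opaque-state", "offline", "consent")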
|
||||
|
||||
func (c *Client) commonURLValues() url.Values {
|
||||
return url.Values{
|
||||
"redirect_uri": {c.redirectURL.String()},
|
||||
"scope": {strings.Join(c.scope, " ")},
|
||||
"client_id": {c.creds.ID},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
|
||||
var req *http.Request
|
||||
var err error
|
||||
switch c.authMethod {
|
||||
case AuthMethodClientSecretPost:
|
||||
values.Set("client_secret", c.creds.Secret)
|
||||
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case AuthMethodClientSecretBasic:
|
||||
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encodedID := url.QueryEscape(c.creds.ID)
|
||||
encodedSecret := url.QueryEscape(c.creds.Secret)
|
||||
req.SetBasicAuth(encodedID, encodedSecret)
|
||||
default:
|
||||
panic("misconfigured client: auth method not supported")
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
return req, nil
|
||||
|
||||
}
|
||||
|
||||
// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type.
|
||||
// May not be supported by all OAuth2 servers.
|
||||
func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) {
|
||||
v := url.Values{
|
||||
"scope": {strings.Join(scope, " ")},
|
||||
"grant_type": {GrantTypeClientCreds},
|
||||
}
|
||||
|
||||
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return parseTokenResponse(resp)
|
||||
}
|
||||
|
||||
// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type
|
||||
// May not be supported by all OAuth2 servers.
|
||||
func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
|
||||
v := url.Values{
|
||||
"scope": {strings.Join(c.scope, " ")},
|
||||
"grant_type": {GrantTypeUserCreds},
|
||||
"username": {username},
|
||||
"password": {password},
|
||||
}
|
||||
|
||||
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return parseTokenResponse(resp)
|
||||
}
|
||||
|
||||
// RequestToken requests a token from the Token Endpoint with the specified grantType.
|
||||
// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
|
||||
// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.
|
||||
func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) {
|
||||
v := c.commonURLValues()
|
||||
|
||||
v.Set("grant_type", grantType)
|
||||
v.Set("client_secret", c.creds.Secret)
|
||||
switch grantType {
|
||||
case GrantTypeAuthCode:
|
||||
v.Set("code", value)
|
||||
case GrantTypeRefreshToken:
|
||||
v.Set("refresh_token", value)
|
||||
default:
|
||||
err = fmt.Errorf("unsupported grant_type: %v", grantType)
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return parseTokenResponse(resp)
|
||||
}
|
||||
|
||||
func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299
|
||||
|
||||
contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result = TokenResponse{
|
||||
RawBody: body,
|
||||
}
|
||||
|
||||
newError := func(typ, desc, state string) error {
|
||||
if typ == "" {
|
||||
return fmt.Errorf("unrecognized error %s", body)
|
||||
}
|
||||
return &Error{typ, desc, state}
|
||||
}
|
||||
|
||||
if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" {
|
||||
var vals url.Values
|
||||
vals, err = url.ParseQuery(string(body))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if error := vals.Get("error"); error != "" || badStatusCode {
|
||||
err = newError(error, vals.Get("error_description"), vals.Get("state"))
|
||||
return
|
||||
}
|
||||
e := vals.Get("expires_in")
|
||||
if e == "" {
|
||||
e = vals.Get("expires")
|
||||
}
|
||||
if e != "" {
|
||||
result.Expires, err = strconv.Atoi(e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
result.AccessToken = vals.Get("access_token")
|
||||
result.TokenType = vals.Get("token_type")
|
||||
result.IDToken = vals.Get("id_token")
|
||||
result.RefreshToken = vals.Get("refresh_token")
|
||||
result.Scope = vals.Get("scope")
|
||||
} else {
|
||||
var r struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
IDToken string `json:"id_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Scope string `json:"scope"`
|
||||
State string `json:"state"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
Expires int `json:"expires"`
|
||||
Error string `json:"error"`
|
||||
Desc string `json:"error_description"`
|
||||
}
|
||||
if err = json.Unmarshal(body, &r); err != nil {
|
||||
return
|
||||
}
|
||||
if r.Error != "" || badStatusCode {
|
||||
err = newError(r.Error, r.Desc, r.State)
|
||||
return
|
||||
}
|
||||
result.AccessToken = r.AccessToken
|
||||
result.TokenType = r.TokenType
|
||||
result.IDToken = r.IDToken
|
||||
result.RefreshToken = r.RefreshToken
|
||||
result.Scope = r.Scope
|
||||
if r.ExpiresIn == 0 {
|
||||
result.Expires = r.Expires
|
||||
} else {
|
||||
result.Expires = r.ExpiresIn
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
AccessToken string
|
||||
TokenType string
|
||||
Expires int
|
||||
IDToken string
|
||||
RefreshToken string // OPTIONAL.
|
||||
Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED.
|
||||
RawBody []byte // In case callers need some other non-standard info from the token response
|
||||
}
|
||||
|
||||
type AuthCodeRequest struct {
|
||||
ResponseType string
|
||||
ClientID string
|
||||
RedirectURL *url.URL
|
||||
Scope []string
|
||||
State string
|
||||
}
|
||||
|
||||
func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) {
|
||||
acr := AuthCodeRequest{
|
||||
ResponseType: q.Get("response_type"),
|
||||
ClientID: q.Get("client_id"),
|
||||
State: q.Get("state"),
|
||||
Scope: make([]string, 0),
|
||||
}
|
||||
|
||||
qs := strings.TrimSpace(q.Get("scope"))
|
||||
if qs != "" {
|
||||
acr.Scope = strings.Split(qs, " ")
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
if acr.ClientID == "" {
|
||||
return NewError(ErrorInvalidRequest)
|
||||
}
|
||||
|
||||
redirectURL := q.Get("redirect_uri")
|
||||
if redirectURL != "" {
|
||||
ru, err := url.Parse(redirectURL)
|
||||
if err != nil {
|
||||
return NewError(ErrorInvalidRequest)
|
||||
}
|
||||
acr.RedirectURL = ru
|
||||
}
|
||||
|
||||
return nil
|
||||
}()
|
||||
|
||||
return acr, err
|
||||
}
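// Usage sketch (illustrative only, not part of the vendored source): parsing
// the query string of an incoming authorization request. The *http.Request
// variable r is hypothetical.
//
//	acr, err := oauth2.ParseAuthCodeRequest(r.URL.Query())
//	if err != nil {
//		// err is an *oauth2.Error, e.g. invalid_request
//	}
//	fmt.Println(acr.ClientID, acr.Scope, acr.State)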
|
846
vendor/github.com/coreos/go-oidc/oidc/client.go
generated
vendored
Normal file
|
@ -0,0 +1,846 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
"github.com/coreos/go-oidc/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
// amount of time that must pass after the last key sync
|
||||
// completes before another attempt may begin
|
||||
keySyncWindow = 5 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultScope = []string{"openid", "email", "profile"}
|
||||
|
||||
supportedAuthMethods = map[string]struct{}{
|
||||
oauth2.AuthMethodClientSecretBasic: struct{}{},
|
||||
oauth2.AuthMethodClientSecretPost: struct{}{},
|
||||
}
|
||||
)
|
||||
|
||||
type ClientCredentials oauth2.ClientCredentials
|
||||
|
||||
type ClientIdentity struct {
|
||||
Credentials ClientCredentials
|
||||
Metadata ClientMetadata
|
||||
}
|
||||
|
||||
type JWAOptions struct {
|
||||
// SigningAlg specifies a JWA alg for signing JWTs.
|
||||
//
|
||||
// Specifying this field implies different actions depending on the context. It may
|
||||
// require objects be serialized and signed as a JWT instead of plain JSON, or
|
||||
// require an existing JWT object use the specified alg.
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
|
||||
SigningAlg string
|
||||
// EncryptionAlg, if provided, specifies that the returned or sent object be stored
|
||||
// (or nested) within a JWT object and encrypted with the provided JWA alg.
|
||||
EncryptionAlg string
|
||||
// EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
|
||||
// EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
|
||||
// to A128CBC-HS256.
|
||||
//
|
||||
// If EncryptionEnc is provided EncryptionAlg must also be specified.
|
||||
EncryptionEnc string
|
||||
}
|
||||
|
||||
func (opt JWAOptions) valid() error {
|
||||
if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
|
||||
return errors.New("encryption encoding provided with no encryption algorithm")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (opt JWAOptions) defaults() JWAOptions {
|
||||
if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
|
||||
opt.EncryptionEnc = jose.EncA128CBCHS256
|
||||
}
|
||||
return opt
|
||||
}
|
||||
|
||||
var (
|
||||
// Ensure ClientMetadata satisfies these interfaces.
|
||||
_ json.Marshaler = &ClientMetadata{}
|
||||
_ json.Unmarshaler = &ClientMetadata{}
|
||||
)
|
||||
|
||||
// ClientMetadata holds metadata that the authorization server associates
|
||||
// with a client identifier. The fields range from human-facing display
|
||||
// strings such as client name, to items that impact the security of the
|
||||
// protocol, such as the list of valid redirect URIs.
|
||||
//
|
||||
// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
|
||||
//
|
||||
// TODO: support language specific claim representations
|
||||
// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
|
||||
type ClientMetadata struct {
|
||||
RedirectURIs []url.URL // Required
|
||||
|
||||
// A list of OAuth 2.0 "response_type" values that the client wishes to restrict
|
||||
// itself to. Either "code", "token", or another registered extension.
|
||||
//
|
||||
// If omitted, only "code" will be used.
|
||||
ResponseTypes []string
|
||||
// A list of OAuth 2.0 grant types the client wishes to restrict itself to.
|
||||
// The grant type values used by OIDC are "authorization_code", "implicit",
|
||||
// and "refresh_token".
|
||||
//
|
||||
// If omitted, only "authorization_code" will be used.
|
||||
GrantTypes []string
|
||||
// "native" or "web". If omitted, "web".
|
||||
ApplicationType string
|
||||
|
||||
// List of email addresses.
|
||||
Contacts []mail.Address
|
||||
// Name of client to be presented to the end-user.
|
||||
ClientName string
|
||||
// URL that references a logo for the Client application.
|
||||
LogoURI *url.URL
|
||||
// URL of the home page of the Client.
|
||||
ClientURI *url.URL
|
||||
// Profile data policies and terms of use to be provided to the end user.
|
||||
PolicyURI *url.URL
|
||||
TermsOfServiceURI *url.URL
|
||||
|
||||
// URL to or the value of the client's JSON Web Key Set document.
|
||||
JWKSURI *url.URL
|
||||
JWKS *jose.JWKSet
|
||||
|
||||
// URL referencing a file with a single JSON array of redirect URIs.
|
||||
SectorIdentifierURI *url.URL
|
||||
|
||||
SubjectType string
|
||||
|
||||
// Options to restrict the JWS alg and enc values used for server responses and requests.
|
||||
IDTokenResponseOptions JWAOptions
|
||||
UserInfoResponseOptions JWAOptions
|
||||
RequestObjectOptions JWAOptions
|
||||
|
||||
// Client requested authorization method and signing options for the token endpoint.
|
||||
//
|
||||
// Defaults to "client_secret_basic"
|
||||
TokenEndpointAuthMethod string
|
||||
TokenEndpointAuthSigningAlg string
|
||||
|
||||
// DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
|
||||
// user must reauthorize.
|
||||
//
|
||||
// If 0, no limitation is placed on the maximum.
|
||||
DefaultMaxAge int64
|
||||
// RequireAuthTime specifies if the auth_time claim in the ID token is required.
|
||||
RequireAuthTime bool
|
||||
|
||||
// Default Authentication Context Class Reference values for authentication requests.
|
||||
DefaultACRValues []string
|
||||
|
||||
// URI that a third party can use to initiate a login by the relying party.
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
|
||||
InitiateLoginURI *url.URL
|
||||
// Pre-registered request_uri values that may be cached by the server.
|
||||
RequestURIs []url.URL
|
||||
}
|
||||
|
||||
// Defaults returns a shallow copy of ClientMetadata with default
|
||||
// values replacing omitted fields.
|
||||
func (m ClientMetadata) Defaults() ClientMetadata {
|
||||
if len(m.ResponseTypes) == 0 {
|
||||
m.ResponseTypes = []string{oauth2.ResponseTypeCode}
|
||||
}
|
||||
if len(m.GrantTypes) == 0 {
|
||||
m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
|
||||
}
|
||||
if m.ApplicationType == "" {
|
||||
m.ApplicationType = "web"
|
||||
}
|
||||
if m.TokenEndpointAuthMethod == "" {
|
||||
m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
|
||||
}
|
||||
m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
|
||||
m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
|
||||
m.RequestObjectOptions = m.RequestObjectOptions.defaults()
|
||||
return m
|
||||
}
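// Usage sketch (illustrative only, not part of the vendored source): filling
// omitted registration fields. The redirect URI is a placeholder.
//
//	meta := oidc.ClientMetadata{
//		RedirectURIs: []url.URL{{Scheme: "https", Host: "app.example.com", Path: "/callback"}},
//	}
//	meta = meta.Defaults()
//	// meta.ResponseTypes == []string{"code"}
//	// meta.GrantTypes == []string{"authorization_code"}
//	// meta.TokenEndpointAuthMethod == "client_secret_basic"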
|
||||
|
||||
func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
|
||||
e := m.toEncodableStruct()
|
||||
return json.Marshal(&e)
|
||||
}
|
||||
|
||||
func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
|
||||
var e encodableClientMetadata
|
||||
if err := json.Unmarshal(data, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
meta, err := e.toStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := meta.Valid(); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = meta
|
||||
return nil
|
||||
}
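// Usage sketch (illustrative only, not part of the vendored source): decoding
// registration metadata; UnmarshalJSON validates the result via Valid. The
// JSON literal is a placeholder.
//
//	raw := []byte(`{"redirect_uris": ["https://app.example.com/callback"]}`)
//	var meta oidc.ClientMetadata
//	if err := json.Unmarshal(raw, &meta); err != nil {
//		// malformed or spec-violating metadata
//	}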
|
||||
|
||||
type encodableClientMetadata struct {
|
||||
RedirectURIs []string `json:"redirect_uris"` // Required
|
||||
ResponseTypes []string `json:"response_types,omitempty"`
|
||||
GrantTypes []string `json:"grant_types,omitempty"`
|
||||
ApplicationType string `json:"application_type,omitempty"`
|
||||
Contacts []string `json:"contacts,omitempty"`
|
||||
ClientName string `json:"client_name,omitempty"`
|
||||
LogoURI string `json:"logo_uri,omitempty"`
|
||||
ClientURI string `json:"client_uri,omitempty"`
|
||||
PolicyURI string `json:"policy_uri,omitempty"`
|
||||
TermsOfServiceURI string `json:"tos_uri,omitempty"`
|
||||
JWKSURI string `json:"jwks_uri,omitempty"`
|
||||
JWKS *jose.JWKSet `json:"jwks,omitempty"`
|
||||
SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
|
||||
SubjectType string `json:"subject_type,omitempty"`
|
||||
IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
|
||||
IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
|
||||
IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
|
||||
UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
|
||||
UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
|
||||
UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
|
||||
RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
|
||||
RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
|
||||
RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
|
||||
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
|
||||
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
|
||||
DefaultMaxAge int64 `json:"default_max_age,omitempty"`
|
||||
RequireAuthTime bool `json:"require_auth_time,omitempty"`
|
||||
DefaultACRValues []string `json:"default_acr_values,omitempty"`
|
||||
InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
|
||||
RequestURIs []string `json:"request_uris,omitempty"`
|
||||
}
|
||||
|
||||
func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
|
||||
p := stickyErrParser{}
|
||||
m := ClientMetadata{
|
||||
RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
|
||||
ResponseTypes: c.ResponseTypes,
|
||||
GrantTypes: c.GrantTypes,
|
||||
ApplicationType: c.ApplicationType,
|
||||
Contacts: p.parseEmails(c.Contacts, "contacts"),
|
||||
ClientName: c.ClientName,
|
||||
LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
|
||||
ClientURI: p.parseURI(c.ClientURI, "client_uri"),
|
||||
PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
|
||||
TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
|
||||
JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
|
||||
JWKS: c.JWKS,
|
||||
SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
|
||||
SubjectType: c.SubjectType,
|
||||
TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
|
||||
TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
|
||||
DefaultMaxAge: c.DefaultMaxAge,
|
||||
RequireAuthTime: c.RequireAuthTime,
|
||||
DefaultACRValues: c.DefaultACRValues,
|
||||
InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
|
||||
RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
|
||||
IDTokenResponseOptions: JWAOptions{
|
||||
c.IDTokenSignedResponseAlg,
|
||||
c.IDTokenEncryptedResponseAlg,
|
||||
c.IDTokenEncryptedResponseEnc,
|
||||
},
|
||||
UserInfoResponseOptions: JWAOptions{
|
||||
c.UserInfoSignedResponseAlg,
|
||||
c.UserInfoEncryptedResponseAlg,
|
||||
c.UserInfoEncryptedResponseEnc,
|
||||
},
|
||||
RequestObjectOptions: JWAOptions{
|
||||
c.RequestObjectSigningAlg,
|
||||
c.RequestObjectEncryptionAlg,
|
||||
c.RequestObjectEncryptionEnc,
|
||||
},
|
||||
}
|
||||
if p.firstErr != nil {
|
||||
return ClientMetadata{}, p.firstErr
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// stickyErrParser parses URIs and email addresses. Once it encounters
|
||||
// a parse error, subsequent calls become no-ops.
|
||||
type stickyErrParser struct {
|
||||
firstErr error
|
||||
}
|
||||
|
||||
func (p *stickyErrParser) parseURI(s, field string) *url.URL {
|
||||
if p.firstErr != nil || s == "" {
|
||||
return nil
|
||||
}
|
||||
u, err := url.Parse(s)
|
||||
if err == nil {
|
||||
if u.Host == "" {
|
||||
err = errors.New("no host in URI")
|
||||
} else if u.Scheme != "http" && u.Scheme != "https" {
|
||||
err = errors.New("invalid URI scheme")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
|
||||
return nil
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
|
||||
if p.firstErr != nil || len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
uris := make([]url.URL, len(s))
|
||||
for i, val := range s {
|
||||
if val == "" {
|
||||
p.firstErr = fmt.Errorf("invalid URI in field %s", field)
|
||||
return nil
|
||||
}
|
||||
if u := p.parseURI(val, field); u != nil {
|
||||
uris[i] = *u
|
||||
}
|
||||
}
|
||||
return uris
|
||||
}
|
||||
|
||||
func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
|
||||
if p.firstErr != nil || len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
addrs := make([]mail.Address, len(s))
|
||||
for i, addr := range s {
|
||||
if addr == "" {
|
||||
p.firstErr = fmt.Errorf("invalid email in field %s", field)
|
||||
return nil
|
||||
}
|
||||
a, err := mail.ParseAddress(addr)
|
||||
if err != nil {
|
||||
p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
|
||||
return nil
|
||||
}
|
||||
addrs[i] = *a
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
|
||||
return encodableClientMetadata{
|
||||
RedirectURIs: urisToStrings(m.RedirectURIs),
|
||||
ResponseTypes: m.ResponseTypes,
|
||||
GrantTypes: m.GrantTypes,
|
||||
ApplicationType: m.ApplicationType,
|
||||
Contacts: emailsToStrings(m.Contacts),
|
||||
ClientName: m.ClientName,
|
||||
LogoURI: uriToString(m.LogoURI),
|
||||
ClientURI: uriToString(m.ClientURI),
|
||||
PolicyURI: uriToString(m.PolicyURI),
|
||||
TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
|
||||
JWKSURI: uriToString(m.JWKSURI),
|
||||
JWKS: m.JWKS,
|
||||
SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
|
||||
SubjectType: m.SubjectType,
|
||||
IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
|
||||
IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
|
||||
IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
|
||||
UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
|
||||
UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
|
||||
UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
|
||||
RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
|
||||
RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
|
||||
RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
|
||||
TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
|
||||
TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
|
||||
DefaultMaxAge: m.DefaultMaxAge,
|
||||
RequireAuthTime: m.RequireAuthTime,
|
||||
DefaultACRValues: m.DefaultACRValues,
|
||||
InitiateLoginURI: uriToString(m.InitiateLoginURI),
|
||||
RequestURIs: urisToStrings(m.RequestURIs),
|
||||
}
|
||||
}
|
||||
|
||||
func uriToString(u *url.URL) string {
|
||||
if u == nil {
|
||||
return ""
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func urisToStrings(urls []url.URL) []string {
|
||||
if len(urls) == 0 {
|
||||
return nil
|
||||
}
|
||||
sli := make([]string, len(urls))
|
||||
for i, u := range urls {
|
||||
sli[i] = u.String()
|
||||
}
|
||||
return sli
|
||||
}
|
||||
|
||||
func emailsToStrings(addrs []mail.Address) []string {
|
||||
if len(addrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
sli := make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
sli[i] = addr.String()
|
||||
}
|
||||
return sli
|
||||
}
|
||||
|
||||
// Valid determines if a ClientMetadata conforms with the OIDC specification.
|
||||
//
|
||||
// Valid is called by UnmarshalJSON.
|
||||
//
|
||||
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
|
||||
// URLs fields where the OIDC spec requires it. This may change in future releases
|
||||
// of this package. See: https://github.com/coreos/go-oidc/issues/34
|
||||
func (m *ClientMetadata) Valid() error {
|
||||
if len(m.RedirectURIs) == 0 {
|
||||
return errors.New("zero redirect URLs")
|
||||
}
|
||||
|
||||
validURI := func(u *url.URL, fieldName string) error {
|
||||
if u.Host == "" {
|
||||
return fmt.Errorf("no host for uri field %s", fieldName)
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
uris := []struct {
|
||||
val *url.URL
|
||||
name string
|
||||
}{
|
||||
{m.LogoURI, "logo_uri"},
|
||||
{m.ClientURI, "client_uri"},
|
||||
{m.PolicyURI, "policy_uri"},
|
||||
{m.TermsOfServiceURI, "tos_uri"},
|
||||
{m.JWKSURI, "jwks_uri"},
|
||||
{m.SectorIdentifierURI, "sector_identifier_uri"},
|
||||
{m.InitiateLoginURI, "initiate_login_uri"},
|
||||
}
|
||||
|
||||
for _, uri := range uris {
|
||||
if uri.val == nil {
|
||||
continue
|
||||
}
|
||||
if err := validURI(uri.val, uri.name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
uriLists := []struct {
|
||||
vals []url.URL
|
||||
name string
|
||||
}{
|
||||
{m.RedirectURIs, "redirect_uris"},
|
||||
{m.RequestURIs, "request_uris"},
|
||||
}
|
||||
for _, list := range uriLists {
|
||||
for _, uri := range list.vals {
|
||||
if err := validURI(&uri, list.name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
options := []struct {
|
||||
option JWAOptions
|
||||
name string
|
||||
}{
|
||||
{m.IDTokenResponseOptions, "id_token response"},
|
||||
{m.UserInfoResponseOptions, "userinfo response"},
|
||||
{m.RequestObjectOptions, "request_object"},
|
||||
}
|
||||
for _, option := range options {
|
||||
if err := option.option.valid(); err != nil {
|
||||
return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ClientRegistrationResponse struct {
|
||||
ClientID string // Required
|
||||
ClientSecret string
|
||||
RegistrationAccessToken string
|
||||
RegistrationClientURI string
|
||||
// If IsZero is true, unspecified.
|
||||
ClientIDIssuedAt time.Time
|
||||
// Time at which the client_secret will expire.
|
||||
// If IsZero is true, it will not expire.
|
||||
ClientSecretExpiresAt time.Time
|
||||
|
||||
ClientMetadata
|
||||
}
|
||||
|
||||
type encodableClientRegistrationResponse struct {
|
||||
ClientID string `json:"client_id"` // Required
|
||||
ClientSecret string `json:"client_secret,omitempty"`
|
||||
RegistrationAccessToken string `json:"registration_access_token,omitempty"`
|
||||
RegistrationClientURI string `json:"registration_client_uri,omitempty"`
|
||||
ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
|
||||
// Time at which the client_secret will expire, in seconds since the epoch.
|
||||
// If 0 it will not expire.
|
||||
ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
|
||||
|
||||
encodableClientMetadata
|
||||
}
|
||||
|
||||
func unixToSec(t time.Time) int64 {
|
||||
if t.IsZero() {
|
||||
return 0
|
||||
}
|
||||
return t.Unix()
|
||||
}
|
||||
|
||||
func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
|
||||
e := encodableClientRegistrationResponse{
|
||||
ClientID: c.ClientID,
|
||||
ClientSecret: c.ClientSecret,
|
||||
RegistrationAccessToken: c.RegistrationAccessToken,
|
||||
RegistrationClientURI: c.RegistrationClientURI,
|
||||
ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
|
||||
ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
|
||||
encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
|
||||
}
|
||||
return json.Marshal(&e)
|
||||
}
|
||||
|
||||
func secToUnix(sec int64) time.Time {
|
||||
if sec == 0 {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.Unix(sec, 0)
|
||||
}
|
||||
|
||||
func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
|
||||
var e encodableClientRegistrationResponse
|
||||
if err := json.Unmarshal(data, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.ClientID == "" {
|
||||
return errors.New("no client_id in client registration response")
|
||||
}
|
||||
metadata, err := e.encodableClientMetadata.toStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*c = ClientRegistrationResponse{
|
||||
ClientID: e.ClientID,
|
||||
ClientSecret: e.ClientSecret,
|
||||
RegistrationAccessToken: e.RegistrationAccessToken,
|
||||
RegistrationClientURI: e.RegistrationClientURI,
|
||||
ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
|
||||
ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
|
||||
ClientMetadata: metadata,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ClientConfig struct {
|
||||
HTTPClient phttp.Client
|
||||
Credentials ClientCredentials
|
||||
Scope []string
|
||||
RedirectURL string
|
||||
ProviderConfig ProviderConfig
|
||||
KeySet key.PublicKeySet
|
||||
}
|
||||
|
||||
func NewClient(cfg ClientConfig) (*Client, error) {
|
||||
// Allow empty redirect URL in the case where the client
|
||||
// only needs to verify a given token.
|
||||
ru, err := url.Parse(cfg.RedirectURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid redirect URL: %v", err)
|
||||
}
|
||||
|
||||
c := Client{
|
||||
credentials: cfg.Credentials,
|
||||
httpClient: cfg.HTTPClient,
|
||||
scope: cfg.Scope,
|
||||
redirectURL: ru.String(),
|
||||
providerConfig: newProviderConfigRepo(cfg.ProviderConfig),
|
||||
keySet: cfg.KeySet,
|
||||
}
|
||||
|
||||
if c.httpClient == nil {
|
||||
c.httpClient = http.DefaultClient
|
||||
}
|
||||
|
||||
if c.scope == nil {
|
||||
c.scope = make([]string, len(DefaultScope))
|
||||
copy(c.scope, DefaultScope)
|
||||
}
|
||||
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
httpClient phttp.Client
|
||||
providerConfig *providerConfigRepo
|
||||
credentials ClientCredentials
|
||||
redirectURL string
|
||||
scope []string
|
||||
keySet key.PublicKeySet
|
||||
providerSyncer *ProviderConfigSyncer
|
||||
|
||||
keySetSyncMutex sync.RWMutex
|
||||
lastKeySetSync time.Time
|
||||
}
|
||||
|
||||
func (c *Client) Healthy() error {
|
||||
now := time.Now().UTC()
|
||||
|
||||
cfg := c.providerConfig.Get()
|
||||
|
||||
if cfg.Empty() {
|
||||
return errors.New("oidc client provider config empty")
|
||||
}
|
||||
|
||||
if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) {
|
||||
return errors.New("oidc client provider config expired")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) OAuthClient() (*oauth2.Client, error) {
|
||||
cfg := c.providerConfig.Get()
|
||||
authMethod, err := chooseAuthMethod(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ocfg := oauth2.Config{
|
||||
Credentials: oauth2.ClientCredentials(c.credentials),
|
||||
RedirectURL: c.redirectURL,
|
||||
AuthURL: cfg.AuthEndpoint.String(),
|
||||
TokenURL: cfg.TokenEndpoint.String(),
|
||||
Scope: c.scope,
|
||||
AuthMethod: authMethod,
|
||||
}
|
||||
|
||||
return oauth2.NewClient(c.httpClient, ocfg)
|
||||
}
|
||||
|
||||
func chooseAuthMethod(cfg ProviderConfig) (string, error) {
|
||||
if len(cfg.TokenEndpointAuthMethodsSupported) == 0 {
|
||||
return oauth2.AuthMethodClientSecretBasic, nil
|
||||
}
|
||||
|
||||
for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported {
|
||||
if _, ok := supportedAuthMethods[authMethod]; ok {
|
||||
return authMethod, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New("no supported auth methods")
|
||||
}
|
||||
|
||||
// SyncProviderConfig starts the provider config syncer and blocks until the initial sync has completed.
|
||||
func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
|
||||
r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
|
||||
s := NewProviderConfigSyncer(r, c.providerConfig)
|
||||
stop := s.Run()
|
||||
s.WaitUntilInitialSync()
|
||||
return stop
|
||||
}
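// Usage sketch (illustrative only, not part of the vendored source): wiring up
// a client against a provider's discovery endpoint. URLs and credentials are
// placeholders; leaving HTTPClient nil falls back to http.DefaultClient, as
// NewClient does above.
//
//	client, err := oidc.NewClient(oidc.ClientConfig{
//		Credentials: oidc.ClientCredentials{ID: "my-client", Secret: "my-secret"},
//		RedirectURL: "https://app.example.com/callback",
//	})
//	if err != nil {
//		// handle error
//	}
//	stop := client.SyncProviderConfig("https://provider.example.com")
//	defer close(stop) // assumption: closing the returned channel halts the background sync
//	jwt, err := client.ExchangeAuthCode("authorization-code-from-callback")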
|
||||
|
||||
func (c *Client) maybeSyncKeys() error {
|
||||
tooSoon := func() bool {
|
||||
return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow))
|
||||
}
|
||||
|
||||
// ignore request to sync keys if a sync operation has been
|
||||
// attempted too recently
|
||||
if tooSoon() {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.keySetSyncMutex.Lock()
|
||||
defer c.keySetSyncMutex.Unlock()
|
||||
|
||||
// check again, as another goroutine may have been holding
|
||||
// the lock while updating the keys
|
||||
if tooSoon() {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := c.providerConfig.Get()
|
||||
r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
|
||||
w := &clientKeyRepo{client: c}
|
||||
_, err := key.Sync(r, w)
|
||||
c.lastKeySetSync = time.Now().UTC()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type clientKeyRepo struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
func (r *clientKeyRepo) Set(ks key.KeySet) error {
|
||||
pks, ok := ks.(*key.PublicKeySet)
|
||||
if !ok {
|
||||
return errors.New("unable to cast to PublicKey")
|
||||
}
|
||||
r.client.keySet = *pks
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) {
|
||||
cfg := c.providerConfig.Get()
|
||||
|
||||
if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) {
|
||||
return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds)
|
||||
}
|
||||
|
||||
oac, err := c.OAuthClient()
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
t, err := oac.ClientCredsToken(scope)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
jwt, err := jose.ParseJWT(t.IDToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
return jwt, c.VerifyJWT(jwt)
|
||||
}
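// Usage sketch (illustrative only, not part of the vendored source): the
// client_credentials flow, which only succeeds against providers that
// advertise that grant type. The client variable is hypothetical.
//
//	jwt, err := client.ClientCredsToken([]string{"openid"})
//	if err != nil {
//		// provider does not support client_credentials, or the exchange failed
//	}
//	_ = jwt // already verified against the provider's signing keys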
|
||||
|
||||
// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
|
||||
func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
|
||||
oac, err := c.OAuthClient()
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
jwt, err := jose.ParseJWT(t.IDToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
return jwt, c.VerifyJWT(jwt)
|
||||
}
|
||||
|
||||
// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
|
||||
func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
|
||||
oac, err := c.OAuthClient()
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
jwt, err := jose.ParseJWT(t.IDToken)
|
||||
if err != nil {
|
||||
return jose.JWT{}, err
|
||||
}
|
||||
|
||||
return jwt, c.VerifyJWT(jwt)
|
||||
}
|
||||
|
||||
func (c *Client) VerifyJWT(jwt jose.JWT) error {
|
||||
var keysFunc func() []key.PublicKey
|
||||
if kID, ok := jwt.KeyID(); ok {
|
||||
keysFunc = c.keysFuncWithID(kID)
|
||||
} else {
|
||||
keysFunc = c.keysFuncAll()
|
||||
}
|
||||
|
||||
v := NewJWTVerifier(
|
||||
c.providerConfig.Get().Issuer.String(),
|
||||
c.credentials.ID,
|
||||
c.maybeSyncKeys, keysFunc)
|
||||
|
||||
return v.Verify(jwt)
|
||||
}
|
||||
|
||||
// keysFuncWithID returns a function that retrieves at most unexpired
|
||||
// public key from the Client that matches the provided ID
|
||||
func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
|
||||
return func() []key.PublicKey {
|
||||
c.keySetSyncMutex.RLock()
|
||||
defer c.keySetSyncMutex.RUnlock()
|
||||
|
||||
if c.keySet.ExpiresAt().Before(time.Now()) {
|
||||
return []key.PublicKey{}
|
||||
}
|
||||
|
||||
k := c.keySet.Key(kID)
|
||||
if k == nil {
|
||||
return []key.PublicKey{}
|
||||
}
|
||||
|
||||
return []key.PublicKey{*k}
|
||||
}
|
||||
}
|
||||
|
||||
// keysFuncAll returns a function that retrieves all unexpired public
|
||||
// keys from the Client
|
||||
func (c *Client) keysFuncAll() func() []key.PublicKey {
|
||||
return func() []key.PublicKey {
|
||||
c.keySetSyncMutex.RLock()
|
||||
defer c.keySetSyncMutex.RUnlock()
|
||||
|
||||
if c.keySet.ExpiresAt().Before(time.Now()) {
|
||||
return []key.PublicKey{}
|
||||
}
|
||||
|
||||
return c.keySet.Keys()
|
||||
}
|
||||
}
|
||||
|
||||
type providerConfigRepo struct {
|
||||
mu sync.RWMutex
|
||||
config ProviderConfig // do not access directly, use Get()
|
||||
}
|
||||
|
||||
func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
|
||||
return &providerConfigRepo{sync.RWMutex{}, pc}
|
||||
}
|
||||
|
||||
// returns an error to implement ProviderConfigSetter
|
||||
func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.config = cfg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *providerConfigRepo) Get() ProviderConfig {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
return r.config
|
||||
}
|
44
vendor/github.com/coreos/go-oidc/oidc/identity.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
)
|
||||
|
||||
type Identity struct {
|
||||
ID string
|
||||
Name string
|
||||
Email string
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
|
||||
if claims == nil {
|
||||
return nil, errors.New("nil claim set")
|
||||
}
|
||||
|
||||
var ident Identity
|
||||
var err error
|
||||
var ok bool
|
||||
|
||||
if ident.ID, ok, err = claims.StringClaim("sub"); err != nil {
|
||||
return nil, err
|
||||
} else if !ok {
|
||||
return nil, errors.New("missing required claim: sub")
|
||||
}
|
||||
|
||||
if ident.Email, _, err = claims.StringClaim("email"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exp, ok, err := claims.TimeClaim("exp")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if ok {
|
||||
ident.ExpiresAt = exp
|
||||
}
|
||||
|
||||
return &ident, nil
|
||||
}
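// Usage sketch (illustrative only, not part of the vendored source): pulling a
// user identity out of a verified ID token. The Claims accessor on jose.JWT is
// assumed from the jose package; the jwt value is hypothetical.
//
//	claims, err := jwt.Claims()
//	if err != nil {
//		// token payload could not be decoded
//	}
//	ident, err := oidc.IdentityFromClaims(claims)
//	if err != nil {
//		// missing required "sub" claim, or malformed claim values
//	}
//	fmt.Println(ident.ID, ident.Email, ident.ExpiresAt)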
|
3
vendor/github.com/coreos/go-oidc/oidc/interface.go
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
package oidc
|
||||
|
||||
type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error)
|
67
vendor/github.com/coreos/go-oidc/oidc/key.go
generated
vendored
Executable file
|
@ -0,0 +1,67 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
)
|
||||
|
||||
// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
|
||||
// Cache-Control header is provided by the JWK Set document endpoint.
|
||||
const DefaultPublicKeySetTTL = 24 * time.Hour
|
||||
|
||||
// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document.
|
||||
func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
|
||||
return &remotePublicKeyRepo{hc: hc, ep: ep}
|
||||
}
|
||||
|
||||
type remotePublicKeyRepo struct {
|
||||
hc phttp.Client
|
||||
ep string
|
||||
}
|
||||
|
||||
// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
|
||||
// is set on the Key Set to avoid it having to be re-retrieved for every
|
||||
// encryption event. This TTL is typically controlled by the endpoint returning
|
||||
// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
|
||||
// is found.
|
||||
func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
|
||||
req, err := http.NewRequest("GET", r.ep, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := r.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var d struct {
|
||||
Keys []jose.JWK `json:"keys"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(d.Keys) == 0 {
|
||||
return nil, errors.New("zero keys in response")
|
||||
}
|
||||
|
||||
ttl, ok, err := phttp.Cacheable(resp.Header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
ttl = DefaultPublicKeySetTTL
|
||||
}
|
||||
|
||||
exp := time.Now().UTC().Add(ttl)
|
||||
ks := key.NewPublicKeySet(d.Keys, exp)
|
||||
return ks, nil
|
||||
}
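// Usage sketch (illustrative only, not part of the vendored source): fetching
// a provider's signing keys directly. The endpoint URL is a placeholder;
// *http.Client is assumed to satisfy phttp.Client.
//
//	repo := oidc.NewRemotePublicKeyRepo(http.DefaultClient, "https://provider.example.com/keys")
//	ks, err := repo.Get()
//	if err != nil {
//		// endpoint unreachable, empty key set, or bad Cache-Control header
//	}
//	_ = ks.ExpiresAt() // TTL derived from Cache-Control, default 24 hours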
|
688
vendor/github.com/coreos/go-oidc/oidc/provider.go
generated
vendored
Normal file
|
@ -0,0 +1,688 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/coreos/pkg/timeutil"
|
||||
"github.com/jonboulle/clockwork"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/oauth2"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
|
||||
)
|
||||
|
||||
const (
|
||||
// Subject Identifier types defined by the OIDC spec. Specifies if the provider
|
||||
// should provide the same sub claim value to all clients (public) or a unique
|
||||
// value for each client (pairwise).
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
|
||||
SubjectTypePublic = "public"
|
||||
SubjectTypePairwise = "pairwise"
|
||||
)
|
||||
|
||||
var (
|
||||
// Default values for omitted provider config fields.
|
||||
//
|
||||
// Use ProviderConfig's Defaults method to fill a provider config with these values.
|
||||
DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
|
||||
DefaultResponseModesSupported = []string{"query", "fragment"}
|
||||
DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
|
||||
DefaultClaimTypesSupported = []string{"normal"}
|
||||
)
|
||||
|
||||
const (
|
||||
MaximumProviderConfigSyncInterval = 24 * time.Hour
|
||||
MinimumProviderConfigSyncInterval = time.Minute
|
||||
|
||||
discoveryConfigPath = "/.well-known/openid-configuration"
|
||||
)
|
||||
|
||||
// internally configurable for tests
|
||||
var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
|
||||
|
||||
var (
|
||||
// Ensure ProviderConfig satisfies these interfaces.
|
||||
_ json.Marshaler = &ProviderConfig{}
|
||||
_ json.Unmarshaler = &ProviderConfig{}
|
||||
)
|
||||
|
||||
// ProviderConfig represents the OpenID Provider Metadata specifying what
|
||||
// configurations a provider supports.
|
||||
//
|
||||
// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
|
||||
type ProviderConfig struct {
|
||||
Issuer *url.URL // Required
|
||||
AuthEndpoint *url.URL // Required
|
||||
TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
|
||||
UserInfoEndpoint *url.URL
|
||||
KeysEndpoint *url.URL // Required
|
||||
RegistrationEndpoint *url.URL
|
||||
|
||||
// Servers MAY choose not to advertise some supported scope values even when this
|
||||
// parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
|
||||
ScopesSupported []string
|
||||
// OAuth2.0 response types supported.
|
||||
ResponseTypesSupported []string // Required
|
||||
// OAuth2.0 response modes supported.
|
||||
//
|
||||
// If omitted, defaults to DefaultResponseModesSupported.
|
||||
ResponseModesSupported []string
|
||||
// OAuth2.0 grant types supported.
|
||||
//
|
||||
// If omitted, defaults to DefaultGrantTypesSupported.
|
||||
GrantTypesSupported []string
|
||||
ACRValuesSupported []string
|
||||
// SubjectTypesSupported specifies strategies for providing values for the sub claim.
|
||||
SubjectTypesSupported []string // Required
|
||||
|
||||
// JWA signing and encryption algorithm values supported for ID tokens.
|
||||
IDTokenSigningAlgValues []string // Required
|
||||
IDTokenEncryptionAlgValues []string
|
||||
IDTokenEncryptionEncValues []string
|
||||
|
||||
// JWA signing and encryption algorithm values supported for user info responses.
|
||||
UserInfoSigningAlgValues []string
|
||||
UserInfoEncryptionAlgValues []string
|
||||
UserInfoEncryptionEncValues []string
|
||||
|
||||
// JWA signing and encryption algorithm values supported for request objects.
|
||||
ReqObjSigningAlgValues []string
|
||||
ReqObjEncryptionAlgValues []string
|
||||
ReqObjEncryptionEncValues []string
|
||||
|
||||
TokenEndpointAuthMethodsSupported []string
|
||||
TokenEndpointAuthSigningAlgValuesSupported []string
|
||||
DisplayValuesSupported []string
|
||||
ClaimTypesSupported []string
|
||||
ClaimsSupported []string
|
||||
ServiceDocs *url.URL
|
||||
ClaimsLocalsSupported []string
|
||||
UILocalsSupported []string
|
||||
ClaimsParameterSupported bool
|
||||
RequestParameterSupported bool
|
||||
RequestURIParamaterSupported bool
|
||||
RequireRequestURIRegistration bool
|
||||
|
||||
Policy *url.URL
|
||||
TermsOfService *url.URL
|
||||
|
||||
// Not part of the OpenID Provider Metadata
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
// Defaults returns a shallow copy of ProviderConfig with default
|
||||
// values replacing omitted fields.
|
||||
//
|
||||
// var cfg oidc.ProviderConfig
|
||||
// // Fill provider config with default values for omitted fields.
|
||||
// cfg = cfg.Defaults()
|
||||
//
|
||||
func (p ProviderConfig) Defaults() ProviderConfig {
|
||||
setDefault := func(val *[]string, defaultVal []string) {
|
||||
if len(*val) == 0 {
|
||||
*val = defaultVal
|
||||
}
|
||||
}
|
||||
setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
|
||||
setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
|
||||
setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
|
||||
setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
|
||||
e := p.toEncodableStruct()
|
||||
return json.Marshal(&e)
|
||||
}
|
||||
|
||||
func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
|
||||
var e encodableProviderConfig
|
||||
if err := json.Unmarshal(data, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
conf, err := e.toStruct()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := conf.Valid(); err != nil {
|
||||
return err
|
||||
}
|
||||
*p = conf
|
||||
return nil
|
||||
}
|
||||
|
||||
type encodableProviderConfig struct {
|
||||
Issuer string `json:"issuer"`
|
||||
AuthEndpoint string `json:"authorization_endpoint"`
|
||||
TokenEndpoint string `json:"token_endpoint"`
|
||||
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
|
||||
KeysEndpoint string `json:"jwks_uri"`
|
||||
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
|
||||
|
||||
// Use 'omitempty' for all slices as per OIDC spec:
|
||||
// "Claims that return multiple values are represented as JSON arrays.
|
||||
// Claims with zero elements MUST be omitted from the response."
|
||||
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
|
||||
|
||||
ScopesSupported []string `json:"scopes_supported,omitempty"`
|
||||
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
|
||||
ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
|
||||
GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
|
||||
ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
|
||||
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
|
||||
|
||||
IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
|
||||
IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
|
||||
IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
|
||||
UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
|
||||
UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
|
||||
UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
|
||||
ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
|
||||
ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
|
||||
ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
|
||||
|
||||
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
|
||||
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
|
||||
|
||||
DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
|
||||
ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
|
||||
ClaimsSupported []string `json:"claims_supported,omitempty"`
|
||||
ServiceDocs string `json:"service_documentation,omitempty"`
|
||||
ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
|
||||
UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
|
||||
ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
|
||||
RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
|
||||
RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
|
||||
RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
|
||||
|
||||
Policy string `json:"op_policy_uri,omitempty"`
|
||||
TermsOfService string `json:"op_tos_uri,omitempty"`
|
||||
}
|
||||
|
||||
func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
|
||||
return encodableProviderConfig{
|
||||
Issuer: uriToString(cfg.Issuer),
|
||||
AuthEndpoint: uriToString(cfg.AuthEndpoint),
|
||||
TokenEndpoint: uriToString(cfg.TokenEndpoint),
|
||||
UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
|
||||
KeysEndpoint: uriToString(cfg.KeysEndpoint),
|
||||
RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
|
||||
ScopesSupported: cfg.ScopesSupported,
|
||||
ResponseTypesSupported: cfg.ResponseTypesSupported,
|
||||
ResponseModesSupported: cfg.ResponseModesSupported,
|
||||
GrantTypesSupported: cfg.GrantTypesSupported,
|
||||
ACRValuesSupported: cfg.ACRValuesSupported,
|
||||
SubjectTypesSupported: cfg.SubjectTypesSupported,
|
||||
IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
|
||||
IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
|
||||
IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
|
||||
UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
|
||||
UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
|
||||
UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
|
||||
ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
|
||||
ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
|
||||
ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
|
||||
TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
|
||||
TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
|
||||
DisplayValuesSupported: cfg.DisplayValuesSupported,
|
||||
ClaimTypesSupported: cfg.ClaimTypesSupported,
|
||||
ClaimsSupported: cfg.ClaimsSupported,
|
||||
ServiceDocs: uriToString(cfg.ServiceDocs),
|
||||
ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
|
||||
UILocalsSupported: cfg.UILocalsSupported,
|
||||
ClaimsParameterSupported: cfg.ClaimsParameterSupported,
|
||||
RequestParameterSupported: cfg.RequestParameterSupported,
|
||||
RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
|
||||
RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
|
||||
Policy: uriToString(cfg.Policy),
|
||||
TermsOfService: uriToString(cfg.TermsOfService),
|
||||
}
|
||||
}
|
||||
|
||||
func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
|
||||
p := stickyErrParser{}
|
||||
conf := ProviderConfig{
|
||||
Issuer: p.parseURI(e.Issuer, "issuer"),
|
||||
AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
|
||||
TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
|
||||
UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
|
||||
KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
|
||||
RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
|
||||
ScopesSupported: e.ScopesSupported,
|
||||
ResponseTypesSupported: e.ResponseTypesSupported,
|
||||
ResponseModesSupported: e.ResponseModesSupported,
|
||||
GrantTypesSupported: e.GrantTypesSupported,
|
||||
ACRValuesSupported: e.ACRValuesSupported,
|
||||
SubjectTypesSupported: e.SubjectTypesSupported,
|
||||
IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
|
||||
IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
|
||||
IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
|
||||
UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
|
||||
UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
|
||||
UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
|
||||
ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
|
||||
ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
|
||||
ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
|
||||
TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
|
||||
TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
|
||||
DisplayValuesSupported: e.DisplayValuesSupported,
|
||||
ClaimTypesSupported: e.ClaimTypesSupported,
|
||||
ClaimsSupported: e.ClaimsSupported,
|
||||
ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
|
||||
ClaimsLocalsSupported: e.ClaimsLocalsSupported,
|
||||
UILocalsSupported: e.UILocalsSupported,
|
||||
ClaimsParameterSupported: e.ClaimsParameterSupported,
|
||||
RequestParameterSupported: e.RequestParameterSupported,
|
||||
RequestURIParamaterSupported: e.RequestURIParamaterSupported,
|
||||
RequireRequestURIRegistration: e.RequireRequestURIRegistration,
|
||||
Policy: p.parseURI(e.Policy, "op_policy_uri"),
|
||||
TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
|
||||
}
|
||||
if p.firstErr != nil {
|
||||
return ProviderConfig{}, p.firstErr
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
// Empty reports whether a ProviderConfig holds no information.
|
||||
//
|
||||
// This case generally indicates a ProviderConfigGetter has experienced an error
|
||||
// and has nothing to report.
|
||||
func (p ProviderConfig) Empty() bool {
|
||||
return p.Issuer == nil
|
||||
}
|
||||
|
||||
func contains(sli []string, ele string) bool {
|
||||
for _, s := range sli {
|
||||
if s == ele {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Valid determines if a ProviderConfig conforms with the OIDC specification.
|
||||
// If Valid returns successfully it guarantees required field are non-nil and
|
||||
// URLs are well formed.
|
||||
//
|
||||
// Valid is called by UnmarshalJSON.
|
||||
//
|
||||
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
|
||||
// URLs fields where the OIDC spec requires it. This may change in future releases
|
||||
// of this package. See: https://github.com/coreos/go-oidc/issues/34
|
||||
func (p ProviderConfig) Valid() error {
|
||||
grantTypes := p.GrantTypesSupported
|
||||
if len(grantTypes) == 0 {
|
||||
grantTypes = DefaultGrantTypesSupported
|
||||
}
|
||||
implicitOnly := true
|
||||
for _, grantType := range grantTypes {
|
||||
if grantType != oauth2.GrantTypeImplicit {
|
||||
implicitOnly = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(p.SubjectTypesSupported) == 0 {
|
||||
return errors.New("missing required field subject_types_supported")
|
||||
}
|
||||
if len(p.IDTokenSigningAlgValues) == 0 {
|
||||
return errors.New("missing required field id_token_signing_alg_values_supported")
|
||||
}
|
||||
|
||||
if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
return errors.New("scopes_supported must be unspecified or include 'openid'")
}
|
||||
|
||||
if !contains(p.IDTokenSigningAlgValues, "RS256") {
|
||||
return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
|
||||
}
|
||||
if contains(p.TokenEndpointAuthSigningAlgValuesSupported, "none") {
return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'")
}
|
||||
|
||||
uris := []struct {
|
||||
val *url.URL
|
||||
name string
|
||||
required bool
|
||||
}{
|
||||
{p.Issuer, "issuer", true},
|
||||
{p.AuthEndpoint, "authorization_endpoint", true},
|
||||
{p.TokenEndpoint, "token_endpoint", !implicitOnly},
|
||||
{p.UserInfoEndpoint, "userinfo_endpoint", false},
|
||||
{p.KeysEndpoint, "jwks_uri", true},
|
||||
{p.RegistrationEndpoint, "registration_endpoint", false},
|
||||
{p.ServiceDocs, "service_documentation", false},
|
||||
{p.Policy, "op_policy_uri", false},
|
||||
{p.TermsOfService, "op_tos_uri", false},
|
||||
}
|
||||
|
||||
for _, uri := range uris {
|
||||
if uri.val == nil {
|
||||
if !uri.required {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("empty value for required uri field %s", uri.name)
|
||||
}
|
||||
if uri.val.Host == "" {
|
||||
return fmt.Errorf("no host for uri field %s", uri.name)
|
||||
}
|
||||
if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
return fmt.Errorf("uri field %s scheme is not http or https", uri.name)
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Supports determines whether the provider supports a client, given their respective metadata.
|
||||
func (p ProviderConfig) Supports(c ClientMetadata) error {
|
||||
if err := p.Valid(); err != nil {
|
||||
return fmt.Errorf("invalid provider config: %v", err)
|
||||
}
|
||||
if err := c.Valid(); err != nil {
|
||||
return fmt.Errorf("invalid client config: %v", err)
|
||||
}
|
||||
|
||||
// Fill default values for omitted fields
|
||||
c = c.Defaults()
|
||||
p = p.Defaults()
|
||||
|
||||
// Do the supported values list the requested one?
|
||||
supports := []struct {
|
||||
supported []string
|
||||
requested string
|
||||
name string
|
||||
}{
|
||||
{p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
|
||||
{p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
|
||||
{p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
|
||||
{p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
|
||||
{p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
|
||||
{p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
|
||||
{p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
|
||||
{p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
|
||||
{p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
|
||||
}
|
||||
for _, field := range supports {
|
||||
if field.requested == "" {
|
||||
continue
|
||||
}
|
||||
if !contains(field.supported, field.requested) {
|
||||
return fmt.Errorf("provider does not support requested value for field %s", field.name)
|
||||
}
|
||||
}
|
||||
|
||||
stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
|
||||
|
||||
// For list fields, is the list of requested values a subset of the supported ones?
|
||||
supportsAll := []struct {
|
||||
supported []string
|
||||
requested []string
|
||||
name string
|
||||
// OAuth2.0 response_type can be space separated lists where order doesn't matter.
|
||||
// For example "id_token token" is the same as "token id_token"
|
||||
// Support a custom compare method.
|
||||
comp func(s1, s2 string) bool
|
||||
}{
|
||||
{p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
|
||||
{p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
|
||||
}
|
||||
for _, field := range supportsAll {
|
||||
requestLoop:
|
||||
for _, req := range field.requested {
|
||||
for _, sup := range field.supported {
|
||||
if field.comp(req, sup) {
|
||||
continue requestLoop
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("provider does not support requested value for field %s", field.name)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p ProviderConfig) SupportsGrantType(grantType string) bool {
|
||||
var supported []string
|
||||
if len(p.GrantTypesSupported) == 0 {
|
||||
supported = DefaultGrantTypesSupported
|
||||
} else {
|
||||
supported = p.GrantTypesSupported
|
||||
}
|
||||
|
||||
for _, t := range supported {
|
||||
if t == grantType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type ProviderConfigGetter interface {
|
||||
Get() (ProviderConfig, error)
|
||||
}
|
||||
|
||||
type ProviderConfigSetter interface {
|
||||
Set(ProviderConfig) error
|
||||
}
|
||||
|
||||
type ProviderConfigSyncer struct {
|
||||
from ProviderConfigGetter
|
||||
to ProviderConfigSetter
|
||||
clock clockwork.Clock
|
||||
|
||||
initialSyncDone bool
|
||||
initialSyncWait sync.WaitGroup
|
||||
}
|
||||
|
||||
func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
|
||||
return &ProviderConfigSyncer{
|
||||
from: from,
|
||||
to: to,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ProviderConfigSyncer) Run() chan struct{} {
|
||||
stop := make(chan struct{})
|
||||
|
||||
var next pcsStepper
|
||||
next = &pcsStepNext{aft: time.Duration(0)}
|
||||
|
||||
s.initialSyncWait.Add(1)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.clock.After(next.after()):
|
||||
next = next.step(s.sync)
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return stop
|
||||
}
|
||||
|
||||
func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
|
||||
s.initialSyncWait.Wait()
|
||||
}
|
||||
|
||||
func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
|
||||
cfg, err := s.from.Get()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err = s.to.Set(cfg); err != nil {
|
||||
return 0, fmt.Errorf("error setting provider config: %v", err)
|
||||
}
|
||||
|
||||
if !s.initialSyncDone {
|
||||
s.initialSyncWait.Done()
|
||||
s.initialSyncDone = true
|
||||
}
|
||||
|
||||
log.Infof("Updating provider config: config=%#v", cfg)
|
||||
|
||||
return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
|
||||
}
|
||||
|
||||
type pcsStepFunc func() (time.Duration, error)
|
||||
|
||||
type pcsStepper interface {
|
||||
after() time.Duration
|
||||
step(pcsStepFunc) pcsStepper
|
||||
}
|
||||
|
||||
type pcsStepNext struct {
|
||||
aft time.Duration
|
||||
}
|
||||
|
||||
func (n *pcsStepNext) after() time.Duration {
|
||||
return n.aft
|
||||
}
|
||||
|
||||
func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
|
||||
ttl, err := fn()
|
||||
if err == nil {
|
||||
next = &pcsStepNext{aft: ttl}
|
||||
log.Debugf("Synced provider config, next attempt in %v", next.after())
|
||||
} else {
|
||||
next = &pcsStepRetry{aft: time.Second}
|
||||
log.Errorf("Provider config sync failed, retrying in %v: %v", next.after(), err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type pcsStepRetry struct {
|
||||
aft time.Duration
|
||||
}
|
||||
|
||||
func (r *pcsStepRetry) after() time.Duration {
|
||||
return r.aft
|
||||
}
|
||||
|
||||
func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
|
||||
ttl, err := fn()
|
||||
if err == nil {
|
||||
next = &pcsStepNext{aft: ttl}
|
||||
log.Infof("Provider config sync no longer failing")
|
||||
} else {
|
||||
next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
|
||||
log.Errorf("Provider config sync still failing, retrying in %v: %v", next.after(), err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration {
|
||||
if exp.IsZero() {
|
||||
return MaximumProviderConfigSyncInterval
|
||||
}
|
||||
|
||||
t := exp.Sub(clock.Now()) / 2
|
||||
if t > MaximumProviderConfigSyncInterval {
|
||||
t = MaximumProviderConfigSyncInterval
|
||||
} else if t < minimumProviderConfigSyncInterval {
|
||||
t = minimumProviderConfigSyncInterval
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
type httpProviderConfigGetter struct {
|
||||
hc phttp.Client
|
||||
issuerURL string
|
||||
clock clockwork.Clock
|
||||
}
|
||||
|
||||
func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
|
||||
return &httpProviderConfigGetter{
|
||||
hc: hc,
|
||||
issuerURL: issuerURL,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
|
||||
// If the Issuer value contains a path component, any terminating / MUST be removed before
|
||||
// appending /.well-known/openid-configuration.
|
||||
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
|
||||
discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
|
||||
req, err := http.NewRequest("GET", discoveryURL, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := r.hc.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var ttl time.Duration
|
||||
var ok bool
|
||||
ttl, ok, err = phttp.Cacheable(resp.Header)
|
||||
if err != nil {
|
||||
return
|
||||
} else if ok {
|
||||
cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl)
|
||||
}
|
||||
|
||||
// The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
|
||||
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
|
||||
if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
|
||||
err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) {
|
||||
if hc == nil {
|
||||
hc = http.DefaultClient
|
||||
}
|
||||
|
||||
g := NewHTTPProviderConfigGetter(hc, issuerURL)
|
||||
return g.Get()
|
||||
}
|
||||
|
||||
func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) {
|
||||
return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock())
|
||||
}
|
||||
|
||||
func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) {
|
||||
var sleep time.Duration
|
||||
var err error
|
||||
for {
|
||||
pcfg, err = FetchProviderConfig(hc, issuerURL)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
sleep = timeutil.ExpBackoff(sleep, time.Minute)
|
||||
fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err)
|
||||
time.Sleep(sleep)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
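A minimal sketch of using the discovery helpers above; the issuer URL is a placeholder, and the nil client deliberately exercises the http.DefaultClient fallback inside FetchProviderConfig:

package main

import (
    "fmt"

    "github.com/coreos/go-oidc/oidc"
)

func main() {
    // A nil client falls back to http.DefaultClient (see FetchProviderConfig above).
    cfg, err := oidc.FetchProviderConfig(nil, "https://accounts.example.com")
    if err != nil {
        fmt.Println("discovery failed:", err)
        return
    }
    fmt.Println("issuer:", cfg.Issuer)
    fmt.Println("authorization endpoint:", cfg.AuthEndpoint)
}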
88 vendor/github.com/coreos/go-oidc/oidc/transport.go generated vendored Normal file
@@ -0,0 +1,88 @@
package oidc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
phttp "github.com/coreos/go-oidc/http"
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
)
|
||||
|
||||
type TokenRefresher interface {
|
||||
// Verify checks if the provided token is currently valid or not.
|
||||
Verify(jose.JWT) error
|
||||
|
||||
// Refresh attempts to authenticate and retrieve a new token.
|
||||
Refresh() (jose.JWT, error)
|
||||
}
|
||||
|
||||
type ClientCredsTokenRefresher struct {
|
||||
Issuer string
|
||||
OIDCClient *Client
|
||||
}
|
||||
|
||||
func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) {
|
||||
_, err = VerifyClientClaims(jwt, c.Issuer)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) {
|
||||
if err = c.OIDCClient.Healthy(); err != nil {
|
||||
err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("unable to verify auth code with issuer: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type AuthenticatedTransport struct {
|
||||
TokenRefresher
|
||||
http.RoundTripper
|
||||
|
||||
mu sync.Mutex
|
||||
jwt jose.JWT
|
||||
}
|
||||
|
||||
func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.TokenRefresher.Verify(t.jwt) == nil {
|
||||
return t.jwt, nil
|
||||
}
|
||||
|
||||
jwt, err := t.TokenRefresher.Refresh()
|
||||
if err != nil {
|
||||
return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err)
|
||||
}
|
||||
|
||||
t.jwt = jwt
|
||||
return t.jwt, nil
|
||||
}
|
||||
|
||||
// SetJWT sets the JWT held by the Transport.
|
||||
// This is useful for cases in which you want to set an initial JWT.
|
||||
func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.jwt = jwt
|
||||
}
|
||||
|
||||
func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
jwt, err := t.verifiedJWT()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := phttp.CopyRequest(r)
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode()))
|
||||
return t.RoundTripper.RoundTrip(req)
|
||||
}
|
109 vendor/github.com/coreos/go-oidc/oidc/util.go generated vendored Normal file
@@ -0,0 +1,109 @@
package oidc
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
)
|
||||
|
||||
// RequestTokenExtractor funcs extract a raw encoded token from a request.
|
||||
type RequestTokenExtractor func(r *http.Request) (string, error)
|
||||
|
||||
// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's
|
||||
// Authorization header.
|
||||
func ExtractBearerToken(r *http.Request) (string, error) {
|
||||
ah := r.Header.Get("Authorization")
|
||||
if ah == "" {
|
||||
return "", errors.New("missing Authorization header")
|
||||
}
|
||||
|
||||
if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" {
|
||||
return "", errors.New("should be a bearer token")
|
||||
}
|
||||
|
||||
val := ah[7:]
|
||||
if len(val) == 0 {
|
||||
return "", errors.New("bearer token is empty")
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request.
|
||||
func CookieTokenExtractor(cookieName string) RequestTokenExtractor {
|
||||
return func(r *http.Request) (string, error) {
|
||||
ck, err := r.Cookie(cookieName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("token cookie not found in request: %v", err)
|
||||
}
|
||||
|
||||
if ck.Value == "" {
|
||||
return "", errors.New("token cookie found but is empty")
|
||||
}
|
||||
|
||||
return ck.Value, nil
|
||||
}
|
||||
}
|
||||
|
||||
func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims {
|
||||
return jose.Claims{
|
||||
// required
|
||||
"iss": iss,
|
||||
"sub": sub,
|
||||
"aud": aud,
|
||||
"iat": iat.Unix(),
|
||||
"exp": exp.Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
func GenClientID(hostport string) (string, error) {
|
||||
b, err := randBytes(32)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var host string
|
||||
if strings.Contains(hostport, ":") {
|
||||
host, _, err = net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
host = hostport
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil
|
||||
}
|
||||
|
||||
func randBytes(n int) ([]byte, error) {
|
||||
b := make([]byte, n)
|
||||
got, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if n != got {
|
||||
return nil, errors.New("unable to generate enough random data")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// urlEqual checks two urls for equality using only the host and path portions.
|
||||
func urlEqual(url1, url2 string) bool {
|
||||
u1, err := url.Parse(url1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
u2, err := url.Parse(url2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path)
|
||||
}
|
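A short sketch of wiring the extractors above into an HTTP handler; the handler, route, and cookie name are illustrative assumptions:

package main

import (
    "net/http"

    "github.com/coreos/go-oidc/oidc"
)

func handleAPI(w http.ResponseWriter, r *http.Request) {
    // Prefer the Authorization header; fall back to a session cookie.
    rawToken, err := oidc.ExtractBearerToken(r)
    if err != nil {
        rawToken, err = oidc.CookieTokenExtractor("session")(r)
    }
    if err != nil {
        http.Error(w, "missing token", http.StatusUnauthorized)
        return
    }
    _ = rawToken // hand the raw encoded token to a JWTVerifier or similar
}

func main() {
    http.HandleFunc("/api", handleAPI)
    http.ListenAndServe(":8080", nil) // error ignored for brevity
}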
188 vendor/github.com/coreos/go-oidc/oidc/verification.go generated vendored Normal file
@@ -0,0 +1,188 @@
package oidc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jonboulle/clockwork"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
)
|
||||
|
||||
func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) {
|
||||
jwtBytes := []byte(jwt.Data())
|
||||
for _, k := range keys {
|
||||
v, err := k.Verifier()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if v.Verify(jwt.Signature, jwtBytes) == nil {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// containsString returns true if the given string(needle) is found
|
||||
// in the string array(haystack).
|
||||
func containsString(needle string, haystack []string) bool {
|
||||
for _, v := range haystack {
|
||||
if v == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify claims in accordance with OIDC spec
|
||||
// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation
|
||||
func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {
|
||||
now := time.Now().UTC()
|
||||
|
||||
claims, err := jwt.Claims()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ident, err := IdentityFromClaims(claims)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ident.ExpiresAt.Before(now) {
|
||||
return errors.New("token is expired")
|
||||
}
|
||||
|
||||
// iss REQUIRED. Issuer Identifier for the Issuer of the response.
|
||||
// The iss value is a case sensitive URL using the https scheme that contains scheme,
|
||||
// host, and optionally, port number and path components and no query or fragment components.
|
||||
if iss, exists := claims["iss"].(string); exists {
|
||||
if !urlEqual(iss, issuer) {
|
||||
return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss)
|
||||
}
|
||||
} else {
|
||||
return errors.New("missing claim: 'iss'")
|
||||
}
|
||||
|
||||
// iat REQUIRED. Time at which the JWT was issued.
|
||||
// Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
|
||||
// as measured in UTC until the date/time.
|
||||
if _, exists := claims["iat"].(float64); !exists {
|
||||
return errors.New("missing claim: 'iat'")
|
||||
}
|
||||
|
||||
// aud REQUIRED. Audience(s) that this ID Token is intended for.
|
||||
// It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
|
||||
// It MAY also contain identifiers for other audiences. In the general case, the aud
|
||||
// value is an array of case sensitive strings. In the common special case when there
|
||||
// is one audience, the aud value MAY be a single case sensitive string.
|
||||
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
|
||||
if aud != clientID {
|
||||
return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
|
||||
}
|
||||
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
|
||||
if !containsString(clientID, aud) {
|
||||
return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
|
||||
}
|
||||
} else {
|
||||
return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
|
||||
// Returns the client ID if valid, or an error if invalid.
|
||||
func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
|
||||
claims, err := jwt.Claims()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse JWT claims: %v", err)
|
||||
}
|
||||
|
||||
iss, ok, err := claims.StringClaim("iss")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
|
||||
} else if !ok {
|
||||
return "", errors.New("missing required 'iss' claim")
|
||||
} else if !urlEqual(iss, issuer) {
|
||||
return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
|
||||
}
|
||||
|
||||
sub, ok, err := claims.StringClaim("sub")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
|
||||
} else if !ok {
|
||||
return "", errors.New("missing required 'sub' claim")
|
||||
}
|
||||
|
||||
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
|
||||
if aud != sub {
|
||||
return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
|
||||
}
|
||||
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
|
||||
if !containsString(sub, aud) {
|
||||
return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub)
|
||||
}
|
||||
} else {
|
||||
return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
exp, ok, err := claims.TimeClaim("exp")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
|
||||
} else if !ok {
|
||||
return "", errors.New("missing required 'exp' claim")
|
||||
} else if exp.Before(now) {
|
||||
return "", fmt.Errorf("token already expired at: %v", exp)
|
||||
}
|
||||
|
||||
return sub, nil
|
||||
}
|
||||
|
||||
type JWTVerifier struct {
|
||||
issuer string
|
||||
clientID string
|
||||
syncFunc func() error
|
||||
keysFunc func() []key.PublicKey
|
||||
clock clockwork.Clock
|
||||
}
|
||||
|
||||
func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
|
||||
return JWTVerifier{
|
||||
issuer: issuer,
|
||||
clientID: clientID,
|
||||
syncFunc: syncFunc,
|
||||
keysFunc: keysFunc,
|
||||
clock: clockwork.NewRealClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (v *JWTVerifier) Verify(jwt jose.JWT) error {
|
||||
ok, err := VerifySignature(jwt, v.keysFunc())
|
||||
if ok {
|
||||
goto SignatureVerified
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
|
||||
}
|
||||
|
||||
if err = v.syncFunc(); err != nil {
|
||||
return fmt.Errorf("oidc: failed syncing KeySet: %v", err)
|
||||
}
|
||||
|
||||
ok, err = VerifySignature(jwt, v.keysFunc())
|
||||
if err != nil {
|
||||
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
|
||||
} else if !ok {
|
||||
return errors.New("oidc: unable to verify JWT signature: no matching keys")
|
||||
}
|
||||
|
||||
SignatureVerified:
|
||||
if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil {
|
||||
return fmt.Errorf("oidc: JWT claims invalid: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
191 vendor/github.com/coreos/go-systemd/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
179 vendor/github.com/coreos/go-systemd/journal/journal.go generated vendored Normal file
@@ -0,0 +1,179 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Priority of a journal message
|
||||
type Priority int
|
||||
|
||||
const (
|
||||
PriEmerg Priority = iota
|
||||
PriAlert
|
||||
PriCrit
|
||||
PriErr
|
||||
PriWarning
|
||||
PriNotice
|
||||
PriInfo
|
||||
PriDebug
|
||||
)
|
||||
|
||||
var conn net.Conn
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
|
||||
if err != nil {
|
||||
conn = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Enabled returns true if the local systemd journal is available for logging
|
||||
func Enabled() bool {
|
||||
return conn != nil
|
||||
}
|
||||
|
||||
// Send a message to the local systemd journal. vars is a map of journald
|
||||
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||
// and underscores, but must not start with an underscore. Within these
|
||||
// restrictions, any arbitrary field name may be used. Some names have special
|
||||
// significance: see the journalctl documentation
|
||||
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
|
||||
// for more details. vars may be nil.
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
if conn == nil {
|
||||
return journalError("could not connect to journald socket")
|
||||
}
|
||||
|
||||
data := new(bytes.Buffer)
|
||||
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
|
||||
appendVariable(data, "MESSAGE", message)
|
||||
for k, v := range vars {
|
||||
appendVariable(data, k, v)
|
||||
}
|
||||
|
||||
_, err := io.Copy(conn, data)
|
||||
if err != nil && isSocketSpaceError(err) {
|
||||
file, err := tempFd()
|
||||
if err != nil {
|
||||
return journalError(err.Error())
|
||||
}
|
||||
defer file.Close()
|
||||
_, err = io.Copy(file, data)
|
||||
if err != nil {
|
||||
return journalError(err.Error())
|
||||
}
|
||||
|
||||
rights := syscall.UnixRights(int(file.Fd()))
|
||||
|
||||
/* this connection should always be a UnixConn, but better safe than sorry */
|
||||
unixConn, ok := conn.(*net.UnixConn)
|
||||
if !ok {
|
||||
return journalError("can't send file through non-Unix connection")
|
||||
}
|
||||
unixConn.WriteMsgUnix([]byte{}, rights, nil)
|
||||
} else if err != nil {
|
||||
return journalError(err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Print prints a message to the local systemd journal using Send().
|
||||
func Print(priority Priority, format string, a ...interface{}) error {
|
||||
return Send(fmt.Sprintf(format, a...), priority, nil)
|
||||
}
|
||||
|
||||
func appendVariable(w io.Writer, name, value string) {
|
||||
if !validVarName(name) {
|
||||
journalError("variable name contains invalid character, ignoring")
|
||||
}
|
||||
if strings.ContainsRune(value, '\n') {
|
||||
/* When the value contains a newline, we write:
|
||||
* - the variable name, followed by a newline
|
||||
* - the size (in 64bit little endian format)
|
||||
* - the data, followed by a newline
|
||||
*/
|
||||
fmt.Fprintln(w, name)
|
||||
binary.Write(w, binary.LittleEndian, uint64(len(value)))
|
||||
fmt.Fprintln(w, value)
|
||||
} else {
|
||||
/* just write the variable and value all on one line */
|
||||
fmt.Fprintf(w, "%s=%s\n", name, value)
|
||||
}
|
||||
}
|
||||
|
||||
func validVarName(name string) bool {
|
||||
/* The variable name must be in uppercase and consist only of characters,
|
||||
* numbers and underscores, and may not begin with an underscore. (from the docs)
|
||||
*/
|
||||
|
||||
valid := name[0] != '_'
|
||||
for _, c := range name {
|
||||
valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
|
||||
}
|
||||
return valid
|
||||
}
|
||||
|
||||
func isSocketSpaceError(err error) bool {
|
||||
opErr, ok := err.(*net.OpError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
sysErr, ok := opErr.Err.(syscall.Errno)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
|
||||
}
|
||||
|
||||
func tempFd() (*os.File, error) {
|
||||
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = syscall.Unlink(file.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func journalError(s string) error {
|
||||
s = "journal error: " + s
|
||||
fmt.Fprintln(os.Stderr, s)
|
||||
return errors.New(s)
|
||||
}
|
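A brief sketch of how the journal package above can be used by an application; the message text and the extra UNIT_STATE field are illustrative:

package main

import (
    "log"

    "github.com/coreos/go-systemd/journal"
)

func main() {
    if !journal.Enabled() {
        log.Println("journald socket unavailable; falling back to stderr")
        return
    }
    // Structured entry: extra field names must be uppercase letters, digits
    // and underscores, and must not start with an underscore (see Send above).
    if err := journal.Send("cache rebuilt", journal.PriInfo, map[string]string{
        "UNIT_STATE": "ok",
    }); err != nil {
        log.Println("journal write failed:", err)
    }
    // Printf-style convenience wrapper around Send.
    journal.Print(journal.PriNotice, "rebuild took %d ms", 42)
}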
202 vendor/github.com/coreos/pkg/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
5 vendor/github.com/coreos/pkg/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
|
||||
Copyright 2014 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
39 vendor/github.com/coreos/pkg/capnslog/README.md generated vendored Normal file
@@ -0,0 +1,39 @@
# capnslog, the CoreOS logging package
|
||||
|
||||
There are far too many logging packages out there: they come with varying licenses, carry far too many features (colorization, all sorts of log frameworks), or are just a pain to use (lack of `Fatalln()`?).
|
||||
capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
|
||||
|
||||
### Design Principles
|
||||
|
||||
##### `package main` is the place where logging gets turned on and routed
|
||||
|
||||
A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
|
||||
|
||||
##### All log options are runtime-configurable.
|
||||
|
||||
Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
|
||||
|
||||
##### There is one log object per package. It is registered under its repository and package name.
|
||||
|
||||
`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
|
||||
|
||||
##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
|
||||
|
||||
Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
|
||||
|
||||
Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependant. These are, at best, provided as options, but more likely, provided by your application.
|
||||
|
||||
##### Log objects are an interface
|
||||
|
||||
An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
|
||||
|
||||
##### Log levels have specific meanings:
|
||||
|
||||
* Critical: Unrecoverable. Must fail.
|
||||
* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
|
||||
* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
|
||||
* Notice: Normal, but important (uncommon) log information.
|
||||
* Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
|
||||
* Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
|
||||
* Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
|
||||
|
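The README above describes the division of labor between libraries and `package main`. A minimal sketch of that flow, using only the API added in this commit (`NewPackageLogger`, `SetFormatter`, `NewPrettyFormatter`, `SetGlobalLogLevel`); the repository path "github.com/example/project" is a placeholder, not part of this commit.

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One logger per package, registered under its repository and package name.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// main turns logging on and routes it; libraries stay silent until then.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("starting up, pid=%d", os.Getpid())
	plog.Debug("this line is suppressed at the INFO level")
}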
157
vendor/github.com/coreos/pkg/capnslog/formatters.go
generated
vendored
Normal file
|
@ -0,0 +1,157 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Formatter interface {
|
||||
Format(pkg string, level LogLevel, depth int, entries ...interface{})
|
||||
Flush()
|
||||
}
|
||||
|
||||
func NewStringFormatter(w io.Writer) Formatter {
|
||||
return &StringFormatter{
|
||||
w: bufio.NewWriter(w),
|
||||
}
|
||||
}
|
||||
|
||||
type StringFormatter struct {
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
|
||||
now := time.Now().UTC()
|
||||
s.w.WriteString(now.Format(time.RFC3339))
|
||||
s.w.WriteByte(' ')
|
||||
writeEntries(s.w, pkg, l, i, entries...)
|
||||
s.Flush()
|
||||
}
|
||||
|
||||
func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
|
||||
if pkg != "" {
|
||||
w.WriteString(pkg + ": ")
|
||||
}
|
||||
str := fmt.Sprint(entries...)
|
||||
endsInNL := strings.HasSuffix(str, "\n")
|
||||
w.WriteString(str)
|
||||
if !endsInNL {
|
||||
w.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StringFormatter) Flush() {
|
||||
s.w.Flush()
|
||||
}
|
||||
|
||||
func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
|
||||
return &PrettyFormatter{
|
||||
w: bufio.NewWriter(w),
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
type PrettyFormatter struct {
|
||||
w *bufio.Writer
|
||||
debug bool
|
||||
}
|
||||
|
||||
func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
|
||||
now := time.Now()
|
||||
ts := now.Format("2006-01-02 15:04:05")
|
||||
c.w.WriteString(ts)
|
||||
ms := now.Nanosecond() / 1000
|
||||
c.w.WriteString(fmt.Sprintf(".%06d", ms))
|
||||
if c.debug {
|
||||
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
|
||||
if !ok {
|
||||
file = "???"
|
||||
line = 1
|
||||
} else {
|
||||
slash := strings.LastIndex(file, "/")
|
||||
if slash >= 0 {
|
||||
file = file[slash+1:]
|
||||
}
|
||||
}
|
||||
if line < 0 {
|
||||
line = 0 // not a real line number
|
||||
}
|
||||
c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
|
||||
}
|
||||
c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
|
||||
writeEntries(c.w, pkg, l, depth, entries...)
|
||||
c.Flush()
|
||||
}
|
||||
|
||||
func (c *PrettyFormatter) Flush() {
|
||||
c.w.Flush()
|
||||
}
|
||||
|
||||
// LogFormatter emulates the form of the traditional built-in logger.
|
||||
type LogFormatter struct {
|
||||
logger *log.Logger
|
||||
prefix string
|
||||
}
|
||||
|
||||
// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
|
||||
// golang log package to actually do the logging work so that logs look similar.
|
||||
func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
|
||||
return &LogFormatter{
|
||||
logger: log.New(w, "", flag), // don't use prefix here
|
||||
prefix: prefix, // save it instead
|
||||
}
|
||||
}
|
||||
|
||||
// Format builds a log message for the LogFormatter. The LogLevel is ignored.
|
||||
func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
|
||||
str := fmt.Sprint(entries...)
|
||||
prefix := lf.prefix
|
||||
if pkg != "" {
|
||||
prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
|
||||
}
|
||||
lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
|
||||
}
|
||||
|
||||
// Flush is included so that the interface is complete, but is a no-op.
|
||||
func (lf *LogFormatter) Flush() {
|
||||
// noop
|
||||
}
|
||||
|
||||
// NilFormatter is a no-op log formatter that does nothing.
|
||||
type NilFormatter struct {
|
||||
}
|
||||
|
||||
// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
|
||||
// messages so that you can cause part of your logging to be silent.
|
||||
func NewNilFormatter() Formatter {
|
||||
return &NilFormatter{}
|
||||
}
|
||||
|
||||
// Format does nothing.
|
||||
func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
|
||||
// noop
|
||||
}
|
||||
|
||||
// Flush is included so that the interface is complete, but is a no-op.
|
||||
func (_ *NilFormatter) Flush() {
|
||||
// noop
|
||||
}
|
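Because Formatter is a small interface (Format plus Flush), applications can plug in their own output format. The sketch below is an illustrative example, not part of the vendored package: a formatter that prefixes each entry with the level's single-character code, assuming the placeholder repository path "github.com/example/project".

package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/coreos/pkg/capnslog"
)

// levelPrefixFormatter writes "<LEVEL-CHAR> <pkg>: <message>" lines.
type levelPrefixFormatter struct {
	w *bufio.Writer
}

func (f *levelPrefixFormatter) Format(pkg string, l capnslog.LogLevel, _ int, entries ...interface{}) {
	fmt.Fprintf(f.w, "%s %s: %s\n", l.Char(), pkg, fmt.Sprint(entries...))
	f.Flush()
}

func (f *levelPrefixFormatter) Flush() {
	f.w.Flush()
}

func main() {
	capnslog.SetFormatter(&levelPrefixFormatter{w: bufio.NewWriter(os.Stderr)})
	capnslog.NewPackageLogger("github.com/example/project", "main").Info("custom formatter in use")
}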
96
vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var pid = os.Getpid()
|
||||
|
||||
type GlogFormatter struct {
|
||||
StringFormatter
|
||||
}
|
||||
|
||||
func NewGlogFormatter(w io.Writer) *GlogFormatter {
|
||||
g := &GlogFormatter{}
|
||||
g.w = bufio.NewWriter(w)
|
||||
return g
|
||||
}
|
||||
|
||||
func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
|
||||
g.w.Write(GlogHeader(level, depth+1))
|
||||
g.StringFormatter.Format(pkg, level, depth+1, entries...)
|
||||
}
|
||||
|
||||
func GlogHeader(level LogLevel, depth int) []byte {
|
||||
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
|
||||
now := time.Now().UTC()
|
||||
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
|
||||
if !ok {
|
||||
file = "???"
|
||||
line = 1
|
||||
} else {
|
||||
slash := strings.LastIndex(file, "/")
|
||||
if slash >= 0 {
|
||||
file = file[slash+1:]
|
||||
}
|
||||
}
|
||||
if line < 0 {
|
||||
line = 0 // not a real line number
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
buf.Grow(30)
|
||||
_, month, day := now.Date()
|
||||
hour, minute, second := now.Clock()
|
||||
buf.WriteString(level.Char())
|
||||
twoDigits(buf, int(month))
|
||||
twoDigits(buf, day)
|
||||
buf.WriteByte(' ')
|
||||
twoDigits(buf, hour)
|
||||
buf.WriteByte(':')
|
||||
twoDigits(buf, minute)
|
||||
buf.WriteByte(':')
|
||||
twoDigits(buf, second)
|
||||
buf.WriteByte('.')
|
||||
buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
|
||||
buf.WriteByte('Z')
|
||||
buf.WriteByte(' ')
|
||||
buf.WriteString(strconv.Itoa(pid))
|
||||
buf.WriteByte(' ')
|
||||
buf.WriteString(file)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(strconv.Itoa(line))
|
||||
buf.WriteByte(']')
|
||||
buf.WriteByte(' ')
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
const digits = "0123456789"
|
||||
|
||||
func twoDigits(b *bytes.Buffer, d int) {
|
||||
c2 := digits[d%10]
|
||||
d /= 10
|
||||
c1 := digits[d%10]
|
||||
b.WriteByte(c1)
|
||||
b.WriteByte(c2)
|
||||
}
|
49
vendor/github.com/coreos/pkg/capnslog/init.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Here's where the opinionation comes in. We need some sensible defaults,
// especially after taking over the log package. Your project (whatever it may
// be) may see things differently. That's okay; there should be no defaults in
// the main package that cannot be controlled or overridden programmatically,
// otherwise it's a bug. The way to do that is to create your own init_log.go
// file, much like this one.
|
||||
|
||||
func init() {
|
||||
initHijack()
|
||||
|
||||
// Go `log` package uses os.Stderr.
|
||||
SetFormatter(NewDefaultFormatter(os.Stderr))
|
||||
SetGlobalLogLevel(INFO)
|
||||
}
|
||||
|
||||
func NewDefaultFormatter(out io.Writer) Formatter {
|
||||
if syscall.Getppid() == 1 {
|
||||
// We're running under init, which may be systemd.
|
||||
f, err := NewJournaldFormatter()
|
||||
if err == nil {
|
||||
return f
|
||||
}
|
||||
}
|
||||
return NewPrettyFormatter(out, false)
|
||||
}
|
25
vendor/github.com/coreos/pkg/capnslog/init_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import "os"
|
||||
|
||||
func init() {
|
||||
initHijack()
|
||||
|
||||
// Go `log` package uses os.Stderr.
|
||||
SetFormatter(NewPrettyFormatter(os.Stderr, false))
|
||||
SetGlobalLogLevel(INFO)
|
||||
}
|
68
vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
generated
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/coreos/go-systemd/journal"
|
||||
)
|
||||
|
||||
func NewJournaldFormatter() (Formatter, error) {
|
||||
if !journal.Enabled() {
|
||||
return nil, errors.New("No systemd detected")
|
||||
}
|
||||
return &journaldFormatter{}, nil
|
||||
}
|
||||
|
||||
type journaldFormatter struct{}
|
||||
|
||||
func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
|
||||
var pri journal.Priority
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
pri = journal.PriCrit
|
||||
case ERROR:
|
||||
pri = journal.PriErr
|
||||
case WARNING:
|
||||
pri = journal.PriWarning
|
||||
case NOTICE:
|
||||
pri = journal.PriNotice
|
||||
case INFO:
|
||||
pri = journal.PriInfo
|
||||
case DEBUG:
|
||||
pri = journal.PriDebug
|
||||
case TRACE:
|
||||
pri = journal.PriDebug
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
msg := fmt.Sprint(entries...)
|
||||
tags := map[string]string{
|
||||
"PACKAGE": pkg,
|
||||
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
|
||||
}
|
||||
err := journal.Send(msg, pri, tags)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *journaldFormatter) Flush() {}
|
39
vendor/github.com/coreos/pkg/capnslog/log_hijack.go
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
func initHijack() {
|
||||
pkg := NewPackageLogger("log", "")
|
||||
w := packageWriter{pkg}
|
||||
log.SetFlags(0)
|
||||
log.SetPrefix("")
|
||||
log.SetOutput(w)
|
||||
}
|
||||
|
||||
type packageWriter struct {
|
||||
pl *PackageLogger
|
||||
}
|
||||
|
||||
func (p packageWriter) Write(b []byte) (int, error) {
|
||||
if p.pl.level < INFO {
|
||||
return 0, nil
|
||||
}
|
||||
p.pl.internalLog(calldepth+2, INFO, string(b))
|
||||
return len(b), nil
|
||||
}
|
240
vendor/github.com/coreos/pkg/capnslog/logmap.go
generated
vendored
Normal file
|
@ -0,0 +1,240 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// LogLevel is the set of all log levels.
|
||||
type LogLevel int8
|
||||
|
||||
const (
|
||||
// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
|
||||
CRITICAL LogLevel = iota - 1
|
||||
// ERROR is for errors that are not fatal but lead to troubling behavior.
|
||||
ERROR
|
||||
// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
|
||||
WARNING
|
||||
// NOTICE is for normal but significant conditions.
|
||||
NOTICE
|
||||
// INFO is a log level for common, everyday log updates.
|
||||
INFO
|
||||
// DEBUG is the default hidden level for more verbose updates about internal processes.
|
||||
DEBUG
|
||||
// TRACE is for (potentially) call by call tracing of programs.
|
||||
TRACE
|
||||
)
|
||||
|
||||
// Char returns a single-character representation of the log level.
|
||||
func (l LogLevel) Char() string {
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
return "C"
|
||||
case ERROR:
|
||||
return "E"
|
||||
case WARNING:
|
||||
return "W"
|
||||
case NOTICE:
|
||||
return "N"
|
||||
case INFO:
|
||||
return "I"
|
||||
case DEBUG:
|
||||
return "D"
|
||||
case TRACE:
|
||||
return "T"
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a multi-character representation of the log level.
|
||||
func (l LogLevel) String() string {
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
return "CRITICAL"
|
||||
case ERROR:
|
||||
return "ERROR"
|
||||
case WARNING:
|
||||
return "WARNING"
|
||||
case NOTICE:
|
||||
return "NOTICE"
|
||||
case INFO:
|
||||
return "INFO"
|
||||
case DEBUG:
|
||||
return "DEBUG"
|
||||
case TRACE:
|
||||
return "TRACE"
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
}
|
||||
|
||||
// Update using the given string value. Fulfills the flag.Value interface.
|
||||
func (l *LogLevel) Set(s string) error {
|
||||
value, err := ParseLevel(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*l = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseLevel translates some potential loglevel strings into their corresponding levels.
|
||||
func ParseLevel(s string) (LogLevel, error) {
|
||||
switch s {
|
||||
case "CRITICAL", "C":
|
||||
return CRITICAL, nil
|
||||
case "ERROR", "0", "E":
|
||||
return ERROR, nil
|
||||
case "WARNING", "1", "W":
|
||||
return WARNING, nil
|
||||
case "NOTICE", "2", "N":
|
||||
return NOTICE, nil
|
||||
case "INFO", "3", "I":
|
||||
return INFO, nil
|
||||
case "DEBUG", "4", "D":
|
||||
return DEBUG, nil
|
||||
case "TRACE", "5", "T":
|
||||
return TRACE, nil
|
||||
}
|
||||
return CRITICAL, errors.New("couldn't parse log level " + s)
|
||||
}
|
||||
|
||||
type RepoLogger map[string]*PackageLogger
|
||||
|
||||
type loggerStruct struct {
|
||||
sync.Mutex
|
||||
repoMap map[string]RepoLogger
|
||||
formatter Formatter
|
||||
}
|
||||
|
||||
// logger is the global logger
|
||||
var logger = new(loggerStruct)
|
||||
|
||||
// SetGlobalLogLevel sets the log level for all packages in all repositories
|
||||
// registered with capnslog.
|
||||
func SetGlobalLogLevel(l LogLevel) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
for _, r := range logger.repoMap {
|
||||
r.setRepoLogLevelInternal(l)
|
||||
}
|
||||
}
|
||||
|
||||
// GetRepoLogger may return the handle to the repository's set of packages' loggers.
|
||||
func GetRepoLogger(repo string) (RepoLogger, error) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
r, ok := logger.repoMap[repo]
|
||||
if !ok {
|
||||
return nil, errors.New("no packages registered for repo " + repo)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// MustRepoLogger returns the handle to the repository's packages' loggers.
|
||||
func MustRepoLogger(repo string) RepoLogger {
|
||||
r, err := GetRepoLogger(repo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// SetRepoLogLevel sets the log level for all packages in the repository.
|
||||
func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
r.setRepoLogLevelInternal(l)
|
||||
}
|
||||
|
||||
func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
|
||||
for _, v := range r {
|
||||
v.level = l
|
||||
}
|
||||
}
|
||||
|
||||
// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
|
||||
// order, and returns a map of the results, for use in SetLogLevel.
|
||||
func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
|
||||
setlist := strings.Split(conf, ",")
|
||||
out := make(map[string]LogLevel)
|
||||
for _, setstring := range setlist {
|
||||
setting := strings.Split(setstring, "=")
|
||||
if len(setting) != 2 {
|
||||
return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
|
||||
}
|
||||
l, err := ParseLevel(setting[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[setting[0]] = l
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SetLogLevel takes a map of package names within a repository to their desired
|
||||
// loglevel, and sets the levels appropriately. Unknown packages are ignored.
|
||||
// "*" is a special package name that corresponds to all packages, and will be
|
||||
// processed first.
|
||||
func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
if l, ok := m["*"]; ok {
|
||||
r.setRepoLogLevelInternal(l)
|
||||
}
|
||||
for k, v := range m {
|
||||
l, ok := r[k]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
l.level = v
|
||||
}
|
||||
}
|
||||
|
||||
// SetFormatter sets the formatting function for all logs.
|
||||
func SetFormatter(f Formatter) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
logger.formatter = f
|
||||
}
|
||||
|
||||
// NewPackageLogger creates a package logger object.
|
||||
// This should be defined as a global var in your package, referencing your repo.
|
||||
func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
if logger.repoMap == nil {
|
||||
logger.repoMap = make(map[string]RepoLogger)
|
||||
}
|
||||
r, rok := logger.repoMap[repo]
|
||||
if !rok {
|
||||
logger.repoMap[repo] = make(RepoLogger)
|
||||
r = logger.repoMap[repo]
|
||||
}
|
||||
p, pok := r[pkg]
|
||||
if !pok {
|
||||
r[pkg] = &PackageLogger{
|
||||
pkg: pkg,
|
||||
level: INFO,
|
||||
}
|
||||
p = r[pkg]
|
||||
}
|
||||
return
|
||||
}
|
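ParseLogLevelConfig and SetLogLevel above are meant to be driven from a flag or configuration value in main. A small sketch under that assumption; the repository path "github.com/example/project", the package name "storage", and the config string stand in for whatever the application actually uses.

package main

import (
	"github.com/coreos/pkg/capnslog"
)

// Registering a package logger also registers the repository, so
// MustRepoLogger below can find it.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	repo := capnslog.MustRepoLogger("github.com/example/project")

	// e.g. the value of a --log-level flag; "*" applies to every package first.
	cfg, err := repo.ParseLogLevelConfig("*=INFO,storage=DEBUG")
	if err != nil {
		plog.Fatalf("bad log level config: %v", err)
	}
	repo.SetLogLevel(cfg)

	plog.Info("log levels configured")
}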
171
vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
generated
vendored
Normal file
|
@ -0,0 +1,171 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
type PackageLogger struct {
|
||||
pkg string
|
||||
level LogLevel
|
||||
}
|
||||
|
||||
const calldepth = 2
|
||||
|
||||
func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
if inLevel != CRITICAL && p.level < inLevel {
|
||||
return
|
||||
}
|
||||
if logger.formatter != nil {
|
||||
logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageLogger) LevelAt(l LogLevel) bool {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
return p.level >= l
|
||||
}
|
||||
|
||||
// Log a formatted string at any level between ERROR and TRACE
|
||||
func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Log a message at any level between ERROR and TRACE
|
||||
func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
|
||||
p.internalLog(calldepth, l, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// log stdlib compatibility
|
||||
|
||||
func (p *PackageLogger) Println(args ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Printf(format string, args ...interface{}) {
|
||||
p.Logf(INFO, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Print(args ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Panic and fatal
|
||||
|
||||
func (p *PackageLogger) Panicf(format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
panic(s)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Panic(args ...interface{}) {
|
||||
s := fmt.Sprint(args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
panic(s)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
|
||||
p.Logf(CRITICAL, format, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Fatal(args ...interface{}) {
|
||||
s := fmt.Sprint(args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Error Functions
|
||||
|
||||
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
|
||||
p.Logf(ERROR, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Error(entries ...interface{}) {
|
||||
p.internalLog(calldepth, ERROR, entries...)
|
||||
}
|
||||
|
||||
// Warning Functions
|
||||
|
||||
func (p *PackageLogger) Warningf(format string, args ...interface{}) {
|
||||
p.Logf(WARNING, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Warning(entries ...interface{}) {
|
||||
p.internalLog(calldepth, WARNING, entries...)
|
||||
}
|
||||
|
||||
// Notice Functions
|
||||
|
||||
func (p *PackageLogger) Noticef(format string, args ...interface{}) {
|
||||
p.Logf(NOTICE, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Notice(entries ...interface{}) {
|
||||
p.internalLog(calldepth, NOTICE, entries...)
|
||||
}
|
||||
|
||||
// Info Functions
|
||||
|
||||
func (p *PackageLogger) Infof(format string, args ...interface{}) {
|
||||
p.Logf(INFO, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Info(entries ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, entries...)
|
||||
}
|
||||
|
||||
// Debug Functions
|
||||
|
||||
func (p *PackageLogger) Debugf(format string, args ...interface{}) {
|
||||
if p.level < DEBUG {
|
||||
return
|
||||
}
|
||||
p.Logf(DEBUG, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Debug(entries ...interface{}) {
|
||||
if p.level < DEBUG {
|
||||
return
|
||||
}
|
||||
p.internalLog(calldepth, DEBUG, entries...)
|
||||
}
|
||||
|
||||
// Trace Functions
|
||||
|
||||
func (p *PackageLogger) Tracef(format string, args ...interface{}) {
|
||||
if p.level < TRACE {
|
||||
return
|
||||
}
|
||||
p.Logf(TRACE, format, args...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Trace(entries ...interface{}) {
|
||||
if p.level < TRACE {
|
||||
return
|
||||
}
|
||||
p.internalLog(calldepth, TRACE, entries...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Flush() {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
logger.formatter.Flush()
|
||||
}
|
65
vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
generated
vendored
Normal file
|
@ -0,0 +1,65 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func NewSyslogFormatter(w *syslog.Writer) Formatter {
|
||||
return &syslogFormatter{w}
|
||||
}
|
||||
|
||||
func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
|
||||
w, err := syslog.New(syslog.LOG_DEBUG, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewSyslogFormatter(w), nil
|
||||
}
|
||||
|
||||
type syslogFormatter struct {
|
||||
w *syslog.Writer
|
||||
}
|
||||
|
||||
func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
|
||||
for _, entry := range entries {
|
||||
str := fmt.Sprint(entry)
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
s.w.Crit(str)
|
||||
case ERROR:
|
||||
s.w.Err(str)
|
||||
case WARNING:
|
||||
s.w.Warning(str)
|
||||
case NOTICE:
|
||||
s.w.Notice(str)
|
||||
case INFO:
|
||||
s.w.Info(str)
|
||||
case DEBUG:
|
||||
s.w.Debug(str)
|
||||
case TRACE:
|
||||
s.w.Debug(str)
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *syslogFormatter) Flush() {
|
||||
}
|
11
vendor/github.com/coreos/pkg/health/README.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
health
====

A simple framework for implementing an HTTP health check endpoint on servers.

Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`.

### Documentation

For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)
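A sketch of how the package is wired up, based on the Checkable and Checker types added in health.go below. The README's MakeHealthHandlerFunc helper does not appear in this vendor drop, so the sketch mounts the Checker directly, which works because Checker implements http.Handler; the diskChecker type and the /healthz path are placeholders.

package main

import (
	"errors"
	"log"
	"net/http"
	"os"

	"github.com/coreos/pkg/health"
)

// diskChecker is a placeholder Checkable: healthy while a marker file exists.
type diskChecker struct{ path string }

func (d diskChecker) Healthy() error {
	if _, err := os.Stat(d.path); err != nil {
		return errors.New("data volume not available: " + err.Error())
	}
	return nil
}

func main() {
	checker := health.Checker{
		Checks: []health.Checkable{diskChecker{path: "/var/lib/app/ready"}},
	}
	// With the default handlers, GET /healthz returns {"status":"ok"} when all
	// checks pass, or a 500 with error details otherwise.
	http.Handle("/healthz", checker)
	log.Fatal(http.ListenAndServe(":8080", nil))
}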
127
vendor/github.com/coreos/pkg/health/health.go
generated
vendored
Normal file
|
@ -0,0 +1,127 @@
|
|||
package health
|
||||
|
||||
import (
|
||||
"expvar"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/coreos/pkg/httputil"
|
||||
)
|
||||
|
||||
// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
|
||||
type Checkable interface {
|
||||
Healthy() error
|
||||
}
|
||||
|
||||
// Checker provides a way to make an endpoint which can be probed for system health.
|
||||
type Checker struct {
|
||||
// Checks are the Checkables to be checked when probing.
|
||||
Checks []Checkable
|
||||
|
||||
// UnhealthyHandler is called when one or more of the checks are unhealthy.
// If not provided, DefaultUnhealthyHandler is called.
|
||||
UnhealthyHandler UnhealthyHandler
|
||||
|
||||
// HealthyHandler is called when all checks are healthy.
|
||||
// If not provided, DefaultHealthyHandler is called.
|
||||
HealthyHandler http.HandlerFunc
|
||||
}
|
||||
|
||||
func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
unhealthyHandler := c.UnhealthyHandler
|
||||
if unhealthyHandler == nil {
|
||||
unhealthyHandler = DefaultUnhealthyHandler
|
||||
}
|
||||
|
||||
successHandler := c.HealthyHandler
|
||||
if successHandler == nil {
|
||||
successHandler = DefaultHealthyHandler
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
w.Header().Set("Allow", "GET")
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
if err := Check(c.Checks); err != nil {
|
||||
unhealthyHandler(w, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
successHandler(w, r)
|
||||
}
|
||||
|
||||
type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
|
||||
|
||||
type StatusResponse struct {
|
||||
Status string `json:"status"`
|
||||
Details *StatusResponseDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
type StatusResponseDetails struct {
|
||||
Code int `json:"code,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func Check(checks []Checkable) (err error) {
|
||||
errs := []error{}
|
||||
for _, c := range checks {
|
||||
if e := c.Healthy(); e != nil {
|
||||
errs = append(errs, e)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
err = nil
|
||||
case 1:
|
||||
err = errs[0]
|
||||
default:
|
||||
err = fmt.Errorf("multiple health check failure: %v", errs)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
|
||||
Status: "ok",
|
||||
})
|
||||
if err != nil {
|
||||
// TODO(bobbyrullo): replace with logging from new logging pkg,
|
||||
// once it lands.
|
||||
log.Printf("Failed to write JSON response: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
|
||||
writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
|
||||
Status: "error",
|
||||
Details: &StatusResponseDetails{
|
||||
Code: http.StatusInternalServerError,
|
||||
Message: err.Error(),
|
||||
},
|
||||
})
|
||||
if writeErr != nil {
|
||||
// TODO(bobbyrullo): replace with logging from new logging pkg,
|
||||
// once it lands.
|
||||
log.Printf("Failed to write JSON response: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
|
||||
func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
fmt.Fprintf(w, "{\n")
|
||||
first := true
|
||||
expvar.Do(func(kv expvar.KeyValue) {
|
||||
if !first {
|
||||
fmt.Fprintf(w, ",\n")
|
||||
}
|
||||
first = false
|
||||
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
|
||||
})
|
||||
fmt.Fprintf(w, "\n}\n")
|
||||
}
|
13
vendor/github.com/coreos/pkg/httputil/README.md
generated
vendored
Normal file
@ -0,0 +1,13 @@
httputil
====

Common code for dealing with HTTP.

Includes:

* Code for returning JSON responses.

### Documentation

Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil)
21
vendor/github.com/coreos/pkg/httputil/cookie.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
package httputil

import (
	"net/http"
	"time"
)

// DeleteCookies effectively deletes all named cookies
// by wiping all data and setting to expire immediately.
func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
	for _, n := range cookieNames {
		c := &http.Cookie{
			Name:    n,
			Value:   "",
			Path:    "/",
			MaxAge:  -1,
			Expires: time.Time{},
		}
		http.SetCookie(w, c)
	}
}
27
vendor/github.com/coreos/pkg/httputil/json.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
package httputil

import (
	"encoding/json"
	"net/http"
)

const (
	JSONContentType = "application/json"
)

func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
	enc, err := json.Marshal(resp)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}

	w.Header().Set("Content-Type", JSONContentType)
	w.WriteHeader(code)

	_, err = w.Write(enc)
	if err != nil {
		return err
	}
	return nil
}
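WriteJSONResponse above is the helper the health package uses for its responses. A hedged example of calling it from an ordinary handler; the /status path and the payload contents are made up for illustration.

package main

import (
	"log"
	"net/http"

	"github.com/coreos/pkg/httputil"
)

func statusHandler(w http.ResponseWriter, r *http.Request) {
	payload := map[string]string{"state": "running"}
	if err := httputil.WriteJSONResponse(w, http.StatusOK, payload); err != nil {
		log.Printf("failed to write JSON response: %v", err)
	}
}

func main() {
	http.HandleFunc("/status", statusHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}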
15
vendor/github.com/coreos/pkg/timeutil/backoff.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
package timeutil

import (
	"time"
)

func ExpBackoff(prev, max time.Duration) time.Duration {
	if prev == 0 {
		return time.Second
	}
	if prev > max/2 {
		return max
	}
	return 2 * prev
}
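ExpBackoff doubles the previous delay up to a cap, starting from one second when the previous delay is zero. A typical retry loop built on it might look like the sketch below; the dial function is a stand-in for whatever operation is being retried.

package main

import (
	"errors"
	"log"
	"time"

	"github.com/coreos/pkg/timeutil"
)

// dial is a placeholder for the operation being retried.
func dial() error { return errors.New("not ready") }

func main() {
	var delay time.Duration // zero means "first attempt", yielding 1s next
	for i := 0; i < 5; i++ {
		if err := dial(); err == nil {
			return
		}
		delay = timeutil.ExpBackoff(delay, 30*time.Second) // 1s, 2s, 4s, ... capped at 30s
		log.Printf("attempt %d failed, retrying in %v", i+1, delay)
		time.Sleep(delay)
	}
}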
79
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
|
@ -6,6 +6,7 @@ package restful
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
@ -83,18 +84,42 @@ func (c *Container) EnableContentEncoding(enabled bool) {
|
|||
c.contentEncodingEnabled = enabled
|
||||
}
|
||||
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and panic in that case.
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
|
||||
func (c *Container) Add(service *WebService) *Container {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// If registered on root then no additional specific mapping is needed
|
||||
|
||||
// if rootPath was not set then lazy initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// If not registered on root then add specific mapping
|
||||
if !c.isRegisteredOnRoot {
|
||||
pattern := c.fixedPrefixPath(service.RootPath())
|
||||
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
}
|
||||
|
||||
// addHandler may set a new HandleFunc for the serveMux
|
||||
// this function must run inside the critical region protected by the webServicesLock.
|
||||
// returns true if the function was registered on root ("/")
|
||||
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
|
||||
pattern := fixedPrefixPath(service.RootPath())
|
||||
// check if root path registration is needed
|
||||
if "/" == pattern || "" == pattern {
|
||||
c.ServeMux.HandleFunc("/", c.dispatch)
|
||||
c.isRegisteredOnRoot = true
|
||||
} else {
|
||||
serveMux.HandleFunc("/", c.dispatch)
|
||||
return true
|
||||
}
|
||||
// detect if registration already exists
|
||||
alreadyMapped := false
|
||||
for _, each := range c.webServices {
|
||||
|
@ -104,38 +129,36 @@ func (c *Container) Add(service *WebService) *Container {
|
|||
}
|
||||
}
|
||||
if !alreadyMapped {
|
||||
c.ServeMux.HandleFunc(pattern, c.dispatch)
|
||||
serveMux.HandleFunc(pattern, c.dispatch)
|
||||
if !strings.HasSuffix(pattern, "/") {
|
||||
c.ServeMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
serveMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
// if rootPath was not set then lazy initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ws *WebService) error {
|
||||
if c.ServeMux == http.DefaultServeMux {
|
||||
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
|
||||
log.Printf(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// build a new ServeMux and re-register all WebServices
|
||||
newServeMux := http.NewServeMux()
|
||||
newServices := []*WebService{}
|
||||
for ix := range c.webServices {
|
||||
if c.webServices[ix].rootPath != ws.rootPath {
|
||||
newServices = append(newServices, c.webServices[ix])
|
||||
newIsRegisteredOnRoot := false
|
||||
for _, each := range c.webServices {
|
||||
if each.rootPath != ws.rootPath {
|
||||
// If not registered on root then add specific mapping
|
||||
if !newIsRegisteredOnRoot {
|
||||
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
|
||||
}
|
||||
newServices = append(newServices, each)
|
||||
}
|
||||
}
|
||||
c.webServices = newServices
|
||||
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -251,7 +274,7 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
|
|||
}
|
||||
|
||||
// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
|
||||
func (c Container) fixedPrefixPath(pathspec string) string {
|
||||
func fixedPrefixPath(pathspec string) string {
|
||||
varBegin := strings.Index(pathspec, "{")
|
||||
if -1 == varBegin {
|
||||
return pathspec
|
||||
|
|
54
vendor/github.com/emicklei/go-restful/cors_filter.go
generated
vendored
|
@ -5,6 +5,7 @@ package restful
|
|||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
@ -19,11 +20,13 @@ import (
|
|||
type CrossOriginResourceSharing struct {
|
||||
ExposeHeaders []string // list of Header names
|
||||
AllowedHeaders []string // list of Header names
|
||||
AllowedDomains []string // list of allowed values for Http Origin. If empty all are allowed.
|
||||
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
|
||||
AllowedMethods []string
|
||||
MaxAge int // number of seconds before requiring new Options request
|
||||
CookiesAllowed bool
|
||||
Container *Container
|
||||
|
||||
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
|
||||
}
|
||||
|
||||
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
|
||||
|
@ -37,22 +40,13 @@ func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *
|
|||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if len(c.AllowedDomains) > 0 { // if provided then origin must be included
|
||||
included := false
|
||||
for _, each := range c.AllowedDomains {
|
||||
if each == origin {
|
||||
included = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !included {
|
||||
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
|
||||
if trace {
|
||||
traceLogger.Printf("HTTP Origin:%s is not part of %v", origin, c.AllowedDomains)
|
||||
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
if req.Request.Method != "OPTIONS" {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
|
@ -128,13 +122,32 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
|
|||
if len(c.AllowedDomains) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
allowed := false
|
||||
for _, each := range c.AllowedDomains {
|
||||
if each == origin {
|
||||
for _, domain := range c.AllowedDomains {
|
||||
if domain == origin {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allowed {
|
||||
if len(c.allowedOriginPatterns) == 0 {
|
||||
// compile allowed domains to allowed origin patterns
|
||||
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
c.allowedOriginPatterns = allowedOriginRegexps
|
||||
}
|
||||
|
||||
for _, pattern := range c.allowedOriginPatterns {
|
||||
if allowed = pattern.MatchString(origin); allowed {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allowed
|
||||
}
|
||||
|
||||
|
@ -174,3 +187,16 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Take a list of strings and compile them into a list of regular expressions.
|
||||
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
|
||||
regexps := []*regexp.Regexp{}
|
||||
for _, regexpStr := range regexpStrings {
|
||||
r, err := regexp.Compile(regexpStr)
|
||||
if err != nil {
|
||||
return regexps, err
|
||||
}
|
||||
regexps = append(regexps, r)
|
||||
}
|
||||
return regexps, nil
|
||||
}
|
||||
|
|
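With this change, AllowedDomains entries that do not match an origin exactly are also compiled and matched as regular expressions, so subdomain patterns can be expressed directly. A sketch of configuring the filter under that assumption; the container wiring (NewContainer, Container.Filter) is standard go-restful usage rather than part of this diff, and the origin pattern shown is only an example.

package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	container := restful.NewContainer()

	cors := restful.CrossOriginResourceSharing{
		// Exact origins and regular expressions are both accepted now.
		AllowedDomains: []string{"https://example.com", `https://.*\.example\.com`},
		AllowedHeaders: []string{"Content-Type", "Accept"},
		AllowedMethods: []string{"GET", "POST"},
		CookiesAllowed: false,
		Container:      container,
	}
	container.Filter(cors.Filter)

	log.Fatal(http.ListenAndServe(":8080", container))
}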
6
vendor/github.com/emicklei/go-restful/web_service.go
generated
vendored
|
@ -36,9 +36,6 @@ func (w *WebService) SetDynamicRoutes(enable bool) {
|
|||
|
||||
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
|
||||
func (w *WebService) compilePathExpression() {
|
||||
if len(w.rootPath) == 0 {
|
||||
w.Path("/") // lazy initialize path
|
||||
}
|
||||
compiled, err := newPathExpression(w.rootPath)
|
||||
if err != nil {
|
||||
log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
|
||||
|
@ -60,6 +57,9 @@ func (w WebService) Version() string { return w.apiVersion }
|
|||
// All Routes will be relative to this path.
|
||||
func (w *WebService) Path(root string) *WebService {
|
||||
w.rootPath = root
|
||||
if len(w.rootPath) == 0 {
|
||||
w.rootPath = "/"
|
||||
}
|
||||
w.compilePathExpression()
|
||||
return w
|
||||
}
|
||||
|
|
6
vendor/github.com/evanphx/json-patch/README.md
generated
vendored
|
@ -1,13 +1,13 @@
|
|||
## JSON-Patch
|
||||
|
||||
Provides the abiilty to modify and test a JSON according to a
|
||||
[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7386 JSON Merge Patch](https://tools.ietf.org/html/rfc7386).
|
||||
Provides the ability to modify and test a JSON according to a
|
||||
[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396).
|
||||
|
||||
*Version*: **1.0**
|
||||
|
||||
[](http://godoc.org/github.com/evanphx/json-patch)
|
||||
|
||||
[](https://travis-ci.org/evanphx/json-patch)
|
||||
[](https://travis-ci.org/evanphx/json-patch)
|
||||
|
||||
### API Usage
|
||||
|
||||
|
|
40
vendor/github.com/evanphx/json-patch/merge.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func merge(cur, patch *lazyNode) *lazyNode {
|
||||
|
@ -27,6 +28,7 @@ func merge(cur, patch *lazyNode) *lazyNode {
|
|||
|
||||
func mergeDocs(doc, patch *partialDoc) {
|
||||
for k, v := range *patch {
|
||||
k := decodePatchKey(k)
|
||||
if v == nil {
|
||||
delete(*doc, k)
|
||||
} else {
|
||||
|
@ -69,7 +71,7 @@ func pruneDocNulls(doc *partialDoc) *partialDoc {
|
|||
}
|
||||
|
||||
func pruneAryNulls(ary *partialArray) *partialArray {
|
||||
var newAry []*lazyNode
|
||||
newAry := []*lazyNode{}
|
||||
|
||||
for _, v := range *ary {
|
||||
if v != nil {
|
||||
|
@ -218,6 +220,9 @@ func matchesValue(av, bv interface{}) bool {
|
|||
}
|
||||
}
|
||||
return true
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
return matchesArray(at, bt)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -226,15 +231,16 @@ func matchesValue(av, bv interface{}) bool {
|
|||
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
||||
into := map[string]interface{}{}
|
||||
for key, bv := range b {
|
||||
escapedKey := encodePatchKey(key)
|
||||
av, ok := a[key]
|
||||
// value was added
|
||||
if !ok {
|
||||
into[key] = bv
|
||||
into[escapedKey] = bv
|
||||
continue
|
||||
}
|
||||
// If types have changed, replace completely
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
into[key] = bv
|
||||
into[escapedKey] = bv
|
||||
continue
|
||||
}
|
||||
// Types are the same, compare values
|
||||
|
@ -247,23 +253,23 @@ func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
|||
return nil, err
|
||||
}
|
||||
if len(dst) > 0 {
|
||||
into[key] = dst
|
||||
into[escapedKey] = dst
|
||||
}
|
||||
case string, float64, bool:
|
||||
if !matchesValue(av, bv) {
|
||||
into[key] = bv
|
||||
into[escapedKey] = bv
|
||||
}
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
if !matchesArray(at, bt) {
|
||||
into[key] = bv
|
||||
into[escapedKey] = bv
|
||||
}
|
||||
case nil:
|
||||
switch bv.(type) {
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
default:
|
||||
into[key] = bv
|
||||
into[escapedKey] = bv
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
|
||||
|
@ -278,3 +284,23 @@ func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
|||
}
|
||||
return into, nil
|
||||
}
|
||||
|
||||
// From http://tools.ietf.org/html/rfc6901#section-4 :
|
||||
//
|
||||
// Evaluation of each reference token begins by decoding any escaped
|
||||
// character sequence. This is performed by first transforming any
|
||||
// occurrence of the sequence '~1' to '/', and then transforming any
|
||||
// occurrence of the sequence '~0' to '~'.
|
||||
|
||||
var (
|
||||
rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1")
|
||||
rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
|
||||
)
|
||||
|
||||
func decodePatchKey(k string) string {
|
||||
return rfc6901Decoder.Replace(k)
|
||||
}
|
||||
|
||||
func encodePatchKey(k string) string {
|
||||
return rfc6901Encoder.Replace(k)
|
||||
}
|
||||
|
|
111
vendor/github.com/evanphx/json-patch/patch.go
generated
vendored
|
@ -32,6 +32,7 @@ type partialArray []*lazyNode
|
|||
type container interface {
|
||||
get(key string) (*lazyNode, error)
|
||||
set(key string, val *lazyNode) error
|
||||
add(key string, val *lazyNode) error
|
||||
remove(key string) error
|
||||
}
|
||||
|
||||
|
@ -42,7 +43,7 @@ func newLazyNode(raw *json.RawMessage) *lazyNode {
|
|||
func (n *lazyNode) MarshalJSON() ([]byte, error) {
|
||||
switch n.which {
|
||||
case eRaw:
|
||||
return *n.raw, nil
|
||||
return json.Marshal(n.raw)
|
||||
case eDoc:
|
||||
return json.Marshal(n.doc)
|
||||
case eAry:
|
||||
|
@ -269,7 +270,7 @@ func findObject(pd *partialDoc, path string) (container, string) {
|
|||
|
||||
for _, part := range parts {
|
||||
|
||||
next, ok := doc.get(part)
|
||||
next, ok := doc.get(decodePatchKey(part))
|
||||
|
||||
if next == nil || ok != nil {
|
||||
return nil, ""
|
||||
|
@ -290,7 +291,7 @@ func findObject(pd *partialDoc, path string) (container, string) {
|
|||
}
|
||||
}
|
||||
|
||||
return doc, key
|
||||
return doc, decodePatchKey(key)
|
||||
}
|
||||
|
||||
func (d *partialDoc) set(key string, val *lazyNode) error {
|
||||
|
@ -298,11 +299,21 @@ func (d *partialDoc) set(key string, val *lazyNode) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) add(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) get(key string) (*lazyNode, error) {
|
||||
return (*d)[key], nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) remove(key string) error {
|
||||
_, ok := (*d)[key]
|
||||
if !ok {
|
||||
return fmt.Errorf("Unable to remove nonexistant key: %s", key)
|
||||
}
|
||||
|
||||
delete(*d, key)
|
||||
return nil
|
||||
}
|
||||
|
@ -314,7 +325,38 @@ func (d *partialArray) set(key string, val *lazyNode) error {
|
|||
}
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sz := len(*d)
|
||||
if idx+1 > sz {
|
||||
sz = idx + 1
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, sz)
|
||||
|
||||
cur := *d
|
||||
|
||||
copy(ary, cur)
|
||||
|
||||
if idx >= len(ary) {
|
||||
fmt.Printf("huh?: %#v[%d] %s, %s\n", ary, idx)
|
||||
}
|
||||
|
||||
ary[idx] = val
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) add(key string, val *lazyNode) error {
|
||||
if key == "-" {
|
||||
*d = append(*d, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -338,18 +380,25 @@ func (d *partialArray) get(key string) (*lazyNode, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if idx >= len(*d) {
|
||||
return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
return (*d)[idx], nil
|
||||
}
|
||||
|
||||
func (d *partialArray) remove(key string) error {
|
||||
idx, err := strconv.Atoi(key)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(cur) {
|
||||
return fmt.Errorf("Unable to remove invalid index: %d", idx)
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, len(cur)-1)
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
|
@ -366,12 +415,10 @@ func (p Patch) add(doc *partialDoc, op operation) error {
|
|||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("Missing container: %s", path)
|
||||
return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
con.set(key, op.value())
|
||||
|
||||
return nil
|
||||
return con.add(key, op.value())
|
||||
}
|
||||
|
||||
func (p Patch) remove(doc *partialDoc, op operation) error {
|
||||
|
@ -379,6 +426,10 @@ func (p Patch) remove(doc *partialDoc, op operation) error {
|
|||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
return con.remove(key)
|
||||
}
|
||||
|
||||
|
@ -387,9 +438,11 @@ func (p Patch) replace(doc *partialDoc, op operation) error {
|
|||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
con.set(key, op.value())
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
return con.set(key, op.value())
|
||||
}
|
||||
|
||||
func (p Patch) move(doc *partialDoc, op operation) error {
|
||||
|
@ -397,21 +450,29 @@ func (p Patch) move(doc *partialDoc, op operation) error {
|
|||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
val, err := con.get(key)
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
con.remove(key)
|
||||
err = con.remove(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := op.path()
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
con.set(key, val)
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
return con.set(key, val)
|
||||
}
|
||||
|
||||
func (p Patch) test(doc *partialDoc, op operation) error {
|
||||
|
@ -419,12 +480,24 @@ func (p Patch) test(doc *partialDoc, op operation) error {
|
|||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
if op.value().raw == nil {
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
}
|
||||
}
|
||||
|
||||
if val.equal(op.value()) {
|
||||
return nil
|
||||
}
|
||||
|
@ -461,6 +534,12 @@ func DecodePatch(buf []byte) (Patch, error) {
|
|||
// Apply mutates a JSON document according to the patch, and returns the new
|
||||
// document.
|
||||
func (p Patch) Apply(doc []byte) ([]byte, error) {
|
||||
return p.ApplyIndent(doc, "")
|
||||
}
|
||||
|
||||
// ApplyIndent mutates a JSON document according to the patch, and returns the new
|
||||
// document indented.
|
||||
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
|
||||
pd := &partialDoc{}
|
||||
|
||||
err := json.Unmarshal(doc, pd)
|
||||
|
@ -492,5 +571,9 @@ func (p Patch) Apply(doc []byte) ([]byte, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if indent != "" {
|
||||
return json.MarshalIndent(pd, "", indent)
|
||||
}
|
||||
|
||||
return json.Marshal(pd)
|
||||
}
|
||||
|
|
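The patch.go hunks above tighten the vendored json-patch package: add/remove/replace/move/test now return a descriptive "operation does not apply" error when the target path is missing, add operations go through container.add, and JSON Pointer escapes (~0, ~1) are decoded via decodePatchKey. A minimal sketch of the exported entry points, with a made-up document and patch:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Hypothetical document; "app~1name" below is the escaped JSON Pointer
	// form of the "app/name" key that decodePatchKey now resolves.
	doc := []byte(`{"metadata":{"app/name":"web"},"replicas":1}`)

	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "replace", "path": "/replicas", "value": 3},
		{"op": "replace", "path": "/metadata/app~1name", "value": "api"}
	]`))
	if err != nil {
		panic(err)
	}

	// Apply mutates the document; a missing path now yields an
	// "operation does not apply" error instead of an obscure failure.
	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```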
37
vendor/github.com/google/cadvisor/info/v1/docker.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Types used for docker containers.
|
||||
package v1
|
||||
|
||||
type DockerStatus struct {
|
||||
Version string `json:"version"`
|
||||
KernelVersion string `json:"kernel_version"`
|
||||
OS string `json:"os"`
|
||||
Hostname string `json:"hostname"`
|
||||
RootDir string `json:"root_dir"`
|
||||
Driver string `json:"driver"`
|
||||
DriverStatus map[string]string `json:"driver_status"`
|
||||
ExecDriver string `json:"exec_driver"`
|
||||
NumImages int `json:"num_images"`
|
||||
NumContainers int `json:"num_containers"`
|
||||
}
|
||||
|
||||
type DockerImage struct {
|
||||
ID string `json:"id"`
|
||||
RepoTags []string `json:"repo_tags"` // repository name and tags.
|
||||
Created int64 `json:"created"` // unix time since creation.
|
||||
VirtualSize int64 `json:"virtual_size"`
|
||||
Size int64 `json:"size"`
|
||||
}
|
25
vendor/github.com/jonboulle/clockwork/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
|
||||
*.swp
|
3
vendor/github.com/jonboulle/clockwork/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.3
|
0
vendor/k8s.io/heapster/LICENSE → vendor/github.com/jonboulle/clockwork/LICENSE
generated
vendored
61
vendor/github.com/jonboulle/clockwork/README.md
generated
vendored
Normal file
|
@ -0,0 +1,61 @@
|
|||
clockwork
|
||||
=========
|
||||
|
||||
[](https://travis-ci.org/jonboulle/clockwork)
|
||||
[](http://godoc.org/github.com/jonboulle/clockwork)
|
||||
|
||||
a simple fake clock for golang
|
||||
|
||||
# Usage
|
||||
|
||||
Replace uses of the `time` package with the `clockwork.Clock` interface instead.
|
||||
|
||||
For example, instead of using `time.Sleep` directly:
|
||||
|
||||
```
|
||||
func my_func() {
|
||||
time.Sleep(3 * time.Second)
|
||||
do_something()
|
||||
}
|
||||
```
|
||||
|
||||
inject a clock and use its `Sleep` method instead:
|
||||
|
||||
```
|
||||
func my_func(clock clockwork.Clock) {
|
||||
clock.Sleep(3 * time.Second)
|
||||
do_something()
|
||||
}
|
||||
```
|
||||
|
||||
Now you can easily test `my_func` with a `FakeClock`:
|
||||
|
||||
```
|
||||
func TestMyFunc(t *testing.T) {
|
||||
c := clockwork.NewFakeClock()
|
||||
|
||||
// Start our sleepy function
|
||||
my_func(c)
|
||||
|
||||
// Ensure we wait until my_func is sleeping
|
||||
c.BlockUntil(1)
|
||||
|
||||
assert_state()
|
||||
|
||||
// Advance the FakeClock forward in time
|
||||
c.Advance(3)
|
||||
|
||||
assert_state()
|
||||
}
|
||||
```
|
||||
|
||||
and in production builds, simply inject the real clock instead:
|
||||
```
|
||||
my_func(clockwork.NewRealClock())
|
||||
```
|
||||
|
||||
See [example_test.go](example_test.go) for a full example.
|
||||
|
||||
# Credits
|
||||
|
||||
clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time)
|
164
vendor/github.com/jonboulle/clockwork/clockwork.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
|||
package clockwork
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Clock provides an interface that packages can use instead of directly
|
||||
// using the time module, so that chronology-related behavior can be tested
|
||||
type Clock interface {
|
||||
After(d time.Duration) <-chan time.Time
|
||||
Sleep(d time.Duration)
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// FakeClock provides an interface for a clock which can be
|
||||
// manually advanced through time
|
||||
type FakeClock interface {
|
||||
Clock
|
||||
// Advance advances the FakeClock to a new point in time, ensuring any existing
|
||||
// sleepers are notified appropriately before returning
|
||||
Advance(d time.Duration)
|
||||
// BlockUntil will block until the FakeClock has the given number of
|
||||
// sleepers (callers of Sleep or After)
|
||||
BlockUntil(n int)
|
||||
}
|
||||
|
||||
// NewRealClock returns a Clock which simply delegates calls to the actual time
|
||||
// package; it should be used by packages in production.
|
||||
func NewRealClock() Clock {
|
||||
return &realClock{}
|
||||
}
|
||||
|
||||
// NewFakeClock returns a FakeClock implementation which can be
|
||||
// manually advanced through time for testing.
|
||||
func NewFakeClock() FakeClock {
|
||||
return &fakeClock{
|
||||
l: sync.RWMutex{},
|
||||
|
||||
// use a fixture that does not fulfill Time.IsZero()
|
||||
time: time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
}
|
||||
|
||||
type realClock struct{}
|
||||
|
||||
func (rc *realClock) After(d time.Duration) <-chan time.Time {
|
||||
return time.After(d)
|
||||
}
|
||||
|
||||
func (rc *realClock) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
func (rc *realClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
type fakeClock struct {
|
||||
sleepers []*sleeper
|
||||
blockers []*blocker
|
||||
time time.Time
|
||||
|
||||
l sync.RWMutex
|
||||
}
|
||||
|
||||
// sleeper represents a caller of After or Sleep
|
||||
type sleeper struct {
|
||||
until time.Time
|
||||
done chan time.Time
|
||||
}
|
||||
|
||||
// blocker represents a caller of BlockUntil
|
||||
type blocker struct {
|
||||
count int
|
||||
ch chan struct{}
|
||||
}
|
||||
|
||||
// After mimics time.After; it waits for the given duration to elapse on the
|
||||
// fakeClock, then sends the current time on the returned channel.
|
||||
func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
|
||||
fc.l.Lock()
|
||||
defer fc.l.Unlock()
|
||||
now := fc.time
|
||||
done := make(chan time.Time, 1)
|
||||
if d.Nanoseconds() == 0 {
|
||||
// special case - trigger immediately
|
||||
done <- now
|
||||
} else {
|
||||
// otherwise, add to the set of sleepers
|
||||
s := &sleeper{
|
||||
until: now.Add(d),
|
||||
done: done,
|
||||
}
|
||||
fc.sleepers = append(fc.sleepers, s)
|
||||
// and notify any blockers
|
||||
fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
|
||||
}
|
||||
return done
|
||||
}
|
||||
|
||||
// notifyBlockers notifies all the blockers waiting until the
|
||||
// given number of sleepers are waiting on the fakeClock. It
|
||||
// returns an updated slice of blockers (i.e. those still waiting)
|
||||
func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
|
||||
for _, b := range blockers {
|
||||
if b.count == count {
|
||||
close(b.ch)
|
||||
} else {
|
||||
newBlockers = append(newBlockers, b)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Sleep blocks until the given duration has passed on the fakeClock
|
||||
func (fc *fakeClock) Sleep(d time.Duration) {
|
||||
<-fc.After(d)
|
||||
}
|
||||
|
||||
// Time returns the current time of the fakeClock
|
||||
func (fc *fakeClock) Now() time.Time {
|
||||
fc.l.Lock()
|
||||
defer fc.l.Unlock()
|
||||
return fc.time
|
||||
}
|
||||
|
||||
// Advance advances fakeClock to a new point in time, ensuring channels from any
|
||||
// previous invocations of After are notified appropriately before returning
|
||||
func (fc *fakeClock) Advance(d time.Duration) {
|
||||
fc.l.Lock()
|
||||
defer fc.l.Unlock()
|
||||
end := fc.time.Add(d)
|
||||
var newSleepers []*sleeper
|
||||
for _, s := range fc.sleepers {
|
||||
if end.Sub(s.until) >= 0 {
|
||||
s.done <- end
|
||||
} else {
|
||||
newSleepers = append(newSleepers, s)
|
||||
}
|
||||
}
|
||||
fc.sleepers = newSleepers
|
||||
fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
|
||||
fc.time = end
|
||||
}
|
||||
|
||||
// BlockUntil will block until the fakeClock has the given number of sleepers
|
||||
// (callers of Sleep or After)
|
||||
func (fc *fakeClock) BlockUntil(n int) {
|
||||
fc.l.Lock()
|
||||
// Fast path: current number of sleepers is what we're looking for
|
||||
if len(fc.sleepers) == n {
|
||||
fc.l.Unlock()
|
||||
return
|
||||
}
|
||||
// Otherwise, set up a new blocker
|
||||
b := &blocker{
|
||||
count: n,
|
||||
ch: make(chan struct{}),
|
||||
}
|
||||
fc.blockers = append(fc.blockers, b)
|
||||
fc.l.Unlock()
|
||||
<-b.ch
|
||||
}
|
0
vendor/github.com/pborman/uuid/dce.go
generated
vendored
Normal file → Executable file
0
vendor/github.com/pborman/uuid/doc.go
generated
vendored
Normal file → Executable file
0
vendor/github.com/pborman/uuid/node.go
generated
vendored
Normal file → Executable file
0
vendor/github.com/pborman/uuid/time.go
generated
vendored
Normal file → Executable file
0
vendor/github.com/pborman/uuid/uuid.go
generated
vendored
Normal file → Executable file
201
vendor/github.com/prometheus/common/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
5
vendor/github.com/prometheus/common/NOTICE
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
Common libraries shared by Prometheus Go components.
|
||||
Copyright 2015 The Prometheus Authors
|
||||
|
||||
This product includes software developed at
|
||||
SoundCloud Ltd. (http://soundcloud.com/).
|
69
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
|
@ -37,12 +37,13 @@ type DecodeOptions struct {
|
|||
}
|
||||
|
||||
// ResponseFormat extracts the correct format from a HTTP response header.
|
||||
func ResponseFormat(h http.Header) (Format, error) {
|
||||
// If no matching format can be found FormatUnknown is returned.
|
||||
func ResponseFormat(h http.Header) Format {
|
||||
ct := h.Get(hdrContentType)
|
||||
|
||||
mediatype, params, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid Content-Type header %q: %s", ct, err)
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -52,19 +53,19 @@ func ResponseFormat(h http.Header) (Format, error) {
|
|||
|
||||
switch mediatype {
|
||||
case ProtoType:
|
||||
if p := params["proto"]; p != ProtoProtocol {
|
||||
return "", fmt.Errorf("unrecognized protocol message %s", p)
|
||||
if p, ok := params["proto"]; ok && p != ProtoProtocol {
|
||||
return FmtUnknown
|
||||
}
|
||||
if e := params["encoding"]; e != "delimited" {
|
||||
return "", fmt.Errorf("unsupported encoding %s", e)
|
||||
if e, ok := params["encoding"]; ok && e != "delimited" {
|
||||
return FmtUnknown
|
||||
}
|
||||
return FmtProtoDelim, nil
|
||||
return FmtProtoDelim
|
||||
|
||||
case textType:
|
||||
if v, ok := params["version"]; ok && v != TextVersion {
|
||||
return "", fmt.Errorf("unrecognized protocol version %s", v)
|
||||
return FmtUnknown
|
||||
}
|
||||
return FmtText, nil
|
||||
return FmtText
|
||||
|
||||
case jsonType:
|
||||
var prometheusAPIVersion string
|
||||
|
@ -76,27 +77,26 @@ func ResponseFormat(h http.Header) (Format, error) {
|
|||
}
|
||||
|
||||
switch prometheusAPIVersion {
|
||||
case "0.0.2":
|
||||
return FmtJSON2, nil
|
||||
case "0.0.2", "":
|
||||
return fmtJSON2
|
||||
default:
|
||||
return "", fmt.Errorf("unrecognized API version %s", prometheusAPIVersion)
|
||||
return FmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unsupported media type %q, expected %q or %q", mediatype, ProtoType, textType)
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder based on the HTTP header.
|
||||
func NewDecoder(r io.Reader, format Format) (Decoder, error) {
|
||||
// NewDecoder returns a new decoder based on the given input format.
|
||||
// If the input format does not imply otherwise, a text format decoder is returned.
|
||||
func NewDecoder(r io.Reader, format Format) Decoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return &protoDecoder{r: r}, nil
|
||||
case FmtText:
|
||||
return &textDecoder{r: r}, nil
|
||||
case FmtJSON2:
|
||||
return newJSON2Decoder(r), nil
|
||||
return &protoDecoder{r: r}
|
||||
case fmtJSON2:
|
||||
return newJSON2Decoder(r)
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported decoding format %q", format)
|
||||
return &textDecoder{r: r}
|
||||
}
|
||||
|
||||
// protoDecoder implements the Decoder interface for protocol buffers.
|
||||
|
@ -107,7 +107,29 @@ type protoDecoder struct {
|
|||
// Decode implements the Decoder interface.
|
||||
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.ReadDelimited(d.r, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||
return fmt.Errorf("invalid metric name %q", v.GetName())
|
||||
}
|
||||
for _, m := range v.GetMetric() {
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
for _, l := range m.GetLabel() {
|
||||
if l == nil {
|
||||
continue
|
||||
}
|
||||
if !model.LabelValue(l.GetValue()).IsValid() {
|
||||
return fmt.Errorf("invalid label value %q", l.GetValue())
|
||||
}
|
||||
if !model.LabelName(l.GetName()).IsValid() {
|
||||
return fmt.Errorf("invalid label name %q", l.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// textDecoder implements the Decoder interface for the text protocol.
|
||||
|
@ -129,13 +151,14 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
|||
if len(fams) == 0 {
|
||||
return io.EOF
|
||||
}
|
||||
d.fams = make([]*dto.MetricFamily, 0, len(fams))
|
||||
for _, f := range fams {
|
||||
d.fams = append(d.fams, f)
|
||||
}
|
||||
}
|
||||
|
||||
*v = *d.fams[len(d.fams)-1]
|
||||
d.fams = d.fams[:len(d.fams)-1]
|
||||
*v = *d.fams[0]
|
||||
d.fams = d.fams[1:]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
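With the decode.go rewrite above, ResponseFormat returns FmtUnknown instead of an error and NewDecoder always returns a decoder, falling back to the text format. A rough sketch of decoding metric families from a scrape under the new signatures; the URL is illustrative only:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Hypothetical scrape target.
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// No error returns anymore: unknown content types come back as
	// FmtUnknown and NewDecoder falls back to the text parser.
	format := expfmt.ResponseFormat(resp.Header)
	dec := expfmt.NewDecoder(resp.Body, format)

	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(mf.GetName())
	}
}
```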
2
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
|
@ -18,9 +18,9 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
|
||||
"bitbucket.org/ww/goautoneg"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
|
5
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
|
@ -24,11 +24,14 @@ const (
|
|||
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
|
||||
// The Content-Type values for the different wire protocols.
|
||||
FmtUnknown Format = `<unknown>`
|
||||
FmtText Format = `text/plain; version=` + TextVersion
|
||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||
FmtJSON2 Format = `application/json; version=0.0.2`
|
||||
|
||||
// fmtJSON2 is hidden as it is deprecated.
|
||||
fmtJSON2 Format = `application/json; version=0.0.2`
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
4
vendor/github.com/prometheus/common/expfmt/fuzz.go
generated
vendored
|
@ -20,8 +20,8 @@ import "bytes"
|
|||
|
||||
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
|
||||
//
|
||||
// go-fuzz-build github.com/prometheus/client_golang/text
|
||||
// go-fuzz -bin text-fuzz.zip -workdir fuzz
|
||||
// go-fuzz-build github.com/prometheus/common/expfmt
|
||||
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
|
||||
//
|
||||
// Further input samples should go in the folder fuzz/corpus.
|
||||
func Fuzz(in []byte) int {
|
||||
|
|
20
vendor/github.com/prometheus/common/expfmt/json_decode.go
generated
vendored
|
@ -46,7 +46,7 @@ type counter002 struct {
|
|||
Value float64 `json:"value"`
|
||||
}
|
||||
|
||||
func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
|
||||
func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
|
||||
labels := base.Clone().Merge(ext)
|
||||
delete(labels, model.MetricNameLabel)
|
||||
|
||||
|
@ -59,6 +59,9 @@ func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
|
|||
pairs := make([]*dto.LabelPair, 0, len(labels))
|
||||
|
||||
for _, ln := range names {
|
||||
if !model.LabelNameRE.MatchString(ln) {
|
||||
return nil, fmt.Errorf("invalid label name %q", ln)
|
||||
}
|
||||
lv := labels[model.LabelName(ln)]
|
||||
|
||||
pairs = append(pairs, &dto.LabelPair{
|
||||
|
@ -67,7 +70,7 @@ func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
|
|||
})
|
||||
}
|
||||
|
||||
return pairs
|
||||
return pairs, nil
|
||||
}
|
||||
|
||||
func (d *json2Decoder) more() error {
|
||||
|
@ -102,8 +105,12 @@ func (d *json2Decoder) more() error {
|
|||
}
|
||||
|
||||
for _, ctr := range values {
|
||||
labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, ctr.Labels),
|
||||
Label: labels,
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(ctr.Value),
|
||||
},
|
||||
|
@ -131,8 +138,13 @@ func (d *json2Decoder) more() error {
|
|||
// this remains "percentile"
|
||||
hist.Labels["percentile"] = model.LabelValue(q)
|
||||
|
||||
labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, hist.Labels),
|
||||
Label: labels,
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(value),
|
||||
},
|
||||
|
|
7
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
|
@ -108,6 +108,13 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
|
|||
delete(p.metricFamiliesByName, k)
|
||||
}
|
||||
}
|
||||
// If p.err is io.EOF now, we have run into a premature end of the input
|
||||
// stream. Turn this error into something nicer and more
|
||||
// meaningful. (io.EOF is often used as a signal for the legitimate end
|
||||
// of an input stream.)
|
||||
if p.err == io.EOF {
|
||||
p.parseError("unexpected end of input stream")
|
||||
}
|
||||
return p.metricFamiliesByName, p.err
|
||||
}
|
||||
|
||||
|
|
136
vendor/github.com/prometheus/common/model/alert.go
generated
vendored
Normal file
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AlertStatus string
|
||||
|
||||
const (
|
||||
AlertFiring AlertStatus = "firing"
|
||||
AlertResolved AlertStatus = "resolved"
|
||||
)
|
||||
|
||||
// Alert is a generic representation of an alert in the Prometheus eco-system.
|
||||
type Alert struct {
|
||||
// Label value pairs for purpose of aggregation, matching, and disposition
|
||||
// dispatching. This must minimally include an "alertname" label.
|
||||
Labels LabelSet `json:"labels"`
|
||||
|
||||
// Extra key/value information which does not define alert identity.
|
||||
Annotations LabelSet `json:"annotations"`
|
||||
|
||||
// The known time range for this alert. Both ends are optional.
|
||||
StartsAt time.Time `json:"startsAt,omitempty"`
|
||||
EndsAt time.Time `json:"endsAt,omitempty"`
|
||||
GeneratorURL string `json:"generatorURL"`
|
||||
}
|
||||
|
||||
// Name returns the name of the alert. It is equivalent to the "alertname" label.
|
||||
func (a *Alert) Name() string {
|
||||
return string(a.Labels[AlertNameLabel])
|
||||
}
|
||||
|
||||
// Fingerprint returns a unique hash for the alert. It is equivalent to
|
||||
// the fingerprint of the alert's label set.
|
||||
func (a *Alert) Fingerprint() Fingerprint {
|
||||
return a.Labels.Fingerprint()
|
||||
}
|
||||
|
||||
func (a *Alert) String() string {
|
||||
s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
|
||||
if a.Resolved() {
|
||||
return s + "[resolved]"
|
||||
}
|
||||
return s + "[active]"
|
||||
}
|
||||
|
||||
// Resolved returns true iff the activity interval ended in the past.
|
||||
func (a *Alert) Resolved() bool {
|
||||
return a.ResolvedAt(time.Now())
|
||||
}
|
||||
|
||||
// ResolvedAt returns true iff the activity interval ended before
|
||||
// the given timestamp.
|
||||
func (a *Alert) ResolvedAt(ts time.Time) bool {
|
||||
if a.EndsAt.IsZero() {
|
||||
return false
|
||||
}
|
||||
return !a.EndsAt.After(ts)
|
||||
}
|
||||
|
||||
// Status returns the status of the alert.
|
||||
func (a *Alert) Status() AlertStatus {
|
||||
if a.Resolved() {
|
||||
return AlertResolved
|
||||
}
|
||||
return AlertFiring
|
||||
}
|
||||
|
||||
// Validate returns an error if the alert data is inconsistent.
|
||||
func (a *Alert) Validate() error {
|
||||
if a.StartsAt.IsZero() {
|
||||
return fmt.Errorf("start time missing")
|
||||
}
|
||||
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
|
||||
return fmt.Errorf("start time must be before end time")
|
||||
}
|
||||
if err := a.Labels.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid label set: %s", err)
|
||||
}
|
||||
if len(a.Labels) == 0 {
|
||||
return fmt.Errorf("at least one label pair required")
|
||||
}
|
||||
if err := a.Annotations.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid annotations: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Alerts is a list of alerts that can be sorted in chronological order.
|
||||
type Alerts []*Alert
|
||||
|
||||
func (as Alerts) Len() int { return len(as) }
|
||||
func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
|
||||
|
||||
func (as Alerts) Less(i, j int) bool {
|
||||
if as[i].StartsAt.Before(as[j].StartsAt) {
|
||||
return true
|
||||
}
|
||||
if as[i].EndsAt.Before(as[j].EndsAt) {
|
||||
return true
|
||||
}
|
||||
return as[i].Fingerprint() < as[j].Fingerprint()
|
||||
}
|
||||
|
||||
// HasFiring returns true iff one of the alerts is not resolved.
|
||||
func (as Alerts) HasFiring() bool {
|
||||
for _, a := range as {
|
||||
if !a.Resolved() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Status returns StatusFiring iff at least one of the alerts is firing.
|
||||
func (as Alerts) Status() AlertStatus {
|
||||
if as.HasFiring() {
|
||||
return AlertFiring
|
||||
}
|
||||
return AlertResolved
|
||||
}
|
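model/alert.go is new in this update, so a short usage sketch may help: an Alert whose activity interval has already ended reports itself as resolved, and Validate checks its label set for consistency. The label values below are made up:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical alert that stopped firing an hour ago.
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighErrorRate"},
		StartsAt: time.Now().Add(-2 * time.Hour),
		EndsAt:   time.Now().Add(-1 * time.Hour),
	}
	fmt.Println(a.Name(), a.Status()) // HighErrorRate resolved
	fmt.Println(a.Validate())         // <nil>
}
```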
42
vendor/github.com/prometheus/common/model/fnv.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||
|
||||
const (
|
||||
offset64 = 14695981039346656037
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// hashNew initializes a new fnv64a hash value.
|
||||
func hashNew() uint64 {
|
||||
return offset64
|
||||
}
|
||||
|
||||
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
||||
func hashAdd(h uint64, s string) uint64 {
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint64(s[i])
|
||||
h *= prime64
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
||||
func hashAddByte(h uint64, b byte) uint64 {
|
||||
h ^= uint64(b)
|
||||
h *= prime64
|
||||
return h
|
||||
}
|
22
vendor/github.com/prometheus/common/model/labels.go
generated
vendored
|
@ -17,8 +17,8 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -87,6 +87,19 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
|||
// therewith.
|
||||
type LabelName string
|
||||
|
||||
// IsValid is true iff the label name matches the pattern of LabelNameRE.
|
||||
func (ln LabelName) IsValid() bool {
|
||||
if len(ln) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range ln {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
|
@ -139,6 +152,11 @@ func (l LabelNames) String() string {
|
|||
// A LabelValue is an associated value for a LabelName.
|
||||
type LabelValue string
|
||||
|
||||
// IsValid returns true iff the string is a valid UTF8.
|
||||
func (lv LabelValue) IsValid() bool {
|
||||
return utf8.ValidString(string(lv))
|
||||
}
|
||||
|
||||
// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
|
||||
type LabelValues []LabelValue
|
||||
|
||||
|
@ -147,7 +165,7 @@ func (l LabelValues) Len() int {
|
|||
}
|
||||
|
||||
func (l LabelValues) Less(i, j int) bool {
|
||||
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
|
||||
return string(l[i]) < string(l[j])
|
||||
}
|
||||
|
||||
func (l LabelValues) Swap(i, j int) {
|
||||
|
|
16
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
|
@ -27,6 +27,21 @@ import (
|
|||
// match.
|
||||
type LabelSet map[LabelName]LabelValue
|
||||
|
||||
// Validate checks whether all names and values in the label set
|
||||
// are valid.
|
||||
func (ls LabelSet) Validate() error {
|
||||
for ln, lv := range ls {
|
||||
if !ln.IsValid() {
|
||||
return fmt.Errorf("invalid name %q", ln)
|
||||
}
|
||||
if !lv.IsValid() {
|
||||
return fmt.Errorf("invalid value %q", lv)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal returns true iff both label sets have exactly the same key/value pairs.
|
||||
func (ls LabelSet) Equal(o LabelSet) bool {
|
||||
if len(ls) != len(o) {
|
||||
return false
|
||||
|
@ -90,6 +105,7 @@ func (ls LabelSet) Before(o LabelSet) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Clone returns a copy of the label set.
|
||||
func (ls LabelSet) Clone() LabelSet {
|
||||
lsn := make(LabelSet, len(ls))
|
||||
for ln, lv := range ls {
|
||||
|
|
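The IsValid helpers added to LabelName and LabelValue back the new LabelSet.Validate method above. A short sketch with made-up label sets:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ok := model.LabelSet{"job": "node", "instance": "localhost:9100"}
	fmt.Println(ok.Validate()) // <nil>

	// Label names may not start with a digit, so Validate reports
	// `invalid name "0bad"` for this set.
	bad := model.LabelSet{"0bad": "x"}
	fmt.Println(bad.Validate())
}
```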
19
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
|
@ -15,11 +15,15 @@ package model
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var separator = []byte{0}
|
||||
var (
|
||||
separator = []byte{0}
|
||||
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
|
||||
)
|
||||
|
||||
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
|
||||
// a singleton and refers to one and only one stream of samples.
|
||||
|
@ -79,3 +83,16 @@ func (m Metric) Fingerprint() Fingerprint {
|
|||
func (m Metric) FastFingerprint() Fingerprint {
|
||||
return LabelSet(m).FastFingerprint()
|
||||
}
|
||||
|
||||
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
|
||||
func IsValidMetricName(n LabelValue) bool {
|
||||
if len(n) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range n {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
108
vendor/github.com/prometheus/common/model/signature.go
generated
vendored
|
@ -14,11 +14,7 @@
|
|||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
|
||||
|
@ -28,30 +24,9 @@ const SeparatorByte byte = 255
|
|||
|
||||
var (
|
||||
// cache the signature of an empty label set.
|
||||
emptyLabelSignature = fnv.New64a().Sum64()
|
||||
|
||||
hashAndBufPool sync.Pool
|
||||
emptyLabelSignature = hashNew()
|
||||
)
|
||||
|
||||
type hashAndBuf struct {
|
||||
h hash.Hash64
|
||||
b bytes.Buffer
|
||||
}
|
||||
|
||||
func getHashAndBuf() *hashAndBuf {
|
||||
hb := hashAndBufPool.Get()
|
||||
if hb == nil {
|
||||
return &hashAndBuf{h: fnv.New64a()}
|
||||
}
|
||||
return hb.(*hashAndBuf)
|
||||
}
|
||||
|
||||
func putHashAndBuf(hb *hashAndBuf) {
|
||||
hb.h.Reset()
|
||||
hb.b.Reset()
|
||||
hashAndBufPool.Put(hb)
|
||||
}
|
||||
|
||||
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
|
||||
// given label set. (Collisions are possible but unlikely if the number of label
|
||||
// sets the function is applied to is small.)
|
||||
|
@ -66,18 +41,14 @@ func LabelsToSignature(labels map[string]string) uint64 {
|
|||
}
|
||||
sort.Strings(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(labelName)
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(labels[labelName])
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, labelName)
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, labels[labelName])
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
return sum
|
||||
}
|
||||
|
||||
// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
|
||||
|
@ -93,18 +64,14 @@ func labelSetToFingerprint(ls LabelSet) Fingerprint {
|
|||
}
|
||||
sort.Sort(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(ls[labelName]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, string(labelName))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(ls[labelName]))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return Fingerprint(hb.h.Sum64())
|
||||
return Fingerprint(sum)
|
||||
}
|
||||
|
||||
// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
|
||||
|
@ -116,17 +83,12 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
|
|||
}
|
||||
|
||||
var result uint64
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
for labelName, labelValue := range ls {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(labelValue))
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
result ^= hb.h.Sum64()
|
||||
hb.h.Reset()
|
||||
hb.b.Reset()
|
||||
sum := hashNew()
|
||||
sum = hashAdd(sum, string(labelName))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(labelValue))
|
||||
result ^= sum
|
||||
}
|
||||
return Fingerprint(result)
|
||||
}
|
||||
|
@ -136,24 +98,20 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
|
|||
// specified LabelNames into the signature calculation. The labels passed in
|
||||
// will be sorted by this function.
|
||||
func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
|
||||
if len(m) == 0 || len(labels) == 0 {
|
||||
if len(labels) == 0 {
|
||||
return emptyLabelSignature
|
||||
}
|
||||
|
||||
sort.Sort(LabelNames(labels))
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, label := range labels {
|
||||
hb.b.WriteString(string(label))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(m[label]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, string(label))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(m[label]))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
return sum
|
||||
}
|
||||
|
||||
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
|
||||
|
@ -175,16 +133,12 @@ func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
|
|||
}
|
||||
sort.Sort(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(m[labelName]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, string(labelName))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(m[labelName]))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
return sum
|
||||
}
|
||||
|
|
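signature.go now drives the inline fnv64a helpers from fnv.go instead of pooling hash.Hash64 writers and byte buffers; the exported functions and the signatures they produce are unchanged. A brief sketch with arbitrary labels:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Quasi-unique signature over a plain string map.
	labels := map[string]string{"job": "api", "instance": "10.0.0.1:8080"}
	fmt.Println(model.LabelsToSignature(labels))

	// Signature restricted to a chosen subset of label names.
	m := model.Metric{"__name__": "http_requests_total", "code": "200"}
	fmt.Println(model.SignatureForLabels(m, "code"))
}
```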
106
vendor/github.com/prometheus/common/model/silence.go
generated
vendored
Normal file
|
@ -0,0 +1,106 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Matcher describes a matcher that matches the value of a given label.
|
||||
type Matcher struct {
|
||||
Name LabelName `json:"name"`
|
||||
Value string `json:"value"`
|
||||
IsRegex bool `json:"isRegex"`
|
||||
}
|
||||
|
||||
func (m *Matcher) UnmarshalJSON(b []byte) error {
|
||||
type plain Matcher
|
||||
if err := json.Unmarshal(b, (*plain)(m)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(m.Name) == 0 {
|
||||
return fmt.Errorf("label name in matcher must not be empty")
|
||||
}
|
||||
if m.IsRegex {
|
||||
if _, err := regexp.Compile(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate returns true iff all fields of the matcher have valid values.
|
||||
func (m *Matcher) Validate() error {
|
||||
if !m.Name.IsValid() {
|
||||
return fmt.Errorf("invalid name %q", m.Name)
|
||||
}
|
||||
if m.IsRegex {
|
||||
if _, err := regexp.Compile(m.Value); err != nil {
|
||||
return fmt.Errorf("invalid regular expression %q", m.Value)
|
||||
}
|
||||
} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
|
||||
return fmt.Errorf("invalid value %q", m.Value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Silence defines the representation of a silence definition
|
||||
// in the Prometheus eco-system.
|
||||
type Silence struct {
|
||||
ID uint64 `json:"id,omitempty"`
|
||||
|
||||
Matchers []*Matcher `json:"matchers"`
|
||||
|
||||
StartsAt time.Time `json:"startsAt"`
|
||||
EndsAt time.Time `json:"endsAt"`
|
||||
|
||||
CreatedAt time.Time `json:"createdAt,omitempty"`
|
||||
CreatedBy string `json:"createdBy"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
}
|
||||
|
||||
// Validate returns true iff all fields of the silence have valid values.
|
||||
func (s *Silence) Validate() error {
|
||||
if len(s.Matchers) == 0 {
|
||||
return fmt.Errorf("at least one matcher required")
|
||||
}
|
||||
for _, m := range s.Matchers {
|
||||
if err := m.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid matcher: %s", err)
|
||||
}
|
||||
}
|
||||
if s.StartsAt.IsZero() {
|
||||
return fmt.Errorf("start time missing")
|
||||
}
|
||||
if s.EndsAt.IsZero() {
|
||||
return fmt.Errorf("end time missing")
|
||||
}
|
||||
if s.EndsAt.Before(s.StartsAt) {
|
||||
return fmt.Errorf("start time must be before end time")
|
||||
}
|
||||
if s.CreatedBy == "" {
|
||||
return fmt.Errorf("creator information missing")
|
||||
}
|
||||
if s.Comment == "" {
|
||||
return fmt.Errorf("comment missing")
|
||||
}
|
||||
if s.CreatedAt.IsZero() {
|
||||
return fmt.Errorf("creation timestamp missing")
|
||||
}
|
||||
return nil
|
||||
}
|
61
vendor/github.com/prometheus/common/model/time.go
generated
vendored
|
@@ -163,51 +163,70 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 // This type should not propagate beyond the scope of input/output processing.
 type Duration time.Duration
 
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
 // StringToDuration parses a string into a time.Duration, assuming that a year
-// a day always has 24h.
+// always has 365d, a week always has 7d, and a day always has 24h.
 func ParseDuration(durationStr string) (Duration, error) {
 	matches := durationRE.FindStringSubmatch(durationStr)
 	if len(matches) != 3 {
 		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
 	}
-	durSeconds, _ := strconv.Atoi(matches[1])
-	dur := time.Duration(durSeconds) * time.Second
-	unit := matches[2]
-	switch unit {
+	var (
+		n, _ = strconv.Atoi(matches[1])
+		dur  = time.Duration(n) * time.Millisecond
+	)
+	switch unit := matches[2]; unit {
+	case "y":
+		dur *= 1000 * 60 * 60 * 24 * 365
+	case "w":
+		dur *= 1000 * 60 * 60 * 24 * 7
 	case "d":
-		dur *= 60 * 60 * 24
+		dur *= 1000 * 60 * 60 * 24
 	case "h":
-		dur *= 60 * 60
+		dur *= 1000 * 60 * 60
 	case "m":
-		dur *= 60
+		dur *= 1000 * 60
 	case "s":
-		dur *= 1
+		dur *= 1000
+	case "ms":
+		// Value already correct
 	default:
 		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
 	}
 	return Duration(dur), nil
 }
 
-var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
-
 func (d Duration) String() string {
-	seconds := int64(time.Duration(d) / time.Second)
+	var (
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		unit = "ms"
+	)
 	factors := map[string]int64{
-		"d": 60 * 60 * 24,
-		"h": 60 * 60,
-		"m": 60,
-		"s": 1,
+		"y":  1000 * 60 * 60 * 24 * 365,
+		"w":  1000 * 60 * 60 * 24 * 7,
+		"d":  1000 * 60 * 60 * 24,
+		"h":  1000 * 60 * 60,
+		"m":  1000 * 60,
+		"s":  1000,
+		"ms": 1,
 	}
-	unit := "s"
 
 	switch int64(0) {
-	case seconds % factors["d"]:
+	case ms % factors["y"]:
+		unit = "y"
+	case ms % factors["w"]:
+		unit = "w"
+	case ms % factors["d"]:
 		unit = "d"
-	case seconds % factors["h"]:
+	case ms % factors["h"]:
 		unit = "h"
-	case seconds % factors["m"]:
+	case ms % factors["m"]:
 		unit = "m"
+	case ms % factors["s"]:
+		unit = "s"
 	}
-	return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+	return fmt.Sprintf("%v%v", ms/factors[unit], unit)
 }
 
 // MarshalYAML implements the yaml.Marshaler interface.
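For illustration only (not part of this commit), a small sketch of the millisecond-based parsing and formatting introduced above, assuming the vendored package is imported as github.com/prometheus/common/model:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("90s") // stored internally as milliseconds
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // "90s": formatted with the largest unit that divides evenly

	d2, _ := model.ParseDuration("1500ms")
	fmt.Println(d2.String()) // "1500ms": 1500 is not a whole number of seconds
}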
18 vendor/github.com/prometheus/common/model/value.go generated vendored
@@ -16,6 +16,7 @@ package model
 import (
 	"encoding/json"
 	"fmt"
+	"math"
 	"sort"
 	"strconv"
 	"strings"

@@ -43,8 +44,14 @@ func (v *SampleValue) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
 func (v SampleValue) Equal(o SampleValue) bool {
-	return v == o
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
 }
 
 func (v SampleValue) String() string {

@@ -77,9 +84,9 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
 }
 
 // Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps.
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
 func (s *SamplePair) Equal(o *SamplePair) bool {
-	return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
 }
 
 func (s SamplePair) String() string {

@@ -93,7 +100,8 @@ type Sample struct {
 	Timestamp Time   `json:"timestamp"`
 }
 
-// Equal compares first the metrics, then the timestamp, then the value.
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
 func (s *Sample) Equal(o *Sample) bool {
 	if s == o {
 		return true

@@ -105,7 +113,7 @@ func (s *Sample) Equal(o *Sample) bool {
 	if !s.Timestamp.Equal(o.Timestamp) {
 		return false
 	}
-	if s.Value != o.Value {
+	if !s.Value.Equal(o.Value) {
 		return false
 	}
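For illustration only (not part of this commit), a short sketch of the NaN-aware equality introduced above:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	nan := model.SampleValue(math.NaN())
	fmt.Println(nan == nan)     // false: standard float comparison
	fmt.Println(nan.Equal(nan)) // true: Equal treats two NaNs as equal

	a, b := model.SampleValue(1.5), model.SampleValue(1.5)
	fmt.Println(a.Equal(b)) // true
}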
0 vendor/github.com/ugorji/go/codec/prebuild.sh generated vendored Normal file → Executable file
0 vendor/github.com/ugorji/go/codec/test.py generated vendored Normal file → Executable file
0 vendor/github.com/ugorji/go/codec/tests.sh generated vendored Normal file → Executable file
2 vendor/golang.org/x/net/context/context.go generated vendored
@@ -189,7 +189,7 @@ func Background() Context {
 }
 
 // TODO returns a non-nil, empty Context. Code should use context.TODO when
-// it's unclear which Context to use or it's is not yet available (because the
+// it's unclear which Context to use or it is not yet available (because the
 // surrounding function has not yet been extended to accept a Context
 // parameter). TODO is recognized by static analysis tools that determine
 // whether Contexts are propagated correctly in a program.
1 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go generated vendored
@@ -9,6 +9,7 @@ package ctxhttp
 import "net/http"
 
 func canceler(client *http.Client, req *http.Request) func() {
+	// TODO(djd): Respect any existing value of req.Cancel.
 	ch := make(chan struct{})
 	req.Cancel = ch
67 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go generated vendored
@@ -14,6 +14,14 @@ import (
 	"golang.org/x/net/context"
 )
 
+func nop() {}
+
+var (
+	testHookContextDoneBeforeHeaders = nop
+	testHookDoReturned               = nop
+	testHookDidBodyClose             = nop
+)
+
 // Do sends an HTTP request with the provided http.Client and returns an HTTP response.
 // If the client is nil, http.DefaultClient is used.
 // If the context is canceled or times out, ctx.Err() will be returned.

@@ -33,16 +41,44 @@ func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
 
 	go func() {
 		resp, err := client.Do(req)
+		testHookDoReturned()
 		result <- responseAndError{resp, err}
 	}()
 
+	var resp *http.Response
+
 	select {
 	case <-ctx.Done():
+		testHookContextDoneBeforeHeaders()
 		cancel()
+		// Clean up after the goroutine calling client.Do:
+		go func() {
+			if r := <-result; r.resp != nil {
+				testHookDidBodyClose()
+				r.resp.Body.Close()
+			}
+		}()
 		return nil, ctx.Err()
 	case r := <-result:
-		return r.resp, r.err
+		var err error
+		resp, err = r.resp, r.err
+		if err != nil {
+			return resp, err
+		}
 	}
+
+	c := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			cancel()
+		case <-c:
+			// The response's Body is closed.
+		}
+	}()
+	resp.Body = &notifyingReader{resp.Body, c}
+
+	return resp, nil
 }
 
 // Get issues a GET request via the Do function.

@@ -77,3 +113,28 @@ func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
 func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
 	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
 }
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+	io.ReadCloser
+	notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+	n, err := r.ReadCloser.Read(p)
+	if err != nil && r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return n, err
+}
+
+func (r *notifyingReader) Close() error {
+	err := r.ReadCloser.Close()
+	if r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return err
+}
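For illustration only (not part of this commit), a minimal sketch of calling the ctxhttp helpers above with a deadline; the URL is a placeholder:

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// Cancel the request automatically if it takes longer than two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// A nil client means http.DefaultClient; the URL is only an example.
	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println("read", len(body), "bytes")
}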
2 vendor/golang.org/x/net/http2/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
*~
h2i/h2i
51 vendor/golang.org/x/net/http2/Dockerfile generated vendored Normal file
@@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#

FROM ubuntu:trusty

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git-core build-essential wget

RUN apt-get install -y --no-install-recommends \
    autotools-dev libtool pkg-config zlib1g-dev \
    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
    automake autoconf

# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
    autoconf automake autotools-dev \
    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
    cython python3.4-dev python-setuptools

# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git

WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install

WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig

CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]
3 vendor/golang.org/x/net/http2/Makefile generated vendored Normal file
@@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .
20 vendor/golang.org/x/net/http2/README generated vendored Normal file
@@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but shares a lot of code and
  is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+
225 vendor/golang.org/x/net/http2/client_conn_pool.go generated vendored Normal file
@@ -0,0 +1,225 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Transport code's client connection pooling.

package http2

import (
	"crypto/tls"
	"net/http"
	"sync"
)

// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
	MarkDead(*ClientConn)
}

// TODO: use singleflight for dialing and addConnCalls?
type clientConnPool struct {
	t *Transport

	mu sync.Mutex // TODO: maybe switch to RWMutex
	// TODO: add support for sharing conns based on cert names
	// (e.g. share conn for googleapis.com and appspot.com)
	conns        map[string][]*ClientConn // key is host:port
	dialing      map[string]*dialCall     // currently in-flight dials
	keys         map[*ClientConn][]string
	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}

func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, dialOnMiss)
}

const (
	dialOnMiss   = true
	noDialOnMiss = false
)

func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	p.mu.Lock()
	for _, cc := range p.conns[addr] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return cc, nil
		}
	}
	if !dialOnMiss {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
	return call.res, call.err
}

// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	res  *ClientConn   // valid after done is closed
	err  error         // valid after done is closed
}

// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
	if call, ok := p.dialing[addr]; ok {
		// A dial is already in-flight. Don't start another.
		return call
	}
	call := &dialCall{p: p, done: make(chan struct{})}
	if p.dialing == nil {
		p.dialing = make(map[string]*dialCall)
	}
	p.dialing[addr] = call
	go call.dial(addr)
	return call
}

// run in its own goroutine.
func (c *dialCall) dial(addr string) {
	c.res, c.err = c.p.t.dialClientConn(addr)
	close(c.done)

	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()
}

// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value reports whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
	p.mu.Lock()
	for _, cc := range p.conns[key] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return false, nil
		}
	}
	call, dup := p.addConnCalls[key]
	if !dup {
		if p.addConnCalls == nil {
			p.addConnCalls = make(map[string]*addConnCall)
		}
		call = &addConnCall{
			p:    p,
			done: make(chan struct{}),
		}
		p.addConnCalls[key] = call
		go call.run(t, key, c)
	}
	p.mu.Unlock()

	<-call.done
	if call.err != nil {
		return false, call.err
	}
	return !dup, nil
}

type addConnCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	err  error
}

func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
	cc, err := t.NewClientConn(tc)

	p := c.p
	p.mu.Lock()
	if err != nil {
		c.err = err
	} else {
		p.addConnLocked(key, cc)
	}
	delete(p.addConnCalls, key)
	p.mu.Unlock()
	close(c.done)
}

func (p *clientConnPool) addConn(key string, cc *ClientConn) {
	p.mu.Lock()
	p.addConnLocked(key, cc)
	p.mu.Unlock()
}

// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
	for _, v := range p.conns[key] {
		if v == cc {
			return
		}
	}
	if p.conns == nil {
		p.conns = make(map[string][]*ClientConn)
	}
	if p.keys == nil {
		p.keys = make(map[*ClientConn][]string)
	}
	p.conns[key] = append(p.conns[key], cc)
	p.keys[cc] = append(p.keys[cc], key)
}

func (p *clientConnPool) MarkDead(cc *ClientConn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, key := range p.keys[cc] {
		vv, ok := p.conns[key]
		if !ok {
			continue
		}
		newList := filterOutClientConn(vv, cc)
		if len(newList) > 0 {
			p.conns[key] = newList
		} else {
			delete(p.conns, key)
		}
	}
	delete(p.keys, cc)
}

func (p *clientConnPool) closeIdleConnections() {
	p.mu.Lock()
	defer p.mu.Unlock()
	// TODO: don't close a cc if it was just added to the pool
	// milliseconds ago and has never been used. There's currently
	// a small race window with the HTTP/1 Transport's integration
	// where it can add an idle conn just before using it, and
	// somebody else can concurrently call CloseIdleConns and
	// break some caller's RoundTrip.
	for _, vv := range p.conns {
		for _, cc := range vv {
			cc.closeIfIdle()
		}
	}
}

func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
	out := in[:0]
	for _, v := range in {
		if v != exclude {
			out = append(out, v)
		}
	}
	// If we filtered it out, zero out the last item to prevent
	// the GC from seeing it.
	if len(in) != len(out) {
		in[len(in)-1] = nil
	}
	return out
}
89 vendor/golang.org/x/net/http2/configure_transport.go generated vendored Normal file
@@ -0,0 +1,89 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.6

package http2

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func configureTransport(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr(authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}

// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("%v", e)
		}
	}()
	t.RegisterProtocol("https", rt)
	return nil
}

// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }

func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, noDialOnMiss)
}

// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }

func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := rt.t.RoundTrip(req)
	if err == ErrNoCachedConn {
		return nil, http.ErrSkipAltProtocol
	}
	return res, err
}
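For illustration only (not part of this commit), a minimal sketch of enabling this upgrade path from user code. It assumes the package's exported ConfigureTransport helper, which on Go 1.6+ delegates to the configureTransport function shown above; the demo URL comes from the README earlier in this diff:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Start from a plain HTTP/1.1 transport and teach it to upgrade to h2
	// over TLS when the server negotiates it via ALPN.
	tr := &http.Transport{}
	if err := http2.ConfigureTransport(tr); err != nil {
		panic(err)
	}

	client := &http.Client{Transport: tr}
	// http2.golang.org is the demo server mentioned in the vendored README;
	// any HTTPS host that negotiates h2 would do.
	resp, err := client.Get("https://http2.golang.org/reqinfo")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("negotiated protocol:", resp.Proto) // "HTTP/2.0" when h2 was used
}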
Some files were not shown because too many files have changed in this diff.