Merge remote-tracking branch 'master/master' into refactor-template-headers

Manuel de Brito Fontes 2017-09-26 14:08:17 -03:00
commit d73edb8176
42 changed files with 2115 additions and 1102 deletions

Godeps/Godeps.json (generated)

@@ -418,6 +418,11 @@
   "Comment": "v1.2.0",
   "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728"
 },
+{
+  "ImportPath": "gopkg.in/go-playground/pool.v3",
+  "Comment": "v3.1.1",
+  "Rev": "e73cd3a5ded835540c5cf4778488579c5b357d68"
+},
 {
   "ImportPath": "gopkg.in/inf.v0",
   "Comment": "v0.9.0",


@@ -66,11 +66,11 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe
 	}
 	cert, ok := secret.Data[api_v1.TLSCertKey]
 	if !ok {
-		return nil, fmt.Errorf("secret %v has no private key", secretName)
+		return nil, fmt.Errorf("secret %v has no 'tls.crt'", secretName)
 	}
 	key, ok := secret.Data[api_v1.TLSPrivateKeyKey]
 	if !ok {
-		return nil, fmt.Errorf("secret %v has no cert", secretName)
+		return nil, fmt.Errorf("secret %v has no 'tls.key'", secretName)
 	}
 	certs := &loadbalancers.TLSCerts{Key: string(key), Cert: string(cert)}
 	if err := t.validate(certs); err != nil {


@@ -35,7 +35,7 @@ IMAGE = $(REGISTRY)/$(IMGNAME)
 MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)
 # Set default base image dynamically for each arch
-BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.24
+BASEIMAGE?=gcr.io/google_containers/nginx-slim-$(ARCH):0.25
 ifeq ($(ARCH),arm)
 	QEMUARCH=arm


@@ -1,6 +1,6 @@
 # Nginx Ingress Controller
-This is an nginx Ingress controller that uses [ConfigMap](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/configmap.md) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
+This is an nginx Ingress controller that uses [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/#understanding-configmaps) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
 ## Contents
 * [Conventions](#conventions)


@@ -316,6 +316,7 @@ In NGINX this feature is implemented by the third party module [nginx-sticky-mod
 ### **Allowed parameters in configuration ConfigMap**
 **proxy-body-size:** Sets the maximum allowed size of the client request body. See NGINX [client_max_body_size](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
 **custom-http-errors:** Enables which HTTP codes should be passed for processing with the [error_page directive](http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page).
 Setting at least one code also enables [proxy_intercept_errors](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors) which are required to process error_page.


@@ -124,7 +124,7 @@ type NGINXController struct {
 	configmap *apiv1.ConfigMap
-	storeLister ingress.StoreLister
+	storeLister *ingress.StoreLister
 	binary   string
 	resolver []net.IP
@@ -463,7 +463,7 @@ func (n *NGINXController) SetConfig(cmap *apiv1.ConfigMap) {
 }
 // SetListers sets the configured store listers in the generic ingress controller
-func (n *NGINXController) SetListers(lister ingress.StoreLister) {
+func (n *NGINXController) SetListers(lister *ingress.StoreLister) {
 	n.storeLister = lister
 }


@@ -376,6 +376,21 @@ type Configuration struct {
 	// Sets the header field for identifying the originating IP address of a client
 	// Default is X-Forwarded-For
 	ForwardedForHeader string `json:"forwarded-for-header,omitempty"`
+	// EnableOpentracing enables the nginx Opentracing extension
+	// https://github.com/rnburn/nginx-opentracing
+	// By default this is disabled
+	EnableOpentracing bool `json:"enable-opentracing"`
+	// ZipkinCollectorHost specifies the host to use when uploading traces
+	ZipkinCollectorHost string `json:"zipkin-collector-host"`
+	// ZipkinCollectorPort specifies the port to use when uploading traces
+	ZipkinCollectorPort int `json:"zipkin-collector-port"`
+	// ZipkinServiceName specifies the service name to use for any traces created
+	// Default: nginx
+	ZipkinServiceName string `json:"zipkin-service-name"`
 }
 // NewDefault returns the default nginx configuration
@@ -418,7 +433,7 @@ func NewDefault() Configuration {
 		ShowServerTokens:    true,
 		SSLBufferSize:       sslBufferSize,
 		SSLCiphers:          sslCiphers,
-		SSLECDHCurve:        "secp384r1",
+		SSLECDHCurve:        "auto",
 		SSLProtocols:        sslProtocols,
 		SSLSessionCache:     true,
 		SSLSessionCacheSize: sslSessionCacheSize,
@@ -456,6 +471,8 @@ func NewDefault() Configuration {
 		BindAddressIpv4: defBindAddress,
 		BindAddressIpv6: defBindAddress,
 		RealClientFrom:  "auto",
+		ZipkinCollectorPort: 9411,
+		ZipkinServiceName:   "nginx",
 	}
 	if glog.V(5) {
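
Editor's note: as a rough illustration (not part of the commit, and not the controller's real ConfigMap parsing path, which merges string values through SetConfig), the JSON tags above are what tie the new ConfigMap keys to the struct fields; the port and service name fall back to the NewDefault values:

    // Illustrative only: struct fields and tags are from the commit, the
    // standalone unmarshal below is just a sketch of the key-to-field mapping.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type Configuration struct {
    	EnableOpentracing   bool   `json:"enable-opentracing"`
    	ZipkinCollectorHost string `json:"zipkin-collector-host"`
    	ZipkinCollectorPort int    `json:"zipkin-collector-port"`
    	ZipkinServiceName   string `json:"zipkin-service-name"`
    }

    func main() {
    	// Defaults mirror NewDefault: port 9411, service name "nginx".
    	cfg := Configuration{ZipkinCollectorPort: 9411, ZipkinServiceName: "nginx"}
    	raw := `{"enable-opentracing": true, "zipkin-collector-host": "zipkin.default.svc.cluster.local"}`
    	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", cfg) // tracing turns on once a collector host is set
    }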


@@ -50,11 +50,9 @@ const (
 // Template ...
 type Template struct {
 	tmpl *text_template.Template
 	fw   watch.FileWatcher
 	s    int
-	tmplBuf   *bytes.Buffer
-	outCmdBuf *bytes.Buffer
 }
 //NewTemplate returns a new Template instance or an
@@ -70,11 +68,9 @@ func NewTemplate(file string, onChange func()) (*Template, error) {
 	}
 	return &Template{
 		tmpl: tmpl,
 		fw:   fw,
 		s:    defBufferSize,
-		tmplBuf:   bytes.NewBuffer(make([]byte, 0, defBufferSize)),
-		outCmdBuf: bytes.NewBuffer(make([]byte, 0, defBufferSize)),
 	}, nil
 }
@@ -86,15 +82,13 @@ func (t *Template) Close() {
 // Write populates a buffer using a template with NGINX configuration
 // and the servers and upstreams created by Ingress rules
 func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
-	defer t.tmplBuf.Reset()
-	defer t.outCmdBuf.Reset()
+	tmplBuf := bytes.NewBuffer(make([]byte, 0, t.s))
+	outCmdBuf := bytes.NewBuffer(make([]byte, 0, t.s))
 	defer func() {
-		if t.s < t.tmplBuf.Cap() {
-			glog.V(2).Infof("adjusting template buffer size from %v to %v", t.s, t.tmplBuf.Cap())
-			t.s = t.tmplBuf.Cap()
-			t.tmplBuf = bytes.NewBuffer(make([]byte, 0, t.tmplBuf.Cap()))
-			t.outCmdBuf = bytes.NewBuffer(make([]byte, 0, t.outCmdBuf.Cap()))
+		if t.s < tmplBuf.Cap() {
+			glog.V(2).Infof("adjusting template buffer size from %v to %v", t.s, tmplBuf.Cap())
+			t.s = tmplBuf.Cap()
 		}
 	}()
@@ -106,7 +100,7 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
 		glog.Infof("NGINX configuration: %v", string(b))
 	}
-	err := t.tmpl.Execute(t.tmplBuf, conf)
+	err := t.tmpl.Execute(tmplBuf, conf)
 	if err != nil {
 		return nil, err
 	}
@@ -114,14 +108,14 @@ func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
 	// squeezes multiple adjacent empty lines to be single
 	// spaced this is to avoid the use of regular expressions
 	cmd := exec.Command("/ingress-controller/clean-nginx-conf.sh")
-	cmd.Stdin = t.tmplBuf
-	cmd.Stdout = t.outCmdBuf
+	cmd.Stdin = tmplBuf
+	cmd.Stdout = outCmdBuf
 	if err := cmd.Run(); err != nil {
 		glog.Warningf("unexpected error cleaning template: %v", err)
-		return t.tmplBuf.Bytes(), nil
+		return tmplBuf.Bytes(), nil
 	}
-	return t.outCmdBuf.Bytes(), nil
+	return outCmdBuf.Bytes(), nil
 }
 var (
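
Editor's note: the point of dropping the shared tmplBuf/outCmdBuf fields is that bytes.Buffer is not safe for concurrent use, so reusing them across calls made Write non-reentrant; allocating per call keeps correctness while t.s still carries the learned capacity forward as an allocation hint. A minimal sketch of the pattern under made-up names:

    // Sketch with illustrative names; only the per-call-buffer idea mirrors the commit.
    package main

    import (
    	"bytes"
    	"fmt"
    )

    type tmpl struct {
    	s int // capacity hint carried between calls, like Template.s
    }

    func (t *tmpl) render(conf string) []byte {
    	buf := bytes.NewBuffer(make([]byte, 0, t.s)) // fresh buffer per call
    	buf.WriteString("# generated\n")
    	buf.WriteString(conf)
    	if t.s < buf.Cap() {
    		t.s = buf.Cap() // grow the hint so the next call allocates once
    	}
    	return buf.Bytes()
    }

    func main() {
    	t := &tmpl{s: 16}
    	out := t.render("server { listen 80; }")
    	fmt.Printf("%d bytes rendered, next capacity hint %d\n", len(out), t.s)
    }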
@@ -180,9 +174,14 @@ func formatIP(input string) string {
 }
 // buildResolvers returns the resolvers reading the /etc/resolv.conf file
-func buildResolvers(a interface{}) string {
+func buildResolvers(input interface{}) string {
 	// NGINX need IPV6 addresses to be surrounded by brackets
-	nss := a.([]net.IP)
+	nss, ok := input.([]net.IP)
+	if !ok {
+		glog.Errorf("expected a '[]net.IP' type but %T was returned", input)
+		return ""
+	}
 	if len(nss) == 0 {
 		return ""
 	}
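
Editor's note: the same comma-ok hardening is applied to nearly every template helper below. A bare assertion like `a.([]net.IP)` panics on a mismatched type, while the two-value form lets the helper log and return a safe zero value. The pattern in isolation:

    // Pattern sketch; ipsOrNil is a made-up name, not a helper from the commit.
    package main

    import (
    	"fmt"
    	"net"
    )

    func ipsOrNil(input interface{}) []net.IP {
    	nss, ok := input.([]net.IP) // comma-ok form: no panic on bad input
    	if !ok {
    		fmt.Printf("expected a '[]net.IP' type but %T was returned\n", input)
    		return nil
    	}
    	return nss
    }

    func main() {
    	fmt.Println(ipsOrNil([]net.IP{net.ParseIP("2001:db8::1")})) // ok
    	fmt.Println(ipsOrNil("not-an-ip-slice"))                    // logs, returns nil
    }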
@@ -205,6 +204,7 @@ func buildResolvers(a interface{}) string {
 func buildLocation(input interface{}) string {
 	location, ok := input.(*ingress.Location)
 	if !ok {
+		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
 		return slash
 	}
@@ -229,6 +229,7 @@ func buildLocation(input interface{}) string {
 func buildAuthLocation(input interface{}) string {
 	location, ok := input.(*ingress.Location)
 	if !ok {
+		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
 		return ""
 	}
@@ -246,6 +247,7 @@ func buildAuthResponseHeaders(input interface{}) []string {
 	location, ok := input.(*ingress.Location)
 	res := []string{}
 	if !ok {
+		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
 		return res
 	}
@@ -265,7 +267,8 @@ func buildAuthResponseHeaders(input interface{}) []string {
 func buildLogFormatUpstream(input interface{}) string {
 	cfg, ok := input.(config.Configuration)
 	if !ok {
-		glog.Errorf("error an ingress.buildLogFormatUpstream type but %T was returned", input)
+		glog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
+		return ""
 	}
 	return cfg.BuildLogFormatUpstream()
@@ -276,9 +279,15 @@ func buildLogFormatUpstream(input interface{}) string {
 // If the annotation ingress.kubernetes.io/add-base-url:"true" is specified it will
 // add a base tag in the head of the response from the service
 func buildProxyPass(host string, b interface{}, loc interface{}) string {
-	backends := b.([]*ingress.Backend)
+	backends, ok := b.([]*ingress.Backend)
+	if !ok {
+		glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
+		return ""
+	}
 	location, ok := loc.(*ingress.Location)
 	if !ok {
+		glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
 		return ""
 	}
@@ -354,6 +363,7 @@ func filterRateLimits(input interface{}) []ratelimit.RateLimit {
 	servers, ok := input.([]*ingress.Server)
 	if !ok {
+		glog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input)
 		return ratelimits
 	}
 	for _, server := range servers {
@@ -377,6 +387,7 @@ func buildRateLimitZones(input interface{}) []string {
 	servers, ok := input.([]*ingress.Server)
 	if !ok {
+		glog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input)
 		return zones.List()
 	}
@@ -426,6 +437,7 @@ func buildRateLimit(input interface{}) []string {
 	loc, ok := input.(*ingress.Location)
 	if !ok {
+		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
 		return limits
 	}
@@ -465,7 +477,7 @@ func buildRateLimit(input interface{}) []string {
 func isLocationAllowed(input interface{}) bool {
 	loc, ok := input.(*ingress.Location)
 	if !ok {
-		glog.Errorf("expected an ingress.Location type but %T was returned", input)
+		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
 		return false
 	}
@@ -482,7 +494,11 @@ var (
 // size of the string to be used as a variable in nginx to avoid
 // issue with the size of the variable bucket size directive
 func buildDenyVariable(a interface{}) string {
-	l := a.(string)
+	l, ok := a.(string)
+	if !ok {
+		glog.Errorf("expected a 'string' type but %T was returned", a)
+		return ""
+	}
 	if _, ok := denyPathSlugMap[l]; !ok {
 		denyPathSlugMap[l] = buildRandomUUID()
@@ -493,9 +509,16 @@ func buildDenyVariable(a interface{}) string {
 // TODO: Needs Unit Tests
 func buildUpstreamName(host string, b interface{}, loc interface{}) string {
-	backends := b.([]*ingress.Backend)
+	backends, ok := b.([]*ingress.Backend)
+	if !ok {
+		glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
+		return ""
+	}
 	location, ok := loc.(*ingress.Location)
 	if !ok {
+		glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
 		return ""
 	}
@@ -531,7 +554,8 @@ func isSticky(host string, loc *ingress.Location, stickyLocations map[string][]s
 func buildNextUpstream(input interface{}) string {
 	nextUpstream, ok := input.(string)
 	if !ok {
-		glog.Errorf("expected an string type but %T was returned", input)
+		glog.Errorf("expected a 'string' type but %T was returned", input)
+		return ""
 	}
 	parts := strings.Split(nextUpstream, " ")
@@ -549,7 +573,8 @@ func buildNextUpstream(input interface{}) string {
 func buildAuthSignURL(input interface{}) string {
 	s, ok := input.(string)
 	if !ok {
-		glog.Errorf("expected an string type but %T was returned", input)
+		glog.Errorf("expected an 'string' type but %T was returned", input)
+		return ""
 	}
 	u, _ := url.Parse(s)
@@ -570,7 +595,7 @@ func buildRandomUUID() string {
 func isValidClientBodyBufferSize(input interface{}) bool {
 	s, ok := input.(string)
 	if !ok {
-		glog.Errorf("expected an string type but %T was returned", input)
+		glog.Errorf("expected an 'string' type but %T was returned", input)
 		return false
 	}
@@ -611,13 +636,13 @@ type ingressInformation struct {
 func getIngressInformation(i, p interface{}) *ingressInformation {
 	ing, ok := i.(*extensions.Ingress)
 	if !ok {
-		glog.V(3).Infof("expected an Ingress type but %T was returned", i)
+		glog.Errorf("expected an '*extensions.Ingress' type but %T was returned", i)
 		return &ingressInformation{}
 	}
 	path, ok := p.(string)
 	if !ok {
-		glog.V(3).Infof("expected a string type but %T was returned", p)
+		glog.Errorf("expected a 'string' type but %T was returned", p)
 		return &ingressInformation{}
 	}
@@ -654,7 +679,8 @@ func getIngressInformation(i, p interface{}) *ingressInformation {
 func buildForwardedFor(input interface{}) string {
 	s, ok := input.(string)
 	if !ok {
-		glog.Errorf("expected an string type but %T was returned", input)
+		glog.Errorf("expected a 'string' type but %T was returned", input)
+		return ""
 	}
 	ffh := strings.Replace(s, "-", "_", -1)


@@ -87,6 +87,14 @@ http {
 	underscores_in_headers      {{ if $cfg.EnableUnderscoresInHeaders }}on{{ else }}off{{ end }};
 	ignore_invalid_headers      {{ if $cfg.IgnoreInvalidHeaders }}on{{ else }}off{{ end }};
+	{{ if (and $cfg.EnableOpentracing (ne $cfg.ZipkinCollectorHost "")) }}
+	opentracing on;
+	zipkin_collector_host       {{ $cfg.ZipkinCollectorHost }};
+	zipkin_collector_port       {{ $cfg.ZipkinCollectorPort }};
+	zipkin_service_name         {{ $cfg.ZipkinServiceName }};
+	{{ end }}
 	include /etc/nginx/mime.types;
 	default_type text/html;
 	{{ if $cfg.UseGzip }}
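
Editor's note: a quick way to see what the new block emits is a standalone Go sketch; the real template is evaluated with the controller's funcmap and a $cfg variable rather than the bare dot used here:

    // Sketch only: renders the Zipkin block with stock text/template.
    package main

    import (
    	"os"
    	"text/template"
    )

    type cfg struct {
    	EnableOpentracing   bool
    	ZipkinCollectorHost string
    	ZipkinCollectorPort int
    	ZipkinServiceName   string
    }

    const snippet = `{{ if (and .EnableOpentracing (ne .ZipkinCollectorHost "")) }}
    opentracing on;
    zipkin_collector_host {{ .ZipkinCollectorHost }};
    zipkin_collector_port {{ .ZipkinCollectorPort }};
    zipkin_service_name {{ .ZipkinServiceName }};
    {{ end }}`

    func main() {
    	t := template.Must(template.New("zipkin").Parse(snippet))
    	_ = t.Execute(os.Stdout, cfg{
    		EnableOpentracing:   true,
    		ZipkinCollectorHost: "zipkin.default.svc.cluster.local",
    		ZipkinCollectorPort: 9411,
    		ZipkinServiceName:   "nginx",
    	})
    }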
@@ -375,6 +383,7 @@ http {
 	{{ end }}
 	{{ range $index, $server := $servers }}
 	server {
 		server_name {{ $server.Hostname }};
 		{{ template "SERVER" serverConfig $all $server }}
@@ -382,6 +391,7 @@ http {
 		{{ template "CUSTOM_ERRORS" $all }}
 	}
 	{{ if $server.Alias }}
 	server {
 		server_name {{ $server.Alias }};


@@ -1,12 +0,0 @@
-package base64
-import (
-	"encoding/base64"
-	"strings"
-)
-// Encode encodes a string to base64 removing the equals character
-func Encode(s string) string {
-	str := base64.URLEncoding.EncodeToString([]byte(s))
-	return strings.Replace(str, "=", "", -1)
-}


@@ -17,13 +17,13 @@ limitations under the License.
 package ratelimit
 import (
+	"encoding/base64"
 	"fmt"
 	"sort"
 	"strings"
 	extensions "k8s.io/api/extensions/v1beta1"
-	"k8s.io/ingress/core/pkg/base64"
 	"k8s.io/ingress/core/pkg/ingress/annotations/parser"
 	"k8s.io/ingress/core/pkg/ingress/resolver"
 	"k8s.io/ingress/core/pkg/net"
@@ -218,7 +218,7 @@ func (a ratelimit) Parse(ing *extensions.Ingress) (interface{}, error) {
 		LimitRate:      lr,
 		LimitRateAfter: lra,
 		Name:           zoneName,
-		ID:             base64.Encode(zoneName),
+		ID:             encode(zoneName),
 		Whitelist:      cidrs,
 	}, nil
 }
@@ -248,3 +248,8 @@ func parseCIDRs(s string) ([]string, error) {
 	return cidrs, nil
 }
+func encode(s string) string {
+	str := base64.URLEncoding.EncodeToString([]byte(s))
+	return strings.Replace(str, "=", "", -1)
+}
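
Editor's note: inlining the helper here is what allows deleting the single-function k8s.io/ingress/core/pkg/base64 package above. For reference, the output is URL-safe base64 with the '=' padding stripped:

    // Quick check (not from the commit) of what encode produces.
    package main

    import (
    	"encoding/base64"
    	"fmt"
    	"strings"
    )

    func encode(s string) string {
    	str := base64.URLEncoding.EncodeToString([]byte(s))
    	return strings.Replace(str, "=", "", -1)
    }

    func main() {
    	fmt.Println(encode("default-echo-rate")) // ZGVmYXVsdC1lY2hvLXJhdGU
    }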


@@ -67,15 +67,11 @@ func (ic *GenericController) syncSecret(key string) {
 // getPemCertificate receives a secret, and creates a ingress.SSLCert as return.
 // It parses the secret and verifies if it's a keypair, or a 'ca.crt' secret only.
 func (ic *GenericController) getPemCertificate(secretName string) (*ingress.SSLCert, error) {
-	secretInterface, exists, err := ic.secrLister.Store.GetByKey(secretName)
+	secret, err := ic.listers.Secret.GetByName(secretName)
 	if err != nil {
 		return nil, fmt.Errorf("error retrieving secret %v: %v", secretName, err)
 	}
-	if !exists {
-		return nil, fmt.Errorf("secret named %v does not exist", secretName)
-	}
-	secret := secretInterface.(*apiv1.Secret)
 	cert, okcert := secret.Data[apiv1.TLSCertKey]
 	key, okkey := secret.Data[apiv1.TLSPrivateKeyKey]
@@ -85,6 +81,9 @@ func (ic *GenericController) getPemCertificate(secretName string) (*ingress.SSLC
 	var s *ingress.SSLCert
 	if okcert && okkey {
+		if cert == nil || key == nil {
+			return nil, fmt.Errorf("error retrieving cert or key from secret %v: %v", secretName, err)
+		}
 		s, err = ssl.AddOrUpdateCertAndKey(nsSecName, cert, key, ca)
 		if err != nil {
 			return nil, fmt.Errorf("unexpected error creating pem file %v", err)
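
Editor's note: the repeated GetByKey-plus-exists dance is exactly what the new lister helpers fold away. A plausible reconstruction of GetByName (the actual helper lives in a lister file not shown in this diff):

    // Hypothetical reconstruction based on the call sites in this commit.
    package store

    import (
    	"fmt"

    	apiv1 "k8s.io/api/core/v1"
    	"k8s.io/client-go/tools/cache"
    )

    // SecretLister makes a cache.Store that lists Secrets.
    type SecretLister struct {
    	cache.Store
    }

    // GetByName looks up a secret by "namespace/name" key and folds the
    // 'exists' flag into the error return.
    func (sl *SecretLister) GetByName(name string) (*apiv1.Secret, error) {
    	s, exists, err := sl.GetByKey(name)
    	if err != nil {
    		return nil, err
    	}
    	if !exists {
    		return nil, fmt.Errorf("secret %v was not found", name)
    	}
    	return s.(*apiv1.Secret), nil
    }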


@@ -86,6 +86,13 @@ func buildSecrListerForBackendSSL() store.SecretLister {
 	return secrLister
 }
+func buildListers() *ingress.StoreLister {
+	sl := &ingress.StoreLister{}
+	sl.Ingress.Store = buildIngListenerForBackendSSL()
+	sl.Secret.Store = buildSecrListerForBackendSSL()
+	return sl
+}
 func buildControllerForBackendSSL() cache_client.Controller {
 	cfg := &cache_client.Config{
 		Queue: &MockQueue{Synced: true},
@@ -99,8 +106,7 @@ func buildGenericControllerForBackendSSL() *GenericController {
 		cfg: &Configuration{
 			Client: buildSimpleClientSetForBackendSSL(),
 		},
-		ingLister:  buildIngListenerForBackendSSL(),
-		secrLister: buildSecrListerForBackendSSL(),
+		listers: buildListers(),
 		ingController:  buildControllerForBackendSSL(),
 		endpController: buildControllerForBackendSSL(),
@@ -162,7 +168,7 @@ func TestSyncSecret(t *testing.T) {
 		secret.SetNamespace("default")
 		secret.SetName("foo_secret")
 		secret.Data = foo.Data
-		ic.secrLister.Add(secret)
+		ic.listers.Secret.Add(secret)
 		key := "default/foo_secret"
 		// for add
@@ -209,7 +215,7 @@ func TestGetPemCertificate(t *testing.T) {
 		ic := buildGenericControllerForBackendSSL()
 		secret := buildSecretForBackendSSL()
 		secret.Data = foo.Data
-		ic.secrLister.Add(secret)
+		ic.listers.Secret.Add(secret)
 		sslCert, err := ic.getPemCertificate(foo.secretName)
 		if foo.eErr {


@@ -19,11 +19,13 @@ package controller
 import (
 	"fmt"
 	"math/rand"
+	"net"
 	"reflect"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 	"github.com/golang/glog"
@@ -31,7 +33,6 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/conversion"
-	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -39,7 +40,6 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
-	fcache "k8s.io/client-go/tools/cache/testing"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
@@ -51,7 +51,6 @@ import (
 	"k8s.io/ingress/core/pkg/ingress/defaults"
 	"k8s.io/ingress/core/pkg/ingress/resolver"
 	"k8s.io/ingress/core/pkg/ingress/status"
-	"k8s.io/ingress/core/pkg/ingress/store"
 	"k8s.io/ingress/core/pkg/k8s"
 	"k8s.io/ingress/core/pkg/net/ssl"
 	local_strings "k8s.io/ingress/core/pkg/strings"
@@ -87,12 +86,7 @@ type GenericController struct {
 	secrController cache.Controller
 	mapController  cache.Controller
-	ingLister  store.IngressLister
-	svcLister  store.ServiceLister
-	nodeLister store.NodeLister
-	endpLister store.EndpointLister
-	secrLister store.SecretLister
-	mapLister  store.ConfigMapLister
+	listers *ingress.StoreLister
 	annotations annotationExtractor
@@ -118,7 +112,7 @@ type GenericController struct {
 	// runningConfig contains the running configuration in the Backend
 	runningConfig *ingress.Configuration
-	forceReload bool
+	forceReload int32
 }
 // Configuration contains all the settings required by an Ingress controller
@@ -171,177 +165,18 @@ func newIngressController(config *Configuration) *GenericController {
 			Component: "ingress-controller",
 		}),
 		sslCertTracker: newSSLCertTracker(),
+		listers:        &ingress.StoreLister{},
 	}
 	ic.syncQueue = task.NewTaskQueue(ic.syncIngress)
-	// from here to the end of the method all the code is just boilerplate
-	// required to watch Ingress, Secrets, ConfigMaps and Endoints.
-	// This is used to detect new content, updates or removals and act accordingly
-	ingEventHandler := cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			addIng := obj.(*extensions.Ingress)
-			if !class.IsValid(addIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
-				a, _ := parser.GetStringAnnotation(class.IngressKey, addIng)
-				glog.Infof("ignoring add for ingress %v based on annotation %v with value %v", addIng.Name, class.IngressKey, a)
-				return
-			}
-			ic.recorder.Eventf(addIng, apiv1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", addIng.Namespace, addIng.Name))
-			ic.syncQueue.Enqueue(obj)
-		},
-		DeleteFunc: func(obj interface{}) {
-			delIng, ok := obj.(*extensions.Ingress)
-			if !ok {
-				// If we reached here it means the ingress was deleted but its final state is unrecorded.
-				tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
-				if !ok {
-					glog.Errorf("couldn't get object from tombstone %#v", obj)
-					return
-				}
-				delIng, ok = tombstone.Obj.(*extensions.Ingress)
-				if !ok {
-					glog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj)
-					return
-				}
-			}
-			if !class.IsValid(delIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
-				glog.Infof("ignoring delete for ingress %v based on annotation %v", delIng.Name, class.IngressKey)
-				return
-			}
-			ic.recorder.Eventf(delIng, apiv1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", delIng.Namespace, delIng.Name))
-			ic.syncQueue.Enqueue(obj)
-		},
-		UpdateFunc: func(old, cur interface{}) {
-			oldIng := old.(*extensions.Ingress)
-			curIng := cur.(*extensions.Ingress)
-			validOld := class.IsValid(oldIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass)
-			validCur := class.IsValid(curIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass)
-			if !validOld && validCur {
-				glog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey)
-				ic.recorder.Eventf(curIng, apiv1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
-			} else if validOld && !validCur {
-				glog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey)
-				ic.recorder.Eventf(curIng, apiv1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
-			} else if validCur && !reflect.DeepEqual(old, cur) {
-				ic.recorder.Eventf(curIng, apiv1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
-			}
-			ic.syncQueue.Enqueue(cur)
-		},
-	}
-	secrEventHandler := cache.ResourceEventHandlerFuncs{
-		UpdateFunc: func(old, cur interface{}) {
-			if !reflect.DeepEqual(old, cur) {
-				sec := cur.(*apiv1.Secret)
-				key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name)
-				ic.syncSecret(key)
-			}
-		},
-		DeleteFunc: func(obj interface{}) {
-			sec, ok := obj.(*apiv1.Secret)
-			if !ok {
-				// If we reached here it means the secret was deleted but its final state is unrecorded.
-				tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
-				if !ok {
-					glog.Errorf("couldn't get object from tombstone %#v", obj)
-					return
-				}
-				sec, ok = tombstone.Obj.(*apiv1.Secret)
-				if !ok {
-					glog.Errorf("Tombstone contained object that is not a Secret: %#v", obj)
-					return
-				}
-			}
-			key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name)
-			ic.sslCertTracker.DeleteAll(key)
-		},
-	}
-	eventHandler := cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			ic.syncQueue.Enqueue(obj)
-		},
-		DeleteFunc: func(obj interface{}) {
-			ic.syncQueue.Enqueue(obj)
-		},
-		UpdateFunc: func(old, cur interface{}) {
-			oep := old.(*apiv1.Endpoints)
-			ocur := cur.(*apiv1.Endpoints)
-			if !reflect.DeepEqual(ocur.Subsets, oep.Subsets) {
-				ic.syncQueue.Enqueue(cur)
-			}
-		},
-	}
-	mapEventHandler := cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			upCmap := obj.(*apiv1.ConfigMap)
-			mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name)
-			if mapKey == ic.cfg.ConfigMapName {
-				glog.V(2).Infof("adding configmap %v to backend", mapKey)
-				ic.cfg.Backend.SetConfig(upCmap)
-				ic.forceReload = true
-			}
-		},
-		UpdateFunc: func(old, cur interface{}) {
-			if !reflect.DeepEqual(old, cur) {
-				upCmap := cur.(*apiv1.ConfigMap)
-				mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name)
-				if mapKey == ic.cfg.ConfigMapName {
-					glog.V(2).Infof("updating configmap backend (%v)", mapKey)
-					ic.cfg.Backend.SetConfig(upCmap)
-					ic.forceReload = true
-				}
-				// updates to configuration configmaps can trigger an update
-				if mapKey == ic.cfg.ConfigMapName || mapKey == ic.cfg.TCPConfigMapName || mapKey == ic.cfg.UDPConfigMapName {
-					ic.recorder.Eventf(upCmap, apiv1.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", mapKey))
-					ic.syncQueue.Enqueue(cur)
-				}
-			}
-		},
-	}
-	watchNs := apiv1.NamespaceAll
-	if ic.cfg.ForceNamespaceIsolation && ic.cfg.Namespace != apiv1.NamespaceAll {
-		watchNs = ic.cfg.Namespace
-	}
-	ic.ingLister.Store, ic.ingController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.ExtensionsV1beta1().RESTClient(), "ingresses", ic.cfg.Namespace, fields.Everything()),
-		&extensions.Ingress{}, ic.cfg.ResyncPeriod, ingEventHandler)
-	ic.endpLister.Store, ic.endpController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "endpoints", ic.cfg.Namespace, fields.Everything()),
-		&apiv1.Endpoints{}, ic.cfg.ResyncPeriod, eventHandler)
-	ic.secrLister.Store, ic.secrController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "secrets", watchNs, fields.Everything()),
-		&apiv1.Secret{}, ic.cfg.ResyncPeriod, secrEventHandler)
-	ic.mapLister.Store, ic.mapController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "configmaps", watchNs, fields.Everything()),
-		&apiv1.ConfigMap{}, ic.cfg.ResyncPeriod, mapEventHandler)
-	ic.svcLister.Store, ic.svcController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()),
-		&apiv1.Service{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})
-	var nodeListerWatcher cache.ListerWatcher
-	if config.DisableNodeList {
-		nodeListerWatcher = fcache.NewFakeControllerSource()
-	} else {
-		nodeListerWatcher = cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
-	}
-	ic.nodeLister.Store, ic.nodeController = cache.NewInformer(
-		nodeListerWatcher,
-		&apiv1.Node{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})
+	ic.createListers(config.DisableNodeList)
 	if config.UpdateStatus {
 		ic.syncStatus = status.NewStatusSyncer(status.Config{
 			Client:         config.Client,
 			PublishService: ic.cfg.PublishService,
-			IngressLister:  ic.ingLister,
+			IngressLister:  ic.listers.Ingress,
 			ElectionID:     config.ElectionID,
 			IngressClass:   config.IngressClass,
 			DefaultIngressClass: config.DefaultIngressClass,
@@ -353,14 +188,7 @@ func newIngressController(config *Configuration) *GenericController {
 		}
 	}
 	ic.annotations = newAnnotationExtractor(ic)
-	ic.cfg.Backend.SetListers(ingress.StoreLister{
-		Ingress:   ic.ingLister,
-		Service:   ic.svcLister,
-		Node:      ic.nodeLister,
-		Endpoint:  ic.endpLister,
-		Secret:    ic.secrLister,
-		ConfigMap: ic.mapLister,
-	})
+	ic.cfg.Backend.SetListers(ic.listers)
 	cloner.RegisterDeepCopyFunc(ingress.GetGeneratedDeepCopyFuncs)
@@ -384,7 +212,7 @@ func (ic GenericController) GetDefaultBackend() defaults.Backend {
 // GetPublishService returns the configured service used to set ingress status
 func (ic GenericController) GetPublishService() *apiv1.Service {
-	s, err := ic.GetService(ic.cfg.PublishService)
+	s, err := ic.listers.Service.GetByName(ic.cfg.PublishService)
 	if err != nil {
 		return nil
 	}
@@ -399,37 +227,12 @@ func (ic GenericController) GetRecorder() record.EventRecorder {
 // GetSecret searches for a secret in the local secrets Store
 func (ic GenericController) GetSecret(name string) (*apiv1.Secret, error) {
-	s, exists, err := ic.secrLister.Store.GetByKey(name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, fmt.Errorf("secret %v was not found", name)
-	}
-	return s.(*apiv1.Secret), nil
+	return ic.listers.Secret.GetByName(name)
 }
 // GetService searches for a service in the local secrets Store
 func (ic GenericController) GetService(name string) (*apiv1.Service, error) {
-	s, exists, err := ic.svcLister.Store.GetByKey(name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, fmt.Errorf("service %v was not found", name)
-	}
-	return s.(*apiv1.Service), nil
-}
-func (ic *GenericController) getConfigMap(ns, name string) (*apiv1.ConfigMap, error) {
-	s, exists, err := ic.mapLister.Store.GetByKey(fmt.Sprintf("%v/%v", ns, name))
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, fmt.Errorf("configmap %v was not found", name)
-	}
-	return s.(*apiv1.ConfigMap), nil
+	return ic.listers.Service.GetByName(name)
 }
 // sync collects all the pieces required to assemble the configuration file and
@@ -443,13 +246,32 @@ func (ic *GenericController) syncIngress(key interface{}) error {
 	}
 	if name, ok := key.(string); ok {
-		if obj, exists, _ := ic.ingLister.GetByKey(name); exists {
+		if obj, exists, _ := ic.listers.Ingress.GetByKey(name); exists {
 			ing := obj.(*extensions.Ingress)
 			ic.readSecrets(ing)
 		}
 	}
-	upstreams, servers := ic.getBackendServers()
+	// Sort ingress rules using the ResourceVersion field
+	ings := ic.listers.Ingress.List()
+	sort.SliceStable(ings, func(i, j int) bool {
+		ir := ings[i].(*extensions.Ingress).ResourceVersion
+		jr := ings[j].(*extensions.Ingress).ResourceVersion
+		return ir < jr
+	})
+	// filter ingress rules
+	var ingresses []*extensions.Ingress
+	for _, ingIf := range ings {
+		ing := ingIf.(*extensions.Ingress)
+		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
+			continue
+		}
+		ingresses = append(ingresses, ing)
+	}
+	upstreams, servers := ic.getBackendServers(ingresses)
 	var passUpstreams []*ingress.SSLPassthroughBackend
 	for _, server := range servers {
@@ -480,7 +302,7 @@ func (ic *GenericController) syncIngress(key interface{}) error {
 		PassthroughBackends: passUpstreams,
 	}
-	if !ic.forceReload && ic.runningConfig != nil && ic.runningConfig.Equal(&pcfg) {
+	if !ic.isForceReload() && ic.runningConfig != nil && ic.runningConfig.Equal(&pcfg) {
 		glog.V(3).Infof("skipping backend reload (no changes detected)")
 		return nil
 	}
@@ -499,7 +321,7 @@ func (ic *GenericController) syncIngress(key interface{}) error {
 	setSSLExpireTime(servers)
 	ic.runningConfig = &pcfg
-	ic.forceReload = false
+	ic.setForceReload(false)
 	return nil
 }
@@ -511,15 +333,15 @@ func (ic *GenericController) getStreamServices(configmapName string, proto apiv1
 		return []ingress.L4Service{}
 	}
-	ns, name, err := k8s.ParseNameNS(configmapName)
+	_, _, err := k8s.ParseNameNS(configmapName)
 	if err != nil {
-		glog.Errorf("unexpected error reading configmap %v: %v", name, err)
+		glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err)
 		return []ingress.L4Service{}
 	}
-	configmap, err := ic.getConfigMap(ns, name)
+	configmap, err := ic.listers.ConfigMap.GetByName(configmapName)
 	if err != nil {
-		glog.Errorf("unexpected error reading configmap %v: %v", name, err)
+		glog.Errorf("unexpected error reading configmap %v: %v", configmapName, err)
 		return []ingress.L4Service{}
 	}
@@ -562,7 +384,7 @@ func (ic *GenericController) getStreamServices(configmapName string, proto apiv1
 			continue
 		}
-		svcObj, svcExists, err := ic.svcLister.Store.GetByKey(nsName)
+		svcObj, svcExists, err := ic.listers.Service.GetByKey(nsName)
 		if err != nil {
 			glog.Warningf("error getting service %v: %v", nsName, err)
 			continue
@@ -578,7 +400,7 @@ func (ic *GenericController) getStreamServices(configmapName string, proto apiv1
 		var endps []ingress.Endpoint
 		targetPort, err := strconv.Atoi(svcPort)
 		if err != nil {
-			glog.V(3).Infof("searching service %v/%v endpoints using the name '%v'", svcNs, svcName, svcPort)
+			glog.V(3).Infof("searching service %v endpoints using the name '%v'", svcNs, svcName, svcPort)
 			for _, sp := range svc.Spec.Ports {
 				if sp.Name == svcPort {
 					if sp.Protocol == proto {
@@ -631,7 +453,7 @@ func (ic *GenericController) getDefaultUpstream() *ingress.Backend {
 		Name: defUpstreamName,
 	}
 	svcKey := ic.cfg.DefaultService
-	svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey)
+	svcObj, svcExists, err := ic.listers.Service.GetByKey(svcKey)
 	if err != nil {
 		glog.Warningf("unexpected error searching the default backend %v: %v", ic.cfg.DefaultService, err)
 		upstream.Endpoints = append(upstream.Endpoints, ic.cfg.Backend.DefaultEndpoint())
@@ -656,46 +478,15 @@ func (ic *GenericController) getDefaultUpstream() *ingress.Backend {
 	return upstream
 }
-type ingressByRevision []interface{}
-func (c ingressByRevision) Len() int      { return len(c) }
-func (c ingressByRevision) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
-func (c ingressByRevision) Less(i, j int) bool {
-	ir := c[i].(*extensions.Ingress).ResourceVersion
-	jr := c[j].(*extensions.Ingress).ResourceVersion
-	return ir < jr
-}
 // getBackendServers returns a list of Upstream and Server to be used by the backend
 // An upstream can be used in multiple servers if the namespace, service name and port are the same
-func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress.Server) {
-	ings := ic.ingLister.Store.List()
-	sort.Sort(ingressByRevision(ings))
-	upstreams := ic.createUpstreams(ings)
-	servers := ic.createServers(ings, upstreams)
-	// If a server has a hostname equivalent to a pre-existing alias, then we
-	// remove the alias to avoid conflicts.
-	for _, server := range servers {
-		for j, alias := range servers {
-			if server.Hostname == alias.Alias {
-				glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). Removing alias to avoid conflicts.",
-					server.Hostname, alias.Hostname, alias.Hostname)
-				servers[j].Alias = ""
-			}
-		}
-	}
-	for _, ingIf := range ings {
-		ing := ingIf.(*extensions.Ingress)
+func (ic *GenericController) getBackendServers(ingresses []*extensions.Ingress) ([]*ingress.Backend, []*ingress.Server) {
+	du := ic.getDefaultUpstream()
+	upstreams := ic.createUpstreams(ingresses, du)
+	servers := ic.createServers(ingresses, upstreams, du)
+	for _, ing := range ingresses {
 		affinity := ic.annotations.SessionAffinity(ing)
-		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
-			continue
-		}
 		anns := ic.annotations.Extract(ing)
 		for _, rule := range ing.Spec.Rules {
@@ -860,15 +651,22 @@
 		}
 	}
 	if ic.cfg.SortBackends {
-		sort.Sort(ingress.BackendByNameServers(aUpstreams))
+		sort.SliceStable(aUpstreams, func(a, b int) bool {
+			return aUpstreams[a].Name < aUpstreams[b].Name
+		})
 	}
 	aServers := make([]*ingress.Server, 0, len(servers))
 	for _, value := range servers {
-		sort.Sort(ingress.LocationByPath(value.Locations))
+		sort.SliceStable(value.Locations, func(i, j int) bool {
+			return value.Locations[i].Path > value.Locations[j].Path
+		})
 		aServers = append(aServers, value)
 	}
-	sort.Sort(ingress.ServerByName(aServers))
+	sort.SliceStable(aServers, func(i, j int) bool {
+		return aServers[i].Hostname < aServers[j].Hostname
+	})
 	return aUpstreams, aServers
 }
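
Editor's note: the commit retires the sort.Interface helper types (ingressByRevision above, plus BackendByNameServers, LocationByPath, ServerByName, and EndpointByAddrPort below) in favor of sort.SliceStable closures. The two styles order identically; a sketch with a toy type:

    // Before/after sketch of the sorting change (illustrative type).
    package main

    import (
    	"fmt"
    	"sort"
    )

    type server struct{ Hostname string }

    type serverByName []server

    func (s serverByName) Len() int           { return len(s) }
    func (s serverByName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
    func (s serverByName) Less(i, j int) bool { return s[i].Hostname < s[j].Hostname }

    func main() {
    	servers := []server{{"zz"}, {"aa"}}
    	// old style: named type implementing sort.Interface
    	sort.Sort(serverByName(servers))
    	// new style: same ordering via a closure, no named type, and stable
    	sort.SliceStable(servers, func(i, j int) bool { return servers[i].Hostname < servers[j].Hostname })
    	fmt.Println(servers) // [{aa} {zz}]
    }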
@@ -879,7 +677,7 @@ func (ic GenericController) GetAuthCertificate(secretName string) (*resolver.Aut
 		ic.syncSecret(secretName)
 	}
-	_, err := ic.GetSecret(secretName)
+	_, err := ic.listers.Secret.GetByName(secretName)
 	if err != nil {
 		return &resolver.AuthSSLCert{}, fmt.Errorf("unexpected error: %v", err)
 	}
@@ -898,17 +696,11 @@ func (ic GenericController) GetAuthCertificate(secretName string) (*resolver.Aut
 // createUpstreams creates the NGINX upstreams for each service referenced in
 // Ingress rules. The servers inside the upstream are endpoints.
-func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ingress.Backend {
+func (ic *GenericController) createUpstreams(data []*extensions.Ingress, du *ingress.Backend) map[string]*ingress.Backend {
 	upstreams := make(map[string]*ingress.Backend)
-	upstreams[defUpstreamName] = ic.getDefaultUpstream()
-	for _, ingIf := range data {
-		ing := ingIf.(*extensions.Ingress)
-		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
-			continue
-		}
+	upstreams[defUpstreamName] = du
+	for _, ing := range data {
 		secUpstream := ic.annotations.SecureUpstream(ing)
 		hz := ic.annotations.HealthCheck(ing)
 		serviceUpstream := ic.annotations.ServiceUpstream(ing)
@@ -994,18 +786,13 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
 					upstreams[name].Endpoints = endp
 				}
-				s, exists, err := ic.svcLister.Store.GetByKey(svcKey)
+				s, err := ic.listers.Service.GetByName(svcKey)
 				if err != nil {
 					glog.Warningf("error obtaining service: %v", err)
 					continue
 				}
-				if !exists {
-					glog.Warningf("service %v does not exists", svcKey)
-					continue
-				}
-				upstreams[name].Service = s.(*apiv1.Service)
+				upstreams[name].Service = s
 			}
 		}
 	}
@@ -1014,7 +801,7 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
 }
 func (ic *GenericController) getServiceClusterEndpoint(svcKey string, backend *extensions.IngressBackend) (endpoint ingress.Endpoint, err error) {
-	svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey)
+	svcObj, svcExists, err := ic.listers.Service.GetByKey(svcKey)
 	if !svcExists {
 		return endpoint, fmt.Errorf("service %v does not exist", svcKey)
@@ -1035,19 +822,13 @@
 // to a service.
 func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 	hz *healthcheck.Upstream) ([]ingress.Endpoint, error) {
-	svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey)
+	svc, err := ic.listers.Service.GetByName(svcKey)
 	var upstreams []ingress.Endpoint
 	if err != nil {
 		return upstreams, fmt.Errorf("error getting service %v from the cache: %v", svcKey, err)
 	}
-	if !svcExists {
-		err = fmt.Errorf("service %v does not exist", svcKey)
-		return upstreams, err
-	}
-	svc := svcObj.(*apiv1.Service)
 	glog.V(3).Infof("obtaining port information for service %v", svcKey)
 	for _, servicePort := range svc.Spec.Ports {
 		// targetPort could be a string, use the name or the port (int)
@@ -1061,7 +842,15 @@ func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 		}
 		if ic.cfg.SortBackends {
-			sort.Sort(ingress.EndpointByAddrPort(endps))
+			sort.SliceStable(endps, func(i, j int) bool {
+				iName := endps[i].Address
+				jName := endps[j].Address
+				if iName != jName {
+					return iName < jName
+				}
+				return endps[i].Port < endps[j].Port
+			})
 		}
 		upstreams = append(upstreams, endps...)
 		break
@@ -1083,12 +872,17 @@ func (ic *GenericController) serviceEndpoints(svcKey, backendPort string,
 // FDQN referenced by ingress rules and the common name field in the referenced
 // SSL certificates. Each server is configured with location / using a default
 // backend specified by the user or the one inside the ingress spec.
-func (ic *GenericController) createServers(data []interface{},
-	upstreams map[string]*ingress.Backend) map[string]*ingress.Server {
-	servers := make(map[string]*ingress.Server)
+func (ic *GenericController) createServers(data []*extensions.Ingress,
+	upstreams map[string]*ingress.Backend,
+	du *ingress.Backend) map[string]*ingress.Server {
+	servers := make(map[string]*ingress.Server, len(data))
+	// If a server has a hostname equivalent to a pre-existing alias, then we
+	// remove the alias to avoid conflicts.
+	aliases := make(map[string]string, len(data))
 	bdef := ic.GetDefaultBackend()
-	ngxProxy := proxy.Configuration{
+	ngxProxy := &proxy.Configuration{
 		BodySize:       bdef.ProxyBodySize,
 		ConnectTimeout: bdef.ProxyConnectTimeout,
 		SendTimeout:    bdef.ProxySendTimeout,
@@ -1111,7 +905,6 @@ func (ic *GenericController) createServers(data []interface{},
 	}
 	// initialize the default server
-	du := ic.getDefaultUpstream()
 	servers[defServerName] = &ingress.Server{
 		Hostname:       defServerName,
 		SSLCertificate: defaultPemFileName,
@@ -1127,17 +920,12 @@ func (ic *GenericController) createServers(data []interface{},
 		}}
 	// initialize all the servers
-	for _, ingIf := range data {
-		ing := ingIf.(*extensions.Ingress)
-		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
-			continue
-		}
+	for _, ing := range data {
 		// check if ssl passthrough is configured
 		sslpt := ic.annotations.SSLPassthrough(ing)
 		// default upstream server
-		du := ic.getDefaultUpstream()
 		un := du.Name
 		if ing.Spec.Backend != nil {
@@ -1178,17 +966,14 @@ func (ic *GenericController) createServers(data []interface{},
 					Proxy:   ngxProxy,
 					Service: &apiv1.Service{},
 				},
-			}, SSLPassthrough: sslpt}
+			},
+			SSLPassthrough: sslpt,
+		}
 		}
 	}
 	// configure default location, alias, and SSL
-	for _, ingIf := range data {
-		ing := ingIf.(*extensions.Ingress)
-		if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
-			continue
-		}
+	for _, ing := range data {
 		// setup server-alias based on annotations
 		aliasAnnotation := ic.annotations.Alias(ing)
@@ -1200,6 +985,11 @@ func (ic *GenericController) createServers(data []interface{},
 		// setup server aliases
 		servers[host].Alias = aliasAnnotation
+		if aliasAnnotation != "" {
+			if _, ok := aliases[aliasAnnotation]; !ok {
+				aliases[aliasAnnotation] = host
+			}
+		}
 		// only add a certificate if the server does not have one previously configured
 		if servers[host].SSLCertificate != "" {
@@ -1258,6 +1048,12 @@
 			}
 		}
 	}
+	for alias, host := range aliases {
+		if _, ok := servers[alias]; ok {
+			glog.Warningf("There is a conflict with server hostname '%v' and alias '%v' (in server %v). Removing alias to avoid conflicts.", alias, alias, host)
+			servers[host].Alias = ""
+		}
+	}
 	return servers
 }
@@ -1283,6 +1079,14 @@ func (ic *GenericController) getEndpoints(
 		return upsServers
 	}
+	if net.ParseIP(s.Spec.ExternalName) == nil {
+		_, err := net.LookupHost(s.Spec.ExternalName)
+		if err != nil {
+			glog.Errorf("unexpected error resolving host %v: %v", s.Spec.ExternalName, err)
+			return upsServers
+		}
+	}
 	return append(upsServers, ingress.Endpoint{
 		Address: s.Spec.ExternalName,
 		Port:    fmt.Sprintf("%v", targetPort),
@@ -1292,7 +1096,7 @@
 	}
 	glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String())
-	ep, err := ic.endpLister.GetServiceEndpoints(s)
+	ep, err := ic.listers.Endpoint.GetServiceEndpoints(s)
 	if err != nil {
 		glog.Warningf("unexpected error obtaining service endpoints: %v", err)
 		return upsServers
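
Editor's note: the new ExternalName guard works because net.ParseIP returns nil for anything that is not a literal IP, so the DNS lookup only fires for real hostnames. Behavior sketch (illustrative helper, not from the commit):

    // Sketch of the guard's decision logic in isolation.
    package main

    import (
    	"fmt"
    	"net"
    )

    func usable(externalName string) bool {
    	if net.ParseIP(externalName) == nil {
    		if _, err := net.LookupHost(externalName); err != nil {
    			return false // unresolvable hostname: skip the endpoint
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(usable("10.0.0.1"))    // true: literal IP, no lookup performed
    	fmt.Println(usable("example.com")) // true only if DNS resolves
    }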
@@ -1379,7 +1183,7 @@ func (ic GenericController) Stop() error {
 }
 // Start starts the Ingress controller.
-func (ic GenericController) Start() {
+func (ic *GenericController) Start() {
 	glog.Infof("starting Ingress controller")
 	go ic.ingController.Run(ic.stopCh)
@@ -1402,9 +1206,16 @@ func (ic GenericController) Start() {
 	}
 	// initial sync of secrets to avoid unnecessary reloads
-	for _, key := range ic.ingLister.ListKeys() {
-		if obj, exists, _ := ic.ingLister.GetByKey(key); exists {
+	for _, key := range ic.listers.Ingress.ListKeys() {
+		if obj, exists, _ := ic.listers.Ingress.GetByKey(key); exists {
 			ing := obj.(*extensions.Ingress)
+			if !class.IsValid(ing, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
+				a, _ := parser.GetStringAnnotation(class.IngressKey, ing)
+				glog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a)
+				continue
+			}
 			ic.readSecrets(ing)
 		}
 	}
@@ -1417,9 +1228,25 @@ func (ic GenericController) Start() {
 		go ic.syncStatus.Run(ic.stopCh)
 	}
+	time.Sleep(5 * time.Second)
+	// force initial sync
+	ic.syncQueue.Enqueue(&extensions.Ingress{})
 	<-ic.stopCh
 }
+func (ic *GenericController) isForceReload() bool {
+	return atomic.LoadInt32(&ic.forceReload) != 0
+}
+func (ic *GenericController) setForceReload(shouldReload bool) {
+	if shouldReload {
+		atomic.StoreInt32(&ic.forceReload, 1)
+	} else {
+		atomic.StoreInt32(&ic.forceReload, 0)
+	}
+}
 func createDefaultSSLCertificate() {
 	defCert, defKey := ssl.GetFakeSSLCert()
 	c, err := ssl.AddOrUpdateCertAndKey(fakeCertificate, defCert, defKey, []byte{})


@ -0,0 +1,197 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"reflect"
"github.com/golang/glog"
apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
fcache "k8s.io/client-go/tools/cache/testing"
"k8s.io/ingress/core/pkg/ingress/annotations/class"
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
)
func (ic *GenericController) createListers(disableNodeLister bool) {
// from here to the end of the method all the code is just boilerplate
// required to watch Ingress, Secrets, ConfigMaps and Endpoints.
// This is used to detect new content, updates or removals and act accordingly
ingEventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
addIng := obj.(*extensions.Ingress)
if !class.IsValid(addIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
a, _ := parser.GetStringAnnotation(class.IngressKey, addIng)
glog.Infof("ignoring add for ingress %v based on annotation %v with value %v", addIng.Name, class.IngressKey, a)
return
}
ic.recorder.Eventf(addIng, apiv1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", addIng.Namespace, addIng.Name))
ic.syncQueue.Enqueue(obj)
},
DeleteFunc: func(obj interface{}) {
delIng, ok := obj.(*extensions.Ingress)
if !ok {
// If we reached here it means the ingress was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("couldn't get object from tombstone %#v", obj)
return
}
delIng, ok = tombstone.Obj.(*extensions.Ingress)
if !ok {
glog.Errorf("Tombstone contained object that is not an Ingress: %#v", obj)
return
}
}
if !class.IsValid(delIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass) {
glog.Infof("ignoring delete for ingress %v based on annotation %v", delIng.Name, class.IngressKey)
return
}
ic.recorder.Eventf(delIng, apiv1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", delIng.Namespace, delIng.Name))
ic.syncQueue.Enqueue(obj)
},
UpdateFunc: func(old, cur interface{}) {
oldIng := old.(*extensions.Ingress)
curIng := cur.(*extensions.Ingress)
validOld := class.IsValid(oldIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass)
validCur := class.IsValid(curIng, ic.cfg.IngressClass, ic.cfg.DefaultIngressClass)
if !validOld && validCur {
glog.Infof("creating ingress %v based on annotation %v", curIng.Name, class.IngressKey)
ic.recorder.Eventf(curIng, apiv1.EventTypeNormal, "CREATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
} else if validOld && !validCur {
glog.Infof("removing ingress %v based on annotation %v", curIng.Name, class.IngressKey)
ic.recorder.Eventf(curIng, apiv1.EventTypeNormal, "DELETE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
} else if validCur && !reflect.DeepEqual(old, cur) {
ic.recorder.Eventf(curIng, apiv1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name))
}
ic.syncQueue.Enqueue(cur)
},
}
secrEventHandler := cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
if !reflect.DeepEqual(old, cur) {
sec := cur.(*apiv1.Secret)
key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name)
ic.syncSecret(key)
}
},
DeleteFunc: func(obj interface{}) {
sec, ok := obj.(*apiv1.Secret)
if !ok {
// If we reached here it means the secret was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("couldn't get object from tombstone %#v", obj)
return
}
sec, ok = tombstone.Obj.(*apiv1.Secret)
if !ok {
glog.Errorf("Tombstone contained object that is not a Secret: %#v", obj)
return
}
}
key := fmt.Sprintf("%v/%v", sec.Namespace, sec.Name)
ic.sslCertTracker.DeleteAll(key)
},
}
eventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ic.syncQueue.Enqueue(obj)
},
DeleteFunc: func(obj interface{}) {
ic.syncQueue.Enqueue(obj)
},
UpdateFunc: func(old, cur interface{}) {
oep := old.(*apiv1.Endpoints)
ocur := cur.(*apiv1.Endpoints)
if !reflect.DeepEqual(ocur.Subsets, oep.Subsets) {
ic.syncQueue.Enqueue(cur)
}
},
}
mapEventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
upCmap := obj.(*apiv1.ConfigMap)
mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name)
if mapKey == ic.cfg.ConfigMapName {
glog.V(2).Infof("adding configmap %v to backend", mapKey)
ic.cfg.Backend.SetConfig(upCmap)
ic.setForceReload(true)
}
},
UpdateFunc: func(old, cur interface{}) {
if !reflect.DeepEqual(old, cur) {
upCmap := cur.(*apiv1.ConfigMap)
mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name)
if mapKey == ic.cfg.ConfigMapName {
glog.V(2).Infof("updating configmap backend (%v)", mapKey)
ic.cfg.Backend.SetConfig(upCmap)
ic.setForceReload(true)
}
// updates to configuration configmaps can trigger an update
if mapKey == ic.cfg.ConfigMapName || mapKey == ic.cfg.TCPConfigMapName || mapKey == ic.cfg.UDPConfigMapName {
ic.recorder.Eventf(upCmap, apiv1.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", mapKey))
ic.syncQueue.Enqueue(cur)
}
}
},
}
watchNs := apiv1.NamespaceAll
if ic.cfg.ForceNamespaceIsolation && ic.cfg.Namespace != apiv1.NamespaceAll {
watchNs = ic.cfg.Namespace
}
ic.listers.Ingress.Store, ic.ingController = cache.NewInformer(
cache.NewListWatchFromClient(ic.cfg.Client.ExtensionsV1beta1().RESTClient(), "ingresses", ic.cfg.Namespace, fields.Everything()),
&extensions.Ingress{}, ic.cfg.ResyncPeriod, ingEventHandler)
ic.listers.Endpoint.Store, ic.endpController = cache.NewInformer(
cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "endpoints", ic.cfg.Namespace, fields.Everything()),
&apiv1.Endpoints{}, ic.cfg.ResyncPeriod, eventHandler)
ic.listers.Secret.Store, ic.secrController = cache.NewInformer(
cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "secrets", watchNs, fields.Everything()),
&apiv1.Secret{}, ic.cfg.ResyncPeriod, secrEventHandler)
ic.listers.ConfigMap.Store, ic.mapController = cache.NewInformer(
cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "configmaps", watchNs, fields.Everything()),
&apiv1.ConfigMap{}, ic.cfg.ResyncPeriod, mapEventHandler)
ic.listers.Service.Store, ic.svcController = cache.NewInformer(
cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()),
&apiv1.Service{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})
var nodeListerWatcher cache.ListerWatcher
if disableNodeLister {
nodeListerWatcher = fcache.NewFakeControllerSource()
} else {
nodeListerWatcher = cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
}
ic.listers.Node.Store, ic.nodeController = cache.NewInformer(
nodeListerWatcher,
&apiv1.Node{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})
}
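Every watch here goes through cache.NewInformer, and the fake controller source that stands in for the node watch also makes a convenient harness for exercising event handlers without an API server. A rough sketch, assuming client-go's cache and cache/testing packages behave as they are used above:

```go
package main

import (
	"fmt"
	"time"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	fcache "k8s.io/client-go/tools/cache/testing"
)

func main() {
	// a fake ListerWatcher, the same trick createListers uses for nodes
	source := fcache.NewFakeControllerSource()

	store, controller := cache.NewInformer(source, &apiv1.ConfigMap{}, 0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				cm := obj.(*apiv1.ConfigMap)
				fmt.Printf("add %s/%s\n", cm.Namespace, cm.Name)
			},
		})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go controller.Run(stopCh)

	// feed an object through the fake source; the AddFunc above fires
	source.Add(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "nginx-config"}})
	time.Sleep(100 * time.Millisecond) // give the handler a moment to run
	fmt.Println("cached objects:", len(store.List()))
}
```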


@ -51,7 +51,7 @@ func TestMergeLocationAnnotations(t *testing.T) {
"Redirect": redirect.Redirect{}, "Redirect": redirect.Redirect{},
"Rewrite": rewrite.Redirect{}, "Rewrite": rewrite.Redirect{},
"Whitelist": ipwhitelist.SourceRange{}, "Whitelist": ipwhitelist.SourceRange{},
"Proxy": proxy.Configuration{}, "Proxy": &proxy.Configuration{},
"UsePortInRedirects": true, "UsePortInRedirects": true,
} }


@ -24,52 +24,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
) )
// BackendByNameServers sorts upstreams by name
type BackendByNameServers []*Backend
func (c BackendByNameServers) Len() int { return len(c) }
func (c BackendByNameServers) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c BackendByNameServers) Less(i, j int) bool {
return c[i].Name < c[j].Name
}
// EndpointByAddrPort sorts endpoints by address and port
type EndpointByAddrPort []Endpoint
func (c EndpointByAddrPort) Len() int { return len(c) }
func (c EndpointByAddrPort) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c EndpointByAddrPort) Less(i, j int) bool {
iName := c[i].Address
jName := c[j].Address
if iName != jName {
return iName < jName
}
iU := c[i].Port
jU := c[j].Port
return iU < jU
}
// ServerByName sorts servers by name
type ServerByName []*Server
func (c ServerByName) Len() int { return len(c) }
func (c ServerByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c ServerByName) Less(i, j int) bool {
return c[i].Hostname < c[j].Hostname
}
// LocationByPath sorts location by path in descending order
// Location / is the last one
type LocationByPath []*Location
func (c LocationByPath) Len() int { return len(c) }
func (c LocationByPath) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c LocationByPath) Less(i, j int) bool {
return c[i].Path > c[j].Path
}
// SSLCert describes a SSL certificate to be used in a server // SSLCert describes a SSL certificate to be used in a server
type SSLCert struct { type SSLCert struct {
metav1.ObjectMeta `json:"metadata,omitempty"` metav1.ObjectMeta `json:"metadata,omitempty"`


@ -22,347 +22,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
func buildBackendByNameServers() BackendByNameServers {
return []*Backend{
{
Name: "foo1",
Secure: true,
Endpoints: []Endpoint{},
},
{
Name: "foo2",
Secure: false,
Endpoints: []Endpoint{},
},
{
Name: "foo3",
Secure: true,
Endpoints: []Endpoint{},
},
}
}
func TestBackendByNameServersLen(t *testing.T) {
fooTests := []struct {
backends BackendByNameServers
el int
}{
{[]*Backend{}, 0},
{buildBackendByNameServers(), 3},
{nil, 0},
}
for _, fooTest := range fooTests {
r := fooTest.backends.Len()
if r != fooTest.el {
t.Errorf("returned %v but expected %v for the len of BackendByNameServers: %v", r, fooTest.el, fooTest.backends)
}
}
}
func TestBackendByNameServersSwap(t *testing.T) {
fooTests := []struct {
backends BackendByNameServers
i int
j int
}{
{buildBackendByNameServers(), 0, 1},
{buildBackendByNameServers(), 2, 1},
}
for _, fooTest := range fooTests {
fooi := fooTest.backends[fooTest.i]
fooj := fooTest.backends[fooTest.j]
fooTest.backends.Swap(fooTest.i, fooTest.j)
if fooi.Name != fooTest.backends[fooTest.j].Name || fooj.Name != fooTest.backends[fooTest.i].Name {
t.Errorf("failed to swap for ByNameServers, foo: %v", fooTest)
}
}
}
func TestBackendByNameServersLess(t *testing.T) {
fooTests := []struct {
backends BackendByNameServers
i int
j int
er bool
}{
// order by name
{buildBackendByNameServers(), 0, 2, true},
{buildBackendByNameServers(), 1, 0, false},
}
for _, fooTest := range fooTests {
r := fooTest.backends.Less(fooTest.i, fooTest.j)
if r != fooTest.er {
t.Errorf("returned %v but expected %v for the foo: %v", r, fooTest.er, fooTest)
}
}
}
func buildEndpointByAddrPort() EndpointByAddrPort {
return []Endpoint{
{
Address: "127.0.0.1",
Port: "8080",
MaxFails: 3,
FailTimeout: 10,
},
{
Address: "127.0.0.1",
Port: "8081",
MaxFails: 3,
FailTimeout: 10,
},
{
Address: "127.0.0.1",
Port: "8082",
MaxFails: 3,
FailTimeout: 10,
},
{
Address: "127.0.0.2",
Port: "8082",
MaxFails: 3,
FailTimeout: 10,
},
}
}
func TestEndpointByAddrPortLen(t *testing.T) {
fooTests := []struct {
endpoints EndpointByAddrPort
el int
}{
{[]Endpoint{}, 0},
{buildEndpointByAddrPort(), 4},
{nil, 0},
}
for _, fooTest := range fooTests {
r := fooTest.endpoints.Len()
if r != fooTest.el {
t.Errorf("returned %v but expected %v for the len of EndpointByAddrPort: %v", r, fooTest.el, fooTest.endpoints)
}
}
}
func TestEndpointByAddrPortSwap(t *testing.T) {
fooTests := []struct {
endpoints EndpointByAddrPort
i int
j int
}{
{buildEndpointByAddrPort(), 0, 1},
{buildEndpointByAddrPort(), 2, 1},
}
for _, fooTest := range fooTests {
fooi := fooTest.endpoints[fooTest.i]
fooj := fooTest.endpoints[fooTest.j]
fooTest.endpoints.Swap(fooTest.i, fooTest.j)
if fooi.Port != fooTest.endpoints[fooTest.j].Port ||
fooi.Address != fooTest.endpoints[fooTest.j].Address ||
fooj.Port != fooTest.endpoints[fooTest.i].Port ||
fooj.Address != fooTest.endpoints[fooTest.i].Address {
t.Errorf("failed to swap for EndpointByAddrPort, foo: %v", fooTest)
}
}
}
func TestEndpointByAddrPortLess(t *testing.T) {
fooTests := []struct {
endpoints EndpointByAddrPort
i int
j int
er bool
}{
// 1) order by name
// 2) order by port(if the name is the same one)
{buildEndpointByAddrPort(), 0, 1, true},
{buildEndpointByAddrPort(), 2, 1, false},
{buildEndpointByAddrPort(), 2, 3, true},
}
for _, fooTest := range fooTests {
r := fooTest.endpoints.Less(fooTest.i, fooTest.j)
if r != fooTest.er {
t.Errorf("returned %v but expected %v for the foo: %v", r, fooTest.er, fooTest)
}
}
}
func buildServerByName() ServerByName {
return []*Server{
{
Hostname: "foo1",
SSLPassthrough: true,
SSLCertificate: "foo1_cert",
SSLPemChecksum: "foo1_pem",
Locations: []*Location{},
},
{
Hostname: "foo2",
SSLPassthrough: true,
SSLCertificate: "foo2_cert",
SSLPemChecksum: "foo2_pem",
Locations: []*Location{},
},
{
Hostname: "foo3",
SSLPassthrough: false,
SSLCertificate: "foo3_cert",
SSLPemChecksum: "foo3_pem",
Locations: []*Location{},
},
{
Hostname: "_",
SSLPassthrough: false,
SSLCertificate: "foo4_cert",
SSLPemChecksum: "foo4_pem",
Locations: []*Location{},
},
}
}
func TestServerByNameLen(t *testing.T) {
fooTests := []struct {
servers ServerByName
el int
}{
{[]*Server{}, 0},
{buildServerByName(), 4},
{nil, 0},
}
for _, fooTest := range fooTests {
r := fooTest.servers.Len()
if r != fooTest.el {
t.Errorf("returned %v but expected %v for the len of ServerByName: %v", r, fooTest.el, fooTest.servers)
}
}
}
func TestServerByNameSwap(t *testing.T) {
fooTests := []struct {
servers ServerByName
i int
j int
}{
{buildServerByName(), 0, 1},
{buildServerByName(), 2, 1},
}
for _, fooTest := range fooTests {
fooi := fooTest.servers[fooTest.i]
fooj := fooTest.servers[fooTest.j]
fooTest.servers.Swap(fooTest.i, fooTest.j)
if fooi.Hostname != fooTest.servers[fooTest.j].Hostname ||
fooj.Hostname != fooTest.servers[fooTest.i].Hostname {
t.Errorf("failed to swap for ServerByName, foo: %v", fooTest)
}
}
}
func TestServerByNameLess(t *testing.T) {
fooTests := []struct {
servers ServerByName
i int
j int
er bool
}{
{buildServerByName(), 0, 1, true},
{buildServerByName(), 2, 1, false},
{buildServerByName(), 2, 3, false},
}
for _, fooTest := range fooTests {
r := fooTest.servers.Less(fooTest.i, fooTest.j)
if r != fooTest.er {
t.Errorf("returned %v but expected %v for the foo: %v", r, fooTest.er, fooTest)
}
}
}
func buildLocationByPath() LocationByPath {
return []*Location{
{
Path: "a",
IsDefBackend: true,
Backend: "a_back",
},
{
Path: "b",
IsDefBackend: true,
Backend: "b_back",
},
{
Path: "c",
IsDefBackend: true,
Backend: "c_back",
},
}
}
func TestLocationByPath(t *testing.T) {
fooTests := []struct {
locations LocationByPath
el int
}{
{[]*Location{}, 0},
{buildLocationByPath(), 3},
{nil, 0},
}
for _, fooTest := range fooTests {
r := fooTest.locations.Len()
if r != fooTest.el {
t.Errorf("returned %v but expected %v for the len of LocationByPath: %v", r, fooTest.el, fooTest.locations)
}
}
}
func TestLocationByPathSwap(t *testing.T) {
fooTests := []struct {
locations LocationByPath
i int
j int
}{
{buildLocationByPath(), 0, 1},
{buildLocationByPath(), 2, 1},
}
for _, fooTest := range fooTests {
fooi := fooTest.locations[fooTest.i]
fooj := fooTest.locations[fooTest.j]
fooTest.locations.Swap(fooTest.i, fooTest.j)
if fooi.Path != fooTest.locations[fooTest.j].Path ||
fooj.Path != fooTest.locations[fooTest.i].Path {
t.Errorf("failed to swap for LocationByPath, foo: %v", fooTest)
}
}
}
func TestLocationByPathLess(t *testing.T) {
fooTests := []struct {
locations LocationByPath
i int
j int
er bool
}{
// sorts location by path in descending order
{buildLocationByPath(), 0, 1, false},
{buildLocationByPath(), 2, 1, true},
}
for _, fooTest := range fooTests {
r := fooTest.locations.Less(fooTest.i, fooTest.j)
if r != fooTest.er {
t.Errorf("returned %v but expected %v for the foo: %v", r, fooTest.er, fooTest)
}
}
}
func TestGetObjectKindForSSLCert(t *testing.T) { func TestGetObjectKindForSSLCert(t *testing.T) {
fk := &SSLCert{ fk := &SSLCert{
ObjectMeta: metav1.ObjectMeta{}, ObjectMeta: metav1.ObjectMeta{},


@ -21,11 +21,13 @@ import (
"net" "net"
"os" "os"
"sort" "sort"
"sync" "strings"
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/pkg/errors"
pool "gopkg.in/go-playground/pool.v3"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -40,12 +42,12 @@ import (
"k8s.io/ingress/core/pkg/ingress/annotations/class" "k8s.io/ingress/core/pkg/ingress/annotations/class"
"k8s.io/ingress/core/pkg/ingress/store" "k8s.io/ingress/core/pkg/ingress/store"
"k8s.io/ingress/core/pkg/k8s" "k8s.io/ingress/core/pkg/k8s"
"k8s.io/ingress/core/pkg/strings" ingress_strings "k8s.io/ingress/core/pkg/strings"
"k8s.io/ingress/core/pkg/task" "k8s.io/ingress/core/pkg/task"
) )
const ( const (
updateInterval = 30 * time.Second updateInterval = 60 * time.Second
) )
// Sync ... // Sync ...
@ -56,14 +58,16 @@ type Sync interface {
// Config ... // Config ...
type Config struct { type Config struct {
Client clientset.Interface Client clientset.Interface
PublishService string PublishService string
IngressLister store.IngressLister
ElectionID string ElectionID string
UpdateStatusOnShutdown bool UpdateStatusOnShutdown bool
IngressLister store.IngressLister
DefaultIngressClass string DefaultIngressClass string
IngressClass string IngressClass string
@ -264,7 +268,7 @@ func (s *statusSync) runningAddresses() ([]string, error) {
addrs := []string{} addrs := []string{}
for _, pod := range pods.Items { for _, pod := range pods.Items {
name := k8s.GetNodeIP(s.Client, pod.Spec.NodeName) name := k8s.GetNodeIP(s.Client, pod.Spec.NodeName)
if !strings.StringInSlice(name, addrs) { if !ingress_strings.StringInSlice(name, addrs) {
addrs = append(addrs, name) addrs = append(addrs, name)
} }
} }
@ -293,7 +297,10 @@ func sliceToStatus(endpoints []string) []apiv1.LoadBalancerIngress {
} }
} }
sort.Sort(loadBalancerIngressByIP(lbi)) sort.SliceStable(lbi, func(a, b int) bool {
return lbi[a].IP < lbi[b].IP
})
return lbi return lbi
} }
@ -302,48 +309,77 @@ func sliceToStatus(endpoints []string) []apiv1.LoadBalancerIngress {
// of nil then it uses the returned value or the newIngressPoint values // of nil then it uses the returned value or the newIngressPoint values
func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) { func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) {
ings := s.IngressLister.List() ings := s.IngressLister.List()
var wg sync.WaitGroup
wg.Add(len(ings)) p := pool.NewLimited(10)
defer p.Close()
batch := p.Batch()
for _, cur := range ings { for _, cur := range ings {
ing := cur.(*extensions.Ingress) ing := cur.(*extensions.Ingress)
if !class.IsValid(ing, s.Config.IngressClass, s.Config.DefaultIngressClass) { if !class.IsValid(ing, s.Config.IngressClass, s.Config.DefaultIngressClass) {
wg.Done()
continue continue
} }
go func(wg *sync.WaitGroup, ing *extensions.Ingress) { batch.Queue(runUpdate(ing, newIngressPoint, s.Client, s.CustomIngressStatus))
defer wg.Done()
ingClient := s.Client.Extensions().Ingresses(ing.Namespace)
currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("unexpected error searching Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
return
}
addrs := newIngressPoint
ca := s.CustomIngressStatus(currIng)
if ca != nil {
addrs = ca
}
curIPs := currIng.Status.LoadBalancer.Ingress
sort.Sort(loadBalancerIngressByIP(curIPs))
if ingressSliceEqual(addrs, curIPs) {
glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", currIng.Namespace, currIng.Name)
return
}
glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, addrs)
currIng.Status.LoadBalancer.Ingress = addrs
_, err = ingClient.UpdateStatus(currIng)
if err != nil {
glog.Warningf("error updating ingress rule: %v", err)
}
}(&wg, ing)
} }
wg.Wait() batch.QueueComplete()
batch.WaitAll()
}
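The status updates now fan out through a bounded worker pool instead of one goroutine per Ingress, so at most ten updates hit the API server concurrently. A stripped-down sketch of the same batch pattern from gopkg.in/go-playground/pool.v3; the squaring work unit is a placeholder:

```go
package main

import (
	"fmt"

	pool "gopkg.in/go-playground/pool.v3"
)

// work returns a WorkFunc, the same shape runUpdate returns above.
func work(n int) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		if wu.IsCancelled() {
			return nil, nil
		}
		return n * n, nil
	}
}

func main() {
	p := pool.NewLimited(10) // at most 10 work units run concurrently
	defer p.Close()

	batch := p.Batch()
	for i := 0; i < 5; i++ {
		batch.Queue(work(i))
	}
	batch.QueueComplete() // required, otherwise WaitAll blocks forever
	batch.WaitAll()
	fmt.Println("all work units finished")
}
```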
func runUpdate(ing *extensions.Ingress, status []apiv1.LoadBalancerIngress,
client clientset.Interface,
statusFunc func(*extensions.Ingress) []apiv1.LoadBalancerIngress) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
if wu.IsCancelled() {
return nil, nil
}
addrs := status
ca := statusFunc(ing)
if ca != nil {
addrs = ca
}
sort.SliceStable(addrs, lessLoadBalancerIngress(addrs))
curIPs := ing.Status.LoadBalancer.Ingress
sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs))
if ingressSliceEqual(addrs, curIPs) {
glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name)
return true, nil
}
ingClient := client.Extensions().Ingresses(ing.Namespace)
currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name))
}
glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, addrs)
currIng.Status.LoadBalancer.Ingress = addrs
_, err = ingClient.UpdateStatus(currIng)
if err != nil {
glog.Warningf("error updating ingress rule: %v", err)
}
return true, nil
}
}
func lessLoadBalancerIngress(addrs []apiv1.LoadBalancerIngress) func(int, int) bool {
return func(a, b int) bool {
switch strings.Compare(addrs[a].Hostname, addrs[b].Hostname) {
case -1:
return true
case 1:
return false
}
return addrs[a].IP < addrs[b].IP
}
} }
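lessLoadBalancerIngress orders by hostname first and falls back to IP, which sort.SliceStable applies directly. A small usage sketch with made-up addresses; the inline comparator mirrors the function above:

```go
package main

import (
	"fmt"
	"sort"

	apiv1 "k8s.io/api/core/v1"
)

func main() {
	addrs := []apiv1.LoadBalancerIngress{
		{Hostname: "b.example.com"},
		{IP: "10.0.0.2"},
		{IP: "10.0.0.1"},
		{Hostname: "a.example.com"},
	}
	// hostname first, IP as the tie-breaker
	sort.SliceStable(addrs, func(a, b int) bool {
		if addrs[a].Hostname != addrs[b].Hostname {
			return addrs[a].Hostname < addrs[b].Hostname
		}
		return addrs[a].IP < addrs[b].IP
	})
	fmt.Println(addrs) // IP-only entries sort before hostnames here
}
```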
func ingressSliceEqual(lhs, rhs []apiv1.LoadBalancerIngress) bool { func ingressSliceEqual(lhs, rhs []apiv1.LoadBalancerIngress) bool {
@ -361,12 +397,3 @@ func ingressSliceEqual(lhs, rhs []apiv1.LoadBalancerIngress) bool {
} }
return true return true
} }
// loadBalancerIngressByIP sorts LoadBalancerIngress using the field IP
type loadBalancerIngressByIP []apiv1.LoadBalancerIngress
func (c loadBalancerIngressByIP) Len() int { return len(c) }
func (c loadBalancerIngressByIP) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c loadBalancerIngressByIP) Less(i, j int) bool {
return c[i].IP < c[j].IP
}


@ -18,7 +18,6 @@ package status
import ( import (
"os" "os"
"sort"
"testing" "testing"
"time" "time"
@ -35,7 +34,7 @@ import (
"k8s.io/ingress/core/pkg/task" "k8s.io/ingress/core/pkg/task"
) )
func buildLoadBalancerIngressByIP() loadBalancerIngressByIP { func buildLoadBalancerIngressByIP() []apiv1.LoadBalancerIngress {
return []apiv1.LoadBalancerIngress{ return []apiv1.LoadBalancerIngress{
{ {
IP: "10.0.0.1", IP: "10.0.0.1",
@ -232,6 +231,7 @@ func buildIngressListener() store.IngressLister {
}, },
}, },
}) })
return store.IngressLister{Store: s} return store.IngressLister{Store: s}
} }
@ -373,10 +373,11 @@ func TestRunningAddresessWithPods(t *testing.T) {
} }
} }
/*
TODO: this test requires a refactoring
func TestUpdateStatus(t *testing.T) { func TestUpdateStatus(t *testing.T) {
fk := buildStatusSync() fk := buildStatusSync()
newIPs := buildLoadBalancerIngressByIP() newIPs := buildLoadBalancerIngressByIP()
sort.Sort(loadBalancerIngressByIP(newIPs))
fk.updateStatus(newIPs) fk.updateStatus(newIPs)
fooIngress1, err1 := fk.Client.Extensions().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{}) fooIngress1, err1 := fk.Client.Extensions().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{})
@ -397,7 +398,7 @@ func TestUpdateStatus(t *testing.T) {
t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []apiv1.LoadBalancerIngress{}) t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []apiv1.LoadBalancerIngress{})
} }
} }
*/
func TestSliceToStatus(t *testing.T) { func TestSliceToStatus(t *testing.T) {
fkEndpoints := []string{ fkEndpoints := []string{
"10.0.0.1", "10.0.0.1",
@ -460,61 +461,3 @@ func TestIngressSliceEqual(t *testing.T) {
} }
} }
} }
func TestLoadBalancerIngressByIPLen(t *testing.T) {
fooTests := []struct {
ips loadBalancerIngressByIP
el int
}{
{[]apiv1.LoadBalancerIngress{}, 0},
{buildLoadBalancerIngressByIP(), 4},
{nil, 0},
}
for _, fooTest := range fooTests {
r := fooTest.ips.Len()
if r != fooTest.el {
t.Errorf("returned %v but expected %v ", r, fooTest.el)
}
}
}
func TestLoadBalancerIngressByIPSwap(t *testing.T) {
fooTests := []struct {
ips loadBalancerIngressByIP
i int
j int
}{
{buildLoadBalancerIngressByIP(), 0, 1},
{buildLoadBalancerIngressByIP(), 2, 1},
}
for _, fooTest := range fooTests {
fooi := fooTest.ips[fooTest.i]
fooj := fooTest.ips[fooTest.j]
fooTest.ips.Swap(fooTest.i, fooTest.j)
if fooi.IP != fooTest.ips[fooTest.j].IP ||
fooj.IP != fooTest.ips[fooTest.i].IP {
t.Errorf("failed to swap for loadBalancerIngressByIP")
}
}
}
func TestLoadBalancerIngressByIPLess(t *testing.T) {
fooTests := []struct {
ips loadBalancerIngressByIP
i int
j int
er bool
}{
{buildLoadBalancerIngressByIP(), 0, 1, true},
{buildLoadBalancerIngressByIP(), 2, 1, false},
}
for _, fooTest := range fooTests {
r := fooTest.ips.Less(fooTest.i, fooTest.j)
if r != fooTest.er {
t.Errorf("returned %v but expected %v ", r, fooTest.er)
}
}
}


@ -19,7 +19,7 @@ package store
import ( import (
"fmt" "fmt"
api "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
) )
@ -29,20 +29,56 @@ type IngressLister struct {
} }
// SecretsLister makes a Store that lists Secrets. // SecretLister makes a Store that lists Secrets.
type SecretsLister struct { type SecretLister struct {
cache.Store cache.Store
} }
// GetByName searches for a secret in the local secrets Store
func (sl *SecretLister) GetByName(name string) (*apiv1.Secret, error) {
s, exists, err := sl.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("secret %v was not found", name)
}
return s.(*apiv1.Secret), nil
}
// ConfigMapLister makes a Store that lists Configmaps. // ConfigMapLister makes a Store that lists Configmaps.
type ConfigMapLister struct { type ConfigMapLister struct {
cache.Store cache.Store
} }
// GetByName searches for a configmap in the local configmaps Store
func (cml *ConfigMapLister) GetByName(name string) (*apiv1.ConfigMap, error) {
s, exists, err := cml.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("configmap %v was not found", name)
}
return s.(*apiv1.ConfigMap), nil
}
// ServiceLister makes a Store that lists Services. // ServiceLister makes a Store that lists Services.
type ServiceLister struct { type ServiceLister struct {
cache.Store cache.Store
} }
// GetByName searches for a service in the local services Store
func (sl *ServiceLister) GetByName(name string) (*apiv1.Service, error) {
s, exists, err := sl.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("service %v was not found", name)
}
return s.(*apiv1.Service), nil
}
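Each GetByName helper is a thin typed wrapper over cache.Store.GetByKey with a namespace/name key. A small sketch of the underlying call, assuming client-go's cache package:

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// MetaNamespaceKeyFunc produces "namespace/name" keys
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	if err := store.Add(&apiv1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}); err != nil {
		panic(err)
	}

	// the listers above simply wrap GetByKey with a typed error
	if obj, exists, err := store.GetByKey("default/web"); err == nil && exists {
		fmt.Println("found", obj.(*apiv1.Service).Name)
	}
}
```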
// NodeLister makes a Store that lists Nodes. // NodeLister makes a Store that lists Nodes.
type NodeLister struct { type NodeLister struct {
cache.Store cache.Store
@ -54,9 +90,9 @@ type EndpointLister struct {
} }
// GetServiceEndpoints returns the endpoints of a service, matched on service name. // GetServiceEndpoints returns the endpoints of a service, matched on service name.
func (s *EndpointLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) { func (s *EndpointLister) GetServiceEndpoints(svc *apiv1.Service) (ep apiv1.Endpoints, err error) {
for _, m := range s.Store.List() { for _, m := range s.Store.List() {
ep = *m.(*api.Endpoints) ep = *m.(*apiv1.Endpoints)
if svc.Name == ep.Name && svc.Namespace == ep.Namespace { if svc.Name == ep.Name && svc.Namespace == ep.Namespace {
return ep, nil return ep, nil
} }
@ -64,8 +100,3 @@ func (s *EndpointLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints
err = fmt.Errorf("could not find endpoints for service: %v", svc.Name) err = fmt.Errorf("could not find endpoints for service: %v", svc.Name)
return return
} }
// SecretLister makes a Store that lists Secrets.
type SecretLister struct {
cache.Store
}


@ -81,7 +81,7 @@ type Controller interface {
SetConfig(*apiv1.ConfigMap) SetConfig(*apiv1.ConfigMap)
// SetListers allows access to the store listers present in the generic controller // This avoids the use of the kubernetes client.
// This avoid the use of the kubernetes client. // This avoid the use of the kubernetes client.
SetListers(StoreLister) SetListers(*StoreLister)
// BackendDefaults returns the minimum settings required to configure the // BackendDefaults returns the minimum settings required to configure the
// communication to endpoints // communication to endpoints
BackendDefaults() defaults.Backend BackendDefaults() defaults.Backend
@ -309,7 +309,7 @@ type Location struct {
// Proxy contains information about timeouts and buffer sizes // Proxy contains information about timeouts and buffer sizes
// to be used in connections against endpoints // to be used in connections against endpoints
// +optional // +optional
Proxy proxy.Configuration `json:"proxy,omitempty"` Proxy *proxy.Configuration `json:"proxy,omitempty"`
// UsePortInRedirects indicates if redirects must specify the port // UsePortInRedirects indicates if redirects must specify the port
// +optional // +optional
UsePortInRedirects bool `json:"usePortInRedirects"` UsePortInRedirects bool `json:"usePortInRedirects"`


@ -70,15 +70,9 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool {
return false return false
} }
for _, c1s := range c1.Servers { // Servers are sorted
found := false for idx, c1s := range c1.Servers {
for _, c2s := range c2.Servers { if !c1s.Equal(c2.Servers[idx]) {
if c1s.Equal(c2s) {
found = true
break
}
}
if !found {
return false return false
} }
} }
@ -306,15 +300,9 @@ func (s1 *Server) Equal(s2 *Server) bool {
return false return false
} }
for _, s1l := range s1.Locations { // Locations are sorted
found := false for idx, s1l := range s1.Locations {
for _, sl2 := range s2.Locations { if !s1l.Equal(s2.Locations[idx]) {
if s1l.Equal(sl2) {
found = true
break
}
}
if !found {
return false return false
} }
} }
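Since the configuration builder now emits servers and locations in sorted order, Equal can walk both slices in lockstep instead of searching each element for a match. A minimal illustration of the idea, with plain strings standing in for servers:

```go
package main

import "fmt"

// equalSorted compares two pre-sorted slices element by element; once both
// sides are kept in the same order, the quadratic "search for a match"
// loop the diff removes is unnecessary.
func equalSorted(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalSorted([]string{"a", "b"}, []string{"a", "b"})) // true
	fmt.Println(equalSorted([]string{"a", "b"}, []string{"b", "a"})) // false: order matters
}
```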
@ -382,7 +370,7 @@ func (l1 *Location) Equal(l2 *Location) bool {
if !(&l1.Whitelist).Equal(&l2.Whitelist) { if !(&l1.Whitelist).Equal(&l2.Whitelist) {
return false return false
} }
if !(&l1.Proxy).Equal(&l2.Proxy) { if !(l1.Proxy).Equal(l2.Proxy) {
return false return false
} }
if l1.UsePortInRedirects != l2.UsePortInRedirects { if l1.UsePortInRedirects != l2.UsePortInRedirects {


@ -31,8 +31,10 @@ var (
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
) )
// Queue manages a work queue through an independent worker that // Queue manages a timed work queue through an independent worker that invokes the
// invokes the given sync function for every work item inserted. // given sync function for every work item inserted.
// The queue uses an internal timestamp that allows the removal of elements
// whose timestamp is older than the last successful get operation.
type Queue struct { type Queue struct {
// queue is the work queue the worker polls // queue is the work queue the worker polls
queue workqueue.RateLimitingInterface queue workqueue.RateLimitingInterface
@ -42,6 +44,13 @@ type Queue struct {
workerDone chan bool workerDone chan bool
fn func(obj interface{}) (interface{}, error) fn func(obj interface{}) (interface{}, error)
lastSync int64
}
type element struct {
Key interface{}
Timestamp int64
} }
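Each queued element now carries its enqueue timestamp, and the worker drops anything queued before the last successful sync. A tiny sketch of that skip decision, with made-up keys:

```go
package main

import (
	"fmt"
	"time"
)

type element struct {
	Key       interface{}
	Timestamp int64
}

func main() {
	lastSync := time.Now().UnixNano()
	stale := element{Key: "default/nginx-config", Timestamp: lastSync - int64(time.Second)}
	fresh := element{Key: "default/nginx-config", Timestamp: time.Now().UnixNano()}

	for _, it := range []element{stale, fresh} {
		// same comparison the worker makes: older than the last sync => skip
		if lastSync > it.Timestamp {
			fmt.Printf("skipping %v: queued before the last successful sync\n", it.Key)
			continue
		}
		fmt.Printf("syncing %v\n", it.Key)
	}
}
```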
// Run ... // Run ...
@ -56,13 +65,17 @@ func (t *Queue) Enqueue(obj interface{}) {
return return
} }
ts := time.Now().UnixNano()
glog.V(3).Infof("queuing item %v", obj) glog.V(3).Infof("queuing item %v", obj)
key, err := t.fn(obj) key, err := t.fn(obj)
if err != nil { if err != nil {
glog.Errorf("%v", err) glog.Errorf("%v", err)
return return
} }
t.queue.Add(key) t.queue.Add(element{
Key: key,
Timestamp: ts,
})
} }
func (t *Queue) defaultKeyFunc(obj interface{}) (interface{}, error) { func (t *Queue) defaultKeyFunc(obj interface{}) (interface{}, error) {
@ -84,13 +97,26 @@ func (t *Queue) worker() {
} }
return return
} }
ts := time.Now().UnixNano()
glog.V(3).Infof("syncing %v", key) item := key.(element)
if t.lastSync > item.Timestamp {
glog.V(3).Infof("skipping %v sync (%v > %v)", item.Key, t.lastSync, item.Timestamp)
t.queue.Forget(key)
t.queue.Done(key)
continue
}
glog.V(3).Infof("syncing %v", item.Key)
if err := t.sync(key); err != nil { if err := t.sync(key); err != nil {
glog.Warningf("requeuing %v, err %v", key, err) glog.Warningf("requeuing %v, err %v", item.Key, err)
t.queue.AddRateLimited(key) t.queue.AddRateLimited(element{
Key: item.Key,
Timestamp: time.Now().UnixNano(),
})
} else { } else {
t.queue.Forget(key) t.queue.Forget(key)
t.lastSync = ts
} }
t.queue.Done(key) t.queue.Done(key)


@ -131,3 +131,29 @@ func TestEnqueueKeyError(t *testing.T) {
// shutdown queue before exit // shutdown queue before exit
q.Shutdown() q.Shutdown()
} }
func TestSkipEnqueue(t *testing.T) {
// initialize result
atomic.StoreUint32(&sr, 0)
q := NewCustomTaskQueue(mockSynFn, mockKeyFn)
stopCh := make(chan struct{})
// run queue
go q.Run(time.Second, stopCh)
// mock object which will be enqueued
mo := mockEnqueueObj{
k: "testKey",
v: "testValue",
}
q.Enqueue(mo)
q.Enqueue(mo)
q.Enqueue(mo)
q.Enqueue(mo)
// wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10)
if atomic.LoadUint32(&sr) != 1 {
t.Errorf("sr should be 1, but is %d", sr)
}
// shutdown queue before exit
q.Shutdown()
}


@ -1,6 +1,6 @@
# Haproxy Ingress DaemonSet # Haproxy Ingress DaemonSet
In some cases, the Ingress controller must run on all nodes in the cluster; a [DaemonSet](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/daemon.md) meets this requirement. In some cases, the Ingress controller must run on all nodes in the cluster; a [DaemonSet](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/apps/daemon.md) meets this requirement.
## Prerequisites ## Prerequisites


@ -1,6 +1,6 @@
# Nginx Ingress DaemonSet # Nginx Ingress DaemonSet
In some cases, the Ingress controller must run on all nodes in the cluster; a [DaemonSet](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/daemon.md) meets this requirement. In some cases, the Ingress controller must run on all nodes in the cluster; a [DaemonSet](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/apps/daemon.md) meets this requirement.
## Default Backend ## Default Backend

View file

@ -12,5 +12,10 @@ spec:
nodePort: 30080 nodePort: 30080
targetPort: 80 targetPort: 80
protocol: TCP protocol: TCP
- name: https
port: 443
nodePort: 30443
targetPort: 443
protocol: TCP
selector: selector:
k8s-app: nginx-ingress-lb k8s-app: nginx-ingress-lb


@ -33,3 +33,5 @@ spec:
ports: ports:
- name: http - name: http
containerPort: 80 containerPort: 80
- name: https
containerPort: 443


@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
# 0.0.0 shouldn't clobber any released builds # 0.0.0 shouldn't clobber any released builds
TAG = 0.24 TAG = 0.25
REGISTRY = gcr.io/google_containers REGISTRY = gcr.io/google_containers
ARCH ?= $(shell go env GOARCH) ARCH ?= $(shell go env GOARCH)
ALL_ARCH = amd64 arm arm64 ppc64le ALL_ARCH = amd64 arm arm64 ppc64le


@ -25,9 +25,15 @@ export STICKY_SESSIONS_VERSION=08a395c66e42
export MORE_HEADERS_VERSION=0.32 export MORE_HEADERS_VERSION=0.32
export NGINX_DIGEST_AUTH=7955af9c77598c697ac292811914ce1e2b3b824c export NGINX_DIGEST_AUTH=7955af9c77598c697ac292811914ce1e2b3b824c
export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b
export NGINX_OPENTRACING=fcc2e822c6dfc7d1f432c16b07dee9437c24236a
export OPENTRACING_CPP=42cbb358b68e53145c5b479efa09a25dbc81a95a
export ZIPKIN_CPP=8eae512bd750b304764d96058c65229f9a7712a9
export BUILD_PATH=/tmp/build export BUILD_PATH=/tmp/build
export NGINX_OPENTRACING_VENDOR="ZIPKIN"
ARCH=$(uname -p) ARCH=$(uname -p)
get_src() get_src()
@ -65,8 +71,12 @@ apt-get update && apt-get install --no-install-recommends -y \
libaio1 \ libaio1 \
libaio-dev \ libaio-dev \
openssl \ openssl \
libperl-dev \
cmake \
libcurl4-openssl-dev \
linux-headers-generic || exit 1 linux-headers-generic || exit 1
# download, verify and extract the source files # download, verify and extract the source files
get_src 0e75b94429b3f745377aeba3aff97da77bf2b03fcb9ff15b3bad9b038db29f2e \ get_src 0e75b94429b3f745377aeba3aff97da77bf2b03fcb9ff15b3bad9b038db29f2e \
"http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz"
@ -92,15 +102,45 @@ get_src 9b1d0075df787338bb607f14925886249bda60b6b3156713923d5d59e99a708b \
get_src 618551948ab14cac51d6e4ad00452312c7b09938f59ebff4f93875013be31f2d \ get_src 618551948ab14cac51d6e4ad00452312c7b09938f59ebff4f93875013be31f2d \
"https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz" "https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz"
get_src d48c83e81aeeaebbf894adc5557cb8d027fb336d2afe95b68b2aa75920a3be74 \
"https://github.com/rnburn/nginx-opentracing/archive/$NGINX_OPENTRACING.tar.gz"
get_src d1afc7c38bef055ac8a3759f117281b9d9287785e044a7d4e79134fa6ea99324 \
"https://github.com/opentracing/opentracing-cpp/archive/$OPENTRACING_CPP.tar.gz"
get_src c9961b503da1119eaeb15393bac804a303d49d86f701dbfd31c425d2214354d5 \
"https://github.com/rnburn/zipkin-cpp-opentracing/archive/$ZIPKIN_CPP.tar.gz"
#https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/ #https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
curl -sSL -o nginx__dynamic_tls_records.patch https://raw.githubusercontent.com/cloudflare/sslconfig/master/patches/nginx__1.11.5_dynamic_tls_records.patch curl -sSL -o nginx__dynamic_tls_records.patch https://raw.githubusercontent.com/cloudflare/sslconfig/master/patches/nginx__1.11.5_dynamic_tls_records.patch
# http2 header compression
curl -sSL -o nginx_http2_hpack.patch https://raw.githubusercontent.com/cloudflare/sslconfig/master/patches/nginx_http2_hpack.patch
# build opentracing lib
cd "$BUILD_PATH/opentracing-cpp-$OPENTRACING_CPP"
mkdir .build
cd .build
cmake ..
make
make install
# build zipkin lib
cd "$BUILD_PATH/zipkin-cpp-opentracing-$ZIPKIN_CPP"
mkdir .build
cd .build
cmake -DBUILD_SHARED_LIBS=1 ..
make
make install
# build nginx # build nginx
cd "$BUILD_PATH/nginx-$NGINX_VERSION" cd "$BUILD_PATH/nginx-$NGINX_VERSION"
echo "Applying tls nginx patches..." echo "Applying tls nginx patches..."
patch -p1 < $BUILD_PATH/nginx__dynamic_tls_records.patch patch -p1 < $BUILD_PATH/nginx__dynamic_tls_records.patch
patch -p1 < $BUILD_PATH/nginx_http2_hpack.patch
WITH_FLAGS="--with-debug \ WITH_FLAGS="--with-debug \
--with-pcre-jit \ --with-pcre-jit \
@ -114,6 +154,7 @@ WITH_FLAGS="--with-debug \
--with-http_gzip_static_module \ --with-http_gzip_static_module \
--with-http_sub_module \ --with-http_sub_module \
--with-http_v2_module \ --with-http_v2_module \
--with-http_v2_hpack_enc \
--with-stream \ --with-stream \
--with-stream_ssl_module \ --with-stream_ssl_module \
--with-stream_ssl_preread_module \ --with-stream_ssl_preread_module \
@ -123,7 +164,8 @@ if [[ ${ARCH} != "armv7l" || ${ARCH} != "aarch64" ]]; then
WITH_FLAGS+=" --with-file-aio" WITH_FLAGS+=" --with-file-aio"
fi fi
CC_OPT='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4' CC_OPT='-g -O3 -flto -fPIE -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 --param=ssp-buffer-size=4 -DTCP_FASTOPEN=23 -Wno-error=strict-aliasing'
LD_OPT='-Wl,-Bsymbolic-functions -fPIE -pie -Wl,-z,relro -Wl,-z,now'
if [[ ${ARCH} == "x86_64" ]]; then if [[ ${ARCH} == "x86_64" ]]; then
CC_OPT+=' -m64 -mtune=generic' CC_OPT+=' -m64 -mtune=generic'
@ -148,6 +190,7 @@ fi
--without-http_uwsgi_module \ --without-http_uwsgi_module \
--without-http_scgi_module \ --without-http_scgi_module \
--with-cc-opt="${CC_OPT}" \ --with-cc-opt="${CC_OPT}" \
--with-ld-opt="${LD_OPT}" \
--add-module="$BUILD_PATH/ngx_devel_kit-$NDK_VERSION" \ --add-module="$BUILD_PATH/ngx_devel_kit-$NDK_VERSION" \
--add-module="$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION" \ --add-module="$BUILD_PATH/set-misc-nginx-module-$SETMISC_VERSION" \
--add-module="$BUILD_PATH/nginx-module-vts-$VTS_VERSION" \ --add-module="$BUILD_PATH/nginx-module-vts-$VTS_VERSION" \
@ -155,6 +198,7 @@ fi
--add-module="$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION" \ --add-module="$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION" \
--add-module="$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH" \ --add-module="$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH" \
--add-module="$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS" \ --add-module="$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS" \
--add-module="$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING" \
&& make || exit 1 \ && make || exit 1 \
&& make install || exit 1 && make install || exit 1


@ -199,198 +199,6 @@
} }
}], }],
"servers": [{ "servers": [{
"hostname": "domain.tld",
"sslPassthrough": false,
"sslCertificate": "",
"sslExpireTime": "0001-01-01T00:00:00Z",
"sslPemChecksum": "",
"locations": [{
"path": "/dashboard",
"isDefBackend": false,
"backend": "kube-system-kubernetes-dashboard-80",
"service": {
"metadata": {
"name": "kubernetes-dashboard",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/services/kubernetes-dashboard",
"uid": "b957713f-5176-11e7-b3db-080027494b5d",
"resourceVersion": "82",
"creationTimestamp": "2017-06-15T03:00:01Z",
"labels": {
"addonmanager.kubernetes.io/mode": "Reconcile",
"app": "kubernetes-dashboard",
"kubernetes.io/minikube-addons": "dashboard",
"kubernetes.io/minikube-addons-endpoint": "dashboard"
},
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"app\":\"kubernetes-dashboard\",\"kubernetes.io/minikube-addons\":\"dashboard\",\"kubernetes.io/minikube-addons-endpoint\":\"dashboard\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"nodePort\":30000,\"port\":80,\"targetPort\":9090}],\"selector\":{\"app\":\"kubernetes-dashboard\"},\"type\":\"NodePort\"}}\n"
}
},
"spec": {
"ports": [{
"protocol": "TCP",
"port": 80,
"targetPort": 9090,
"nodePort": 30000
}],
"selector": {
"app": "kubernetes-dashboard"
},
"clusterIP": "10.0.0.120",
"type": "NodePort",
"sessionAffinity": "None"
},
"status": {
"loadBalancer": {}
}
},
"port": 80,
"basicDigestAuth": {
"type": "",
"realm": "",
"file": "",
"secured": false,
"fileSha": ""
},
"externalAuth": {
"url": "",
"host": "",
"signinUrl": "",
"method": "",
"sendBody": false,
"responseHeaders": null
},
"rateLimit": {
"connections": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rps": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rpm": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
}
},
"redirect": {
"target": "/",
"addBaseUrl": false,
"sslRedirect": true,
"forceSSLRedirect": false,
"appRoot": ""
},
"whitelist": {
"cidr": null
},
"proxy": {
"bodySize": "1m",
"conectTimeout": 5,
"sendTimeout": 60,
"readTimeout": 60,
"bufferSize": "4k",
"cookieDomain": "off",
"cookiePath": "off",
"nextUpstream": "error timeout invalid_header http_502 http_503 http_504"
},
"certificateAuth": {
"authSSLCert": {
"secret": "",
"caFilename": "",
"pemSha": ""
},
"validationDepth": 0
},
"use-port-in-redirects": false,
"configuration-snippet": ""
}, {
"path": "/",
"isDefBackend": true,
"backend": "upstream-default-backend",
"service": {
"metadata": {
"creationTimestamp": null
},
"spec": {},
"status": {
"loadBalancer": {}
}
},
"port": 0,
"basicDigestAuth": {
"type": "",
"realm": "",
"file": "",
"secured": false,
"fileSha": ""
},
"externalAuth": {
"url": "",
"host": "",
"signinUrl": "",
"method": "",
"sendBody": false,
"responseHeaders": null
},
"rateLimit": {
"connections": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rps": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rpm": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
}
},
"redirect": {
"target": "",
"addBaseUrl": false,
"sslRedirect": false,
"forceSSLRedirect": false,
"appRoot": ""
},
"whitelist": {
"cidr": null
},
"proxy": {
"bodySize": "1m",
"conectTimeout": 5,
"sendTimeout": 60,
"readTimeout": 60,
"bufferSize": "4k",
"cookieDomain": "off",
"cookiePath": "off",
"nextUpstream": "error timeout invalid_header http_502 http_503 http_504"
},
"certificateAuth": {
"authSSLCert": {
"secret": "",
"caFilename": "",
"pemSha": ""
},
"validationDepth": 0
},
"use-port-in-redirects": false,
"configuration-snippet": ""
}]
},{
"hostname": "_", "hostname": "_",
"sslPassthrough": false, "sslPassthrough": false,
"sslCertificate": "/ingress-controller/ssl/default-fake-certificate.pem", "sslCertificate": "/ingress-controller/ssl/default-fake-certificate.pem",
@ -683,6 +491,198 @@
"use-port-in-redirects": false, "use-port-in-redirects": false,
"configuration-snippet": "" "configuration-snippet": ""
}] }]
}, {
"hostname": "domain.tld",
"sslPassthrough": false,
"sslCertificate": "",
"sslExpireTime": "0001-01-01T00:00:00Z",
"sslPemChecksum": "",
"locations": [{
"path": "/dashboard",
"isDefBackend": false,
"backend": "kube-system-kubernetes-dashboard-80",
"service": {
"metadata": {
"name": "kubernetes-dashboard",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/services/kubernetes-dashboard",
"uid": "b957713f-5176-11e7-b3db-080027494b5d",
"resourceVersion": "82",
"creationTimestamp": "2017-06-15T03:00:01Z",
"labels": {
"addonmanager.kubernetes.io/mode": "Reconcile",
"app": "kubernetes-dashboard",
"kubernetes.io/minikube-addons": "dashboard",
"kubernetes.io/minikube-addons-endpoint": "dashboard"
},
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"app\":\"kubernetes-dashboard\",\"kubernetes.io/minikube-addons\":\"dashboard\",\"kubernetes.io/minikube-addons-endpoint\":\"dashboard\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"nodePort\":30000,\"port\":80,\"targetPort\":9090}],\"selector\":{\"app\":\"kubernetes-dashboard\"},\"type\":\"NodePort\"}}\n"
}
},
"spec": {
"ports": [{
"protocol": "TCP",
"port": 80,
"targetPort": 9090,
"nodePort": 30000
}],
"selector": {
"app": "kubernetes-dashboard"
},
"clusterIP": "10.0.0.120",
"type": "NodePort",
"sessionAffinity": "None"
},
"status": {
"loadBalancer": {}
}
},
"port": 80,
"basicDigestAuth": {
"type": "",
"realm": "",
"file": "",
"secured": false,
"fileSha": ""
},
"externalAuth": {
"url": "",
"host": "",
"signinUrl": "",
"method": "",
"sendBody": false,
"responseHeaders": null
},
"rateLimit": {
"connections": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rps": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rpm": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
}
},
"redirect": {
"target": "/",
"addBaseUrl": false,
"sslRedirect": true,
"forceSSLRedirect": false,
"appRoot": ""
},
"whitelist": {
"cidr": null
},
"proxy": {
"bodySize": "1m",
"conectTimeout": 5,
"sendTimeout": 60,
"readTimeout": 60,
"bufferSize": "4k",
"cookieDomain": "off",
"cookiePath": "off",
"nextUpstream": "error timeout invalid_header http_502 http_503 http_504"
},
"certificateAuth": {
"authSSLCert": {
"secret": "",
"caFilename": "",
"pemSha": ""
},
"validationDepth": 0
},
"use-port-in-redirects": false,
"configuration-snippet": ""
}, {
"path": "/",
"isDefBackend": true,
"backend": "upstream-default-backend",
"service": {
"metadata": {
"creationTimestamp": null
},
"spec": {},
"status": {
"loadBalancer": {}
}
},
"port": 0,
"basicDigestAuth": {
"type": "",
"realm": "",
"file": "",
"secured": false,
"fileSha": ""
},
"externalAuth": {
"url": "",
"host": "",
"signinUrl": "",
"method": "",
"sendBody": false,
"responseHeaders": null
},
"proxy": {
"bodySize": "1m",
"conectTimeout": 5,
"sendTimeout": 60,
"readTimeout": 60,
"bufferSize": "4k",
"cookieDomain": "off",
"cookiePath": "off",
"nextUpstream": "error timeout invalid_header http_502 http_503 http_504"
},
"certificateAuth": {
"authSSLCert": {
"secret": "",
"caFilename": "",
"pemSha": ""
},
"validationDepth": 0
},
"use-port-in-redirects": false,
"configuration-snippet": "",
"rateLimit": {
"connections": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rps": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
},
"rpm": {
"name": "",
"limit": 0,
"burst": 0,
"sharedSize": 0
}
},
"redirect": {
"target": "",
"addBaseUrl": false,
"sslRedirect": false,
"forceSSLRedirect": false,
"appRoot": ""
},
"whitelist": {
"cidr": null
}
}]
}], }],
"TCPBackends": [], "TCPBackends": [],
"UDPBackends": [] "UDPBackends": []

vendor/gopkg.in/go-playground/pool.v3/.gitignore

@ -0,0 +1,27 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
pool
old.txt
new.txt

vendor/gopkg.in/go-playground/pool.v3/LICENSE

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Dean Karn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/gopkg.in/go-playground/pool.v3/README.md

@ -0,0 +1,276 @@
Package pool
============
![Project status](https://img.shields.io/badge/version-3.1.1-green.svg)
[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/pool/branches/v3/badge.svg)](https://semaphoreci.com/joeybloggs/pool)
[![Coverage Status](https://coveralls.io/repos/go-playground/pool/badge.svg?branch=v3&service=github)](https://coveralls.io/github/go-playground/pool?branch=v3)
[![Go Report Card](https://goreportcard.com/badge/gopkg.in/go-playground/pool.v3)](https://goreportcard.com/report/gopkg.in/go-playground/pool.v3)
[![GoDoc](https://godoc.org/gopkg.in/go-playground/pool.v3?status.svg)](https://godoc.org/gopkg.in/go-playground/pool.v3)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Package pool implements a limited consumer goroutine or unlimited goroutine pool for easier goroutine handling and cancellation.
Features:
- Dead simple to use and makes no assumptions about how you will use it.
- Automatic recovery from consumer goroutines which returns an error to the results
Pool v2 advantages over Pool v1:
- Up to 300% faster due to lower contention ( BenchmarkSmallRun used to take 3 seconds, now 1 second )
- Cancels are much faster
- Easier to use, no longer need to know the # of Work Units to be processed.
- Pool can now be used as a long running/globally defined pool if desired ( v1 Pool was only good for one run )
- Supports single units of work as well as batching
- Pool can easily be reset after a Close() or Cancel() for reuse.
- Multiple Batches can be run and even cancelled on the same Pool.
- Supports individual Work Unit cancellation.
Pool v3 advantages over Pool v2:
- Objects are not interfaces allowing for less breaking changes going forward.
- Now there are 2 Pool types, both completely interchangeable, a limited worker pool and unlimited pool.
- Simpler usage of Work Units, instead of `<-work.Done` now can do `work.Wait()`
Installation
------------
Use go get.
go get gopkg.in/go-playground/pool.v3
Then import the pool package into your own code.
import "gopkg.in/go-playground/pool.v3"
Important Information READ THIS!
------
- It is recommended that you cancel a pool or batch from the calling function and not inside of the Unit of Work; it will still work, but because of goroutine scheduling and context switching it may not cancel as quickly as when called from outside.
- When Batching DO NOT FORGET TO CALL batch.QueueComplete(), if you do the Batch WILL deadlock
- It is your responsibility to call WorkUnit.IsCancelled() to check if it's cancelled after a blocking operation like waiting for a connection from a pool. (optional)
Usage and documentation
------
Please see http://godoc.org/gopkg.in/go-playground/pool.v3 for detailed usage docs.
##### Examples:
both Limited Pool and Unlimited Pool have the same signatures and are completely interchangeable.
Per Unit Work
```go
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
func main() {
p := pool.NewLimited(10)
defer p.Close()
user := p.Queue(getUser(13))
other := p.Queue(getOtherInfo(13))
user.Wait()
if err := user.Error(); err != nil {
// handle error
}
// do stuff with user
username := user.Value().(string)
fmt.Println(username)
other.Wait()
if err := other.Error(); err != nil {
// handle error
}
// do stuff with other
otherInfo := other.Value().(string)
fmt.Println(otherInfo)
}
func getUser(id int) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
// simulate waiting for something, like TCP connection to be established
// or connection from pool grabbed
time.Sleep(time.Second * 1)
if wu.IsCancelled() {
// return values not used
return nil, nil
}
// ready for processing...
return "Joeybloggs", nil
}
}
func getOtherInfo(id int) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
// simulate waiting for something, like TCP connection to be established
// or connection from pool grabbed
time.Sleep(time.Second * 1)
if wu.IsCancelled() {
// return values not used
return nil, nil
}
// ready for processing...
return "Other Info", nil
}
}
```
Batch Work
```go
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
func main() {
p := pool.NewLimited(10)
defer p.Close()
batch := p.Batch()
// for max speed Queue in another goroutine
// but it is not required, just can't start reading results
// until all items are Queued.
go func() {
for i := 0; i < 10; i++ {
batch.Queue(sendEmail("email content"))
}
// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
// if calling Cancel() it calls QueueComplete() internally
batch.QueueComplete()
}()
for email := range batch.Results() {
if err := email.Error(); err != nil {
// handle error
// maybe call batch.Cancel()
}
// use return value
fmt.Println(email.Value().(bool))
}
}
func sendEmail(email string) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
// simulate waiting for something, like TCP connection to be established
// or connection from pool grabbed
time.Sleep(time.Second * 1)
if wu.IsCancelled() {
// return values not used
return nil, nil
}
// ready for processing...
return true, nil // everything ok, send nil, error if not
}
}
```
Benchmarks
------
###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go 1.6.2
run with 1, 2, 4, 8 and 16 cpu to show it scales well...16 is double the # of logical cores on this machine.
NOTE: Cancellation times CAN vary depending on how busy your system is and on the goroutine scheduler, but the
worst case I've seen is 1s to cancel instead of 0ns
```go
go test -cpu=1,2,4,8,16 -bench=. -benchmem=true
PASS
BenchmarkLimitedSmallRun 1 1002492008 ns/op 3552 B/op 55 allocs/op
BenchmarkLimitedSmallRun-2 1 1002347196 ns/op 3568 B/op 55 allocs/op
BenchmarkLimitedSmallRun-4 1 1010533571 ns/op 4720 B/op 73 allocs/op
BenchmarkLimitedSmallRun-8 1 1008883324 ns/op 4080 B/op 63 allocs/op
BenchmarkLimitedSmallRun-16 1 1002317677 ns/op 3632 B/op 56 allocs/op
BenchmarkLimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-8 1000000 1006 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedOverconsumeLargeRun 1 4027153081 ns/op 36176 B/op 572 allocs/op
BenchmarkLimitedOverconsumeLargeRun-2 1 4003489261 ns/op 32336 B/op 512 allocs/op
BenchmarkLimitedOverconsumeLargeRun-4 1 4005579847 ns/op 34128 B/op 540 allocs/op
BenchmarkLimitedOverconsumeLargeRun-8 1 4004639857 ns/op 34992 B/op 553 allocs/op
BenchmarkLimitedOverconsumeLargeRun-16 1 4022695297 ns/op 36864 B/op 532 allocs/op
BenchmarkLimitedBatchSmallRun 1 1000785511 ns/op 6336 B/op 94 allocs/op
BenchmarkLimitedBatchSmallRun-2 1 1001459945 ns/op 4480 B/op 65 allocs/op
BenchmarkLimitedBatchSmallRun-4 1 1002475371 ns/op 6672 B/op 99 allocs/op
BenchmarkLimitedBatchSmallRun-8 1 1002498902 ns/op 4624 B/op 67 allocs/op
BenchmarkLimitedBatchSmallRun-16 1 1002202273 ns/op 5344 B/op 78 allocs/op
BenchmarkUnlimitedSmallRun 1 1002361538 ns/op 3696 B/op 59 allocs/op
BenchmarkUnlimitedSmallRun-2 1 1002230293 ns/op 3776 B/op 60 allocs/op
BenchmarkUnlimitedSmallRun-4 1 1002148953 ns/op 3776 B/op 60 allocs/op
BenchmarkUnlimitedSmallRun-8 1 1002120679 ns/op 3584 B/op 57 allocs/op
BenchmarkUnlimitedSmallRun-16 1 1001698519 ns/op 3968 B/op 63 allocs/op
BenchmarkUnlimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeRun 1 1001631711 ns/op 40352 B/op 603 allocs/op
BenchmarkUnlimitedLargeRun-2 1 1002603908 ns/op 38304 B/op 586 allocs/op
BenchmarkUnlimitedLargeRun-4 1 1001452975 ns/op 38192 B/op 584 allocs/op
BenchmarkUnlimitedLargeRun-8 1 1005382882 ns/op 35200 B/op 537 allocs/op
BenchmarkUnlimitedLargeRun-16 1 1001818482 ns/op 37056 B/op 566 allocs/op
BenchmarkUnlimitedBatchSmallRun 1 1002391247 ns/op 4240 B/op 63 allocs/op
BenchmarkUnlimitedBatchSmallRun-2 1 1010313222 ns/op 4688 B/op 70 allocs/op
BenchmarkUnlimitedBatchSmallRun-4 1 1008364651 ns/op 4304 B/op 64 allocs/op
BenchmarkUnlimitedBatchSmallRun-8 1 1001858192 ns/op 4448 B/op 66 allocs/op
BenchmarkUnlimitedBatchSmallRun-16 1 1001228000 ns/op 4320 B/op 64 allocs/op
```
To put some of these benchmarks in perspective:
- BenchmarkLimitedSmallRun did 10 seconds worth of processing in 1.002492008s
- BenchmarkLimitedSmallCancel ran 20 jobs, cancelled on job 6 and ran in 0s
- BenchmarkLimitedLargeCancel ran 1000 jobs, cancelled on job 6 and ran in 0s
- BenchmarkLimitedOverconsumeLargeRun ran 100 jobs using 25 workers in 4.027153081s
License
------
Distributed under MIT License, please see license file in code for more details.
vendor/gopkg.in/go-playground/pool.v3/batch.go generated vendored Normal file
@ -0,0 +1,131 @@
package pool
import "sync"
// Batch contains all information for a batch run of WorkUnits
type Batch interface {
// Queue queues the work to be run in the pool and starts processing immediately
// and also retains a reference for Cancellation and outputting to results.
// WARNING be sure to call QueueComplete() once all work has been Queued.
Queue(fn WorkFunc)
// QueueComplete lets the batch know that there will be no more Work Units Queued
// so that it may close the results channels once all work is completed.
// WARNING: if this function is not called the results channel will never exhaust,
// but block forever listening for more results.
QueueComplete()
// Cancel cancels the Work Units belonging to this Batch
Cancel()
// Results returns a Work Unit result channel that will output all
// completed units of work.
Results() <-chan WorkUnit
// WaitAll is an alternative to Results() where you
// may want/need to wait until all work has been
// processed, but don't need to check results.
// eg. individual units of work may handle their own
// errors, logging...
WaitAll()
}
// batch contains all information for a batch run of WorkUnits
type batch struct {
pool Pool
m sync.Mutex
units []WorkUnit
results chan WorkUnit
done chan struct{}
closed bool
wg *sync.WaitGroup
}
func newBatch(p Pool) Batch {
return &batch{
pool: p,
units: make([]WorkUnit, 0, 4), // capacity of 4 so it doesn't grow and allocate too many times.
results: make(chan WorkUnit),
done: make(chan struct{}),
wg: new(sync.WaitGroup),
}
}
// Queue queues the work to be run in the pool and starts processing immediately
// and also retains a reference for Cancellation and outputting to results.
// WARNING be sure to call QueueComplete() once all work has been Queued.
func (b *batch) Queue(fn WorkFunc) {
b.m.Lock()
if b.closed {
b.m.Unlock()
return
}
wu := b.pool.Queue(fn)
b.units = append(b.units, wu) // keeping a reference for cancellation purposes
b.wg.Add(1)
b.m.Unlock()
go func(b *batch, wu WorkUnit) {
wu.Wait()
b.results <- wu
b.wg.Done()
}(b, wu)
}
// QueueComplete lets the batch know that there will be no more Work Units Queued
// so that it may close the results channels once all work is completed.
// WARNING: if this function is not called the results channel will never exhaust,
// but block forever listening for more results.
func (b *batch) QueueComplete() {
b.m.Lock()
b.closed = true
close(b.done)
b.m.Unlock()
}
// Cancel cancels the Work Units belonging to this Batch
func (b *batch) Cancel() {
b.QueueComplete() // no more to be added
b.m.Lock()
// go in reverse order to try and cancel as many as possible
// ones at the end are less likely to have run than those at the beginning
for i := len(b.units) - 1; i >= 0; i-- {
b.units[i].Cancel()
}
b.m.Unlock()
}
// Results returns a Work Unit result channel that will output all
// completed units of work.
func (b *batch) Results() <-chan WorkUnit {
go func(b *batch) {
<-b.done
b.m.Lock()
b.wg.Wait()
b.m.Unlock()
close(b.results)
}(b)
return b.results
}
// WaitAll is an alternative to Results() where you
// may want/need to wait until all work has been
// processed, but don't need to check results.
// eg. individual units of work may handle their own
// errors and logging...
func (b *batch) WaitAll() {
for range b.Results() {
}
}
vendor/gopkg.in/go-playground/pool.v3/doc.go generated vendored Normal file
@ -0,0 +1,261 @@
/*
Package pool implements a limited consumer goroutine or unlimited goroutine pool for easier goroutine handling and cancellation.
Features:
- Dead simple to use and makes no assumptions about how you will use it.
- Automatic recovery from consumer goroutines which returns an error to
the results
Pool v2 advantages over Pool v1:
- Up to 300% faster due to lower contention,
BenchmarkSmallRun used to take 3 seconds, now 1 second
- Cancels are much faster
- Easier to use, no longer need to know the # of Work Units to be processed.
- Pool can now be used as a long running/globally defined pool if desired,
v1 Pool was only good for one run
- Supports single units of work as well as batching
- Pool can easily be reset after a Close() or Cancel() for reuse.
- Multiple Batches can be run and even cancelled on the same Pool.
- Supports individual Work Unit cancellation.
Pool v3 advantages over Pool v2:
- Objects are now interfaces allowing for fewer breaking changes going forward.
- Now there are 2 Pool types, both completely interchangeable, a limited worker pool
and unlimited pool.
- Simpler usage of Work Units: instead of `<-work.Done`, you can now do `work.Wait()`
Important Information READ THIS!
important usage information
- It is recommended that you cancel a pool or batch from the calling
function and not inside of the Unit of Work, it will work fine, however
because of the goroutine scheduler and context switching it may not
cancel as soon as if called from outside.
- When Batching DO NOT FORGET TO CALL batch.QueueComplete();
if you don't, the Batch WILL deadlock
- It is your responsibility to call WorkUnit.IsCancelled() to check if it's cancelled
after a blocking operation like waiting for a connection from a pool. (optional)
Usage and documentation
Both Limited Pool and Unlimited Pool have the same signatures and are completely interchangeable.
Per Unit Work
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
func main() {
p := pool.NewLimited(10)
defer p.Close()
user := p.Queue(getUser(13))
other := p.Queue(getOtherInfo(13))
user.Wait()
if err := user.Error(); err != nil {
// handle error
}
// do stuff with user
username := user.Value().(string)
fmt.Println(username)
other.Wait()
if err := other.Error(); err != nil {
// handle error
}
// do stuff with other
otherInfo := other.Value().(string)
fmt.Println(otherInfo)
}
func getUser(id int) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
// simulate waiting for something, like TCP connection to be established
// or connection from pool grabbed
time.Sleep(time.Second * 1)
if wu.IsCancelled() {
// return values not used
return nil, nil
}
// ready for processing...
return "Joeybloggs", nil
}
}
func getOtherInfo(id int) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
// simulate waiting for something, like TCP connection to be established
// or connection from pool grabbed
time.Sleep(time.Second * 1)
if wu.IsCancelled() {
// return values not used
return nil, nil
}
// ready for processing...
return "Other Info", nil
}
}
Batch Work
package main
import (
"fmt"
"time"
"gopkg.in/go-playground/pool.v3"
)
func main() {
p := pool.NewLimited(10)
defer p.Close()
batch := p.Batch()
// for max speed Queue in another goroutine
// but it is not required, just can't start reading results
// until all items are Queued.
go func() {
for i := 0; i < 10; i++ {
batch.Queue(sendEmail("email content"))
}
// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
// if calling Cancel(), it calls QueueComplete() internally
batch.QueueComplete()
}()
for email := range batch.Results() {
if err := email.Error(); err != nil {
// handle error
// maybe call batch.Cancel()
}
// use return value
fmt.Println(email.Value().(bool))
}
}
func sendEmail(email string) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
// simulate waiting for something, like TCP connection to be established
// or connection from pool grabbed
time.Sleep(time.Second * 1)
if wu.IsCancelled() {
// return values not used
return nil, nil
}
// ready for processing...
return true, nil // everything ok; return nil and an error if not
}
}
Benchmarks
Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go 1.6.2
Run with 1, 2, 4, 8 and 16 CPUs to show it scales well... 16 is double the # of logical cores on this machine.
NOTE: Cancellation times CAN vary depending on how busy your system is and how the goroutine scheduler behaves, but the
worst case I've seen is 1 second to cancel instead of 0ns
go test -cpu=1,2,4,8,16 -bench=. -benchmem=true
PASS
BenchmarkLimitedSmallRun 1 1002492008 ns/op 3552 B/op 55 allocs/op
BenchmarkLimitedSmallRun-2 1 1002347196 ns/op 3568 B/op 55 allocs/op
BenchmarkLimitedSmallRun-4 1 1010533571 ns/op 4720 B/op 73 allocs/op
BenchmarkLimitedSmallRun-8 1 1008883324 ns/op 4080 B/op 63 allocs/op
BenchmarkLimitedSmallRun-16 1 1002317677 ns/op 3632 B/op 56 allocs/op
BenchmarkLimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-8 1000000 1006 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkLimitedOverconsumeLargeRun 1 4027153081 ns/op 36176 B/op 572 allocs/op
BenchmarkLimitedOverconsumeLargeRun-2 1 4003489261 ns/op 32336 B/op 512 allocs/op
BenchmarkLimitedOverconsumeLargeRun-4 1 4005579847 ns/op 34128 B/op 540 allocs/op
BenchmarkLimitedOverconsumeLargeRun-8 1 4004639857 ns/op 34992 B/op 553 allocs/op
BenchmarkLimitedOverconsumeLargeRun-16 1 4022695297 ns/op 36864 B/op 532 allocs/op
BenchmarkLimitedBatchSmallRun 1 1000785511 ns/op 6336 B/op 94 allocs/op
BenchmarkLimitedBatchSmallRun-2 1 1001459945 ns/op 4480 B/op 65 allocs/op
BenchmarkLimitedBatchSmallRun-4 1 1002475371 ns/op 6672 B/op 99 allocs/op
BenchmarkLimitedBatchSmallRun-8 1 1002498902 ns/op 4624 B/op 67 allocs/op
BenchmarkLimitedBatchSmallRun-16 1 1002202273 ns/op 5344 B/op 78 allocs/op
BenchmarkUnlimitedSmallRun 1 1002361538 ns/op 3696 B/op 59 allocs/op
BenchmarkUnlimitedSmallRun-2 1 1002230293 ns/op 3776 B/op 60 allocs/op
BenchmarkUnlimitedSmallRun-4 1 1002148953 ns/op 3776 B/op 60 allocs/op
BenchmarkUnlimitedSmallRun-8 1 1002120679 ns/op 3584 B/op 57 allocs/op
BenchmarkUnlimitedSmallRun-16 1 1001698519 ns/op 3968 B/op 63 allocs/op
BenchmarkUnlimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
BenchmarkUnlimitedLargeRun 1 1001631711 ns/op 40352 B/op 603 allocs/op
BenchmarkUnlimitedLargeRun-2 1 1002603908 ns/op 38304 B/op 586 allocs/op
BenchmarkUnlimitedLargeRun-4 1 1001452975 ns/op 38192 B/op 584 allocs/op
BenchmarkUnlimitedLargeRun-8 1 1005382882 ns/op 35200 B/op 537 allocs/op
BenchmarkUnlimitedLargeRun-16 1 1001818482 ns/op 37056 B/op 566 allocs/op
BenchmarkUnlimitedBatchSmallRun 1 1002391247 ns/op 4240 B/op 63 allocs/op
BenchmarkUnlimitedBatchSmallRun-2 1 1010313222 ns/op 4688 B/op 70 allocs/op
BenchmarkUnlimitedBatchSmallRun-4 1 1008364651 ns/op 4304 B/op 64 allocs/op
BenchmarkUnlimitedBatchSmallRun-8 1 1001858192 ns/op 4448 B/op 66 allocs/op
BenchmarkUnlimitedBatchSmallRun-16 1 1001228000 ns/op 4320 B/op 64 allocs/op
To put some of these benchmarks in perspective:
- BenchmarkLimitedSmallRun did 10 seconds worth of processing in 1.002492008s
- BenchmarkLimitedSmallCancel ran 20 jobs, cancelled on job 6 and ran in 0s
- BenchmarkLimitedLargeCancel ran 1000 jobs, cancelled on job 6 and ran in 0s
- BenchmarkLimitedOverconsumeLargeRun ran 100 jobs using 25 workers in 4.027153081s
*/
package pool
vendor/gopkg.in/go-playground/pool.v3/errors.go generated vendored Normal file
@ -0,0 +1,37 @@
package pool
const (
errCancelled = "ERROR: Work Unit Cancelled"
errRecovery = "ERROR: Work Unit failed due to a recoverable error: '%v'\n, Stack Trace:\n %s"
errClosed = "ERROR: Work Unit added/run after the pool had been closed or cancelled"
)
// ErrRecovery contains the error when a consumer goroutine needed to be recovered
type ErrRecovery struct {
s string
}
// Error prints recovery error
func (e *ErrRecovery) Error() string {
return e.s
}
// ErrPoolClosed is the error returned to all work units that may have been in or added to the pool after its closing.
type ErrPoolClosed struct {
s string
}
// Error prints Work Unit Close error
func (e *ErrPoolClosed) Error() string {
return e.s
}
// ErrCancelled is the error returned to a Work Unit when it has been cancelled.
type ErrCancelled struct {
s string
}
// Error prints Work Unit Cancellation error
func (e *ErrCancelled) Error() string {
return e.s
}
vendor/gopkg.in/go-playground/pool.v3/limited_pool.go generated vendored Normal file
@ -0,0 +1,200 @@
package pool
import (
"fmt"
"math"
"runtime"
"sync"
)
var _ Pool = new(limitedPool)
// limitedPool contains all information for a limited pool instance.
type limitedPool struct {
workers uint
work chan *workUnit
cancel chan struct{}
closed bool
m sync.RWMutex
}
// NewLimited returns a new limited pool instance
func NewLimited(workers uint) Pool {
if workers == 0 {
panic("invalid workers '0'")
}
p := &limitedPool{
workers: workers,
}
p.initialize()
return p
}
func (p *limitedPool) initialize() {
p.work = make(chan *workUnit, p.workers*2)
p.cancel = make(chan struct{})
p.closed = false
// fire up workers here
for i := 0; i < int(p.workers); i++ {
p.newWorker(p.work, p.cancel)
}
}
// passing work and cancel channels to newWorker() to avoid any potential race condition
// between p.work read & write
func (p *limitedPool) newWorker(work chan *workUnit, cancel chan struct{}) {
go func(p *limitedPool) {
var wu *workUnit
defer func(p *limitedPool) {
if err := recover(); err != nil {
trace := make([]byte, 1<<16)
n := runtime.Stack(trace, true)
s := fmt.Sprintf(errRecovery, err, string(trace[:int(math.Min(float64(n), float64(7000)))]))
iwu := wu
iwu.err = &ErrRecovery{s: s}
close(iwu.done)
// need to fire up new worker to replace this one as this one is exiting
p.newWorker(p.work, p.cancel)
}
}(p)
var value interface{}
var err error
for {
select {
case wu = <-work:
// possible for one more nilled out value to make it
// through when the channel is closed; don't quite understand why
if wu == nil {
continue
}
// support for individual WorkUnit cancellation
// and batch job cancellation
if wu.cancelled.Load() == nil {
value, err = wu.fn(wu)
wu.writing.Store(struct{}{})
// need to check again in case the WorkFunc cancelled this unit of work
// otherwise we'll have a race condition
if wu.cancelled.Load() == nil && wu.cancelling.Load() == nil {
wu.value, wu.err = value, err
// who knows where the Done channel is being listened to on the other end
// don't want this to block just because caller is waiting on another unit
// of work to be done first so we use close
close(wu.done)
}
}
case <-cancel:
return
}
}
}(p)
}
// Queue queues the work to be run, and starts processing immediately
func (p *limitedPool) Queue(fn WorkFunc) WorkUnit {
w := &workUnit{
done: make(chan struct{}),
fn: fn,
}
go func() {
p.m.RLock()
if p.closed {
w.err = &ErrPoolClosed{s: errClosed}
if w.cancelled.Load() == nil {
close(w.done)
}
p.m.RUnlock()
return
}
p.work <- w
p.m.RUnlock()
}()
return w
}
// Reset reinitializes a pool that has been closed/cancelled back to a working state.
// if the pool has not been closed/cancelled, nothing happens as the pool is still in
// a valid running state
func (p *limitedPool) Reset() {
p.m.Lock()
if !p.closed {
p.m.Unlock()
return
}
// cancelled the pool, not closed it, pool will be usable after calling initialize().
p.initialize()
p.m.Unlock()
}
func (p *limitedPool) closeWithError(err error) {
p.m.Lock()
if !p.closed {
close(p.cancel)
close(p.work)
p.closed = true
}
for wu := range p.work {
wu.cancelWithError(err)
}
p.m.Unlock()
}
// Cancel cleans up the pool workers and channels and cancels any pending
// work still to be processed.
// call Reset() to reinitialize the pool for use.
func (p *limitedPool) Cancel() {
err := &ErrCancelled{s: errCancelled}
p.closeWithError(err)
}
// Close cleans up the pool workers and channels and cancels any pending
// work still yet to be processed.
// call Reset() to reinitialize the pool for use.
func (p *limitedPool) Close() {
err := &ErrPoolClosed{s: errClosed}
p.closeWithError(err)
}
// Batch creates a new Batch object for queueing Work Units separate from any others
// that may be running on the pool. Grouping these Work Units together allows for individual
// Cancellation of the Batch Work Units without affecting anything else running on the pool
// as well as outputting the results on a channel as they complete.
// NOTE: Batch is not reusable, once QueueComplete() has been called its lifetime has been sealed
// to completing the Queued items.
func (p *limitedPool) Batch() Batch {
return newBatch(p)
}
vendor/gopkg.in/go-playground/pool.v3/pool.go generated vendored Normal file
@ -0,0 +1,32 @@
package pool
// Pool contains all information for a pool instance.
type Pool interface {
// Queue queues the work to be run, and starts processing immediately
Queue(fn WorkFunc) WorkUnit
// Reset reinitializes a pool that has been closed/cancelled back to a working
// state. if the pool has not been closed/cancelled, nothing happens as the pool
// is still in a valid running state
Reset()
// Cancel cancels any pending work still not committed to processing.
// Call Reset() to reinitialize the pool for use.
Cancel()
// Close cleans up pool data and cancels any pending work still not committed
// to processing. Call Reset() to reinitialize the pool for use.
Close()
// Batch creates a new Batch object for queueing Work Units separate from any
// others that may be running on the pool. Grouping these Work Units together
// allows for individual Cancellation of the Batch Work Units without affecting
// anything else running on the pool as well as outputting the results on a
// channel as they complete. NOTE: Batch is not reusable, once QueueComplete()
// has been called its lifetime has been sealed to completing the Queued items.
Batch() Batch
}
// WorkFunc is the function type needed by the pool for execution
type WorkFunc func(wu WorkUnit) (interface{}, error)
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool.go generated vendored Normal file
@ -0,0 +1,164 @@
package pool
import (
"fmt"
"math"
"runtime"
"sync"
)
var _ Pool = new(unlimitedPool)
// unlimitedPool contains all information for an unlimited pool instance.
type unlimitedPool struct {
units []*workUnit
cancel chan struct{}
closed bool
m sync.Mutex
}
// New returns a new unlimited pool instance
func New() Pool {
p := &unlimitedPool{
units: make([]*workUnit, 0, 4), // initial capacity of 4; anyone using a pool probably queues at least a few units, which reduces slice resizes
}
p.initialize()
return p
}
func (p *unlimitedPool) initialize() {
p.cancel = make(chan struct{})
p.closed = false
}
// Queue queues the work to be run, and starts processing immediately
func (p *unlimitedPool) Queue(fn WorkFunc) WorkUnit {
w := &workUnit{
done: make(chan struct{}),
fn: fn,
}
p.m.Lock()
if p.closed {
w.err = &ErrPoolClosed{s: errClosed}
// if w.cancelled.Load() == nil {
close(w.done)
// }
p.m.Unlock()
return w
}
p.units = append(p.units, w)
go func(w *workUnit) {
defer func(w *workUnit) {
if err := recover(); err != nil {
trace := make([]byte, 1<<16)
n := runtime.Stack(trace, true)
s := fmt.Sprintf(errRecovery, err, string(trace[:int(math.Min(float64(n), float64(7000)))]))
w.cancelled.Store(struct{}{})
w.err = &ErrRecovery{s: s}
close(w.done)
}
}(w)
// support for individual WorkUnit cancellation
// and batch job cancellation
if w.cancelled.Load() == nil {
val, err := w.fn(w)
w.writing.Store(struct{}{})
// need to check again in case the WorkFunc cancelled this unit of work
// otherwise we'll have a race condition
if w.cancelled.Load() == nil && w.cancelling.Load() == nil {
w.value, w.err = val, err
// who knows where the Done channel is being listened to on the other end
// don't want this to block just because caller is waiting on another unit
// of work to be done first so we use close
close(w.done)
}
}
}(w)
p.m.Unlock()
return w
}
// Reset reinitializes a pool that has been closed/cancelled back to a working state.
// if the pool has not been closed/cancelled, nothing happens as the pool is still in
// a valid running state
func (p *unlimitedPool) Reset() {
p.m.Lock()
if !p.closed {
p.m.Unlock()
return
}
// cancelled the pool, not closed it, pool will be usable after calling initialize().
p.initialize()
p.m.Unlock()
}
func (p *unlimitedPool) closeWithError(err error) {
p.m.Lock()
if !p.closed {
close(p.cancel)
p.closed = true
// clear out slice values for garbage collection, but keep the backing array for possible reuse
// go in reverse order to try and cancel as many as possible
// ones at the end are less likely to have run than those at the beginning
for i := len(p.units) - 1; i >= 0; i-- {
p.units[i].cancelWithError(err)
p.units[i] = nil
}
p.units = p.units[0:0]
}
p.m.Unlock()
}
// Cancel cleans up the pool workers and channels and cancels any pending
// work still to be processed.
// call Reset() to reinitialize the pool for use.
func (p *unlimitedPool) Cancel() {
err := &ErrCancelled{s: errCancelled}
p.closeWithError(err)
}
// Close cleans up the pool workers and channels and cancels any pending
// work still yet to be processed.
// call Reset() to reinitialize the pool for use.
func (p *unlimitedPool) Close() {
err := &ErrPoolClosed{s: errClosed}
p.closeWithError(err)
}
// Batch creates a new Batch object for queueing Work Units separate from any others
// that may be running on the pool. Grouping these Work Units together allows for individual
// Cancellation of the Batch Work Units without affecting anything else running on the pool
// as well as outputting the results on a channel as they complete.
// NOTE: Batch is not reusable, once QueueComplete() has been called its lifetime has been sealed
// to completing the Queued items.
func (p *unlimitedPool) Batch() Batch {
return newBatch(p)
}
vendor/gopkg.in/go-playground/pool.v3/work_unit.go generated vendored Normal file
@ -0,0 +1,77 @@
package pool
import "sync/atomic"
// WorkUnit contains a single unit of work's values
type WorkUnit interface {
// Wait blocks until WorkUnit has been processed or cancelled
Wait()
// Value returns the work units return value
Value() interface{}
// Error returns the Work Unit's error
Error() error
// Cancel cancels this specific unit of work, if not already committed
// to processing.
Cancel()
// IsCancelled returns if the Work Unit has been cancelled.
// NOTE: After Checking IsCancelled(), if it returns false the
// Work Unit can no longer be cancelled and will use your returned values.
IsCancelled() bool
}
var _ WorkUnit = new(workUnit)
// workUnit contains a single unit of work's values
type workUnit struct {
value interface{}
err error
done chan struct{}
fn WorkFunc
cancelled atomic.Value
cancelling atomic.Value
writing atomic.Value
}
// Cancel cancels this specific unit of work, if not already committed to processing.
func (wu *workUnit) Cancel() {
wu.cancelWithError(&ErrCancelled{s: errCancelled})
}
func (wu *workUnit) cancelWithError(err error) {
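// store 'cancelling' before inspecting 'writing': paired with the worker
// storing 'writing' before it checks 'cancelling', this ensures the worker
// and the canceller cannot both decide to close the done channel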
wu.cancelling.Store(struct{}{})
if wu.writing.Load() == nil && wu.cancelled.Load() == nil {
wu.cancelled.Store(struct{}{})
wu.err = err
close(wu.done)
}
}
// Wait blocks until WorkUnit has been processed or cancelled
func (wu *workUnit) Wait() {
<-wu.done
}
// Value returns the work units return value
func (wu *workUnit) Value() interface{} {
return wu.value
}
// Error returns the Work Unit's error
func (wu *workUnit) Error() error {
return wu.err
}
// IsCancelled returns if the Work Unit has been cancelled.
// NOTE: After Checking IsCancelled(), if it returns false the
// Work Unit can no longer be cancelled and will use your returned values.
func (wu *workUnit) IsCancelled() bool {
wu.writing.Store(struct{}{}) // ensure that after this check we are committed; the unit cannot be cancelled if it isn't already
return wu.cancelled.Load() != nil
}