commit 16a213c076
1271 changed files with 186289 additions and 303314 deletions
.gitignore (vendored): 3 changes
@@ -2,6 +2,9 @@
 ._*
 .DS_Store
 
+# intellij files
+.idea/*
+
 # Eclipse files
 .classpath
 .project

Godeps/Godeps.json (generated): 1092 changes
File diff suppressed because it is too large

@@ -26,10 +26,11 @@ import (
     "github.com/golang/glog"
 
     compute "google.golang.org/api/compute/v1"
 
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
 
     "k8s.io/ingress/controllers/gce/healthchecks"
     "k8s.io/ingress/controllers/gce/instances"
@@ -161,7 +162,7 @@ func (b *Backends) Init(pp probeProvider) {
 
 // Get returns a single backend.
 func (b *Backends) Get(port int64) (*compute.BackendService, error) {
-    be, err := b.cloud.GetBackendService(b.namer.BeName(port))
+    be, err := b.cloud.GetGlobalBackendService(b.namer.BeName(port))
     if err != nil {
         return nil, err
     }
@@ -203,7 +204,7 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic
         Port:     namedPort.Port,
         PortName: namedPort.Name,
     }
-    if err := b.cloud.CreateBackendService(bs); err != nil {
+    if err := b.cloud.CreateGlobalBackendService(bs); err != nil {
         return nil, err
     }
     return b.Get(namedPort.Port)
@@ -248,7 +249,7 @@ func (b *Backends) Add(p ServicePort) error {
         be.Protocol = string(p.Protocol)
         be.HealthChecks = []string{hcLink}
         be.Description = p.Description()
-        if err = b.cloud.UpdateBackendService(be); err != nil {
+        if err = b.cloud.UpdateGlobalBackendService(be); err != nil {
             return err
         }
     }
@@ -282,7 +283,7 @@ func (b *Backends) Delete(port int64) (err error) {
         }
     }()
     // Try deleting health checks even if a backend is not found.
-    if err = b.cloud.DeleteBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
+    if err = b.cloud.DeleteGlobalBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
         return err
     }
 
@@ -294,7 +295,7 @@ func (b *Backends) List() ([]interface{}, error) {
     // TODO: for consistency with the rest of this sub-package this method
     // should return a list of backend ports.
    interList := []interface{}{}
-    be, err := b.cloud.ListBackendServices()
+    be, err := b.cloud.ListGlobalBackendServices()
     if err != nil {
         return interList, err
     }
@@ -361,7 +362,7 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr
     newBackends := getBackendsForIGs(addIGs, bm)
     be.Backends = append(originalBackends, newBackends...)
 
-    if err := b.cloud.UpdateBackendService(be); err != nil {
+    if err := b.cloud.UpdateGlobalBackendService(be); err != nil {
         if utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
             glog.V(2).Infof("Updating backend service backends with balancing mode %v failed, will try another mode. err:%v", bm, err)
             errs = append(errs, err.Error())
@@ -427,14 +428,14 @@ func (b *Backends) Shutdown() error {
 
 // Status returns the status of the given backend by name.
 func (b *Backends) Status(name string) string {
-    backend, err := b.cloud.GetBackendService(name)
+    backend, err := b.cloud.GetGlobalBackendService(name)
     if err != nil || len(backend.Backends) == 0 {
         return "Unknown"
     }
 
     // TODO: Look at more than one backend's status
     // TODO: Include port, ip in the status, since it's in the health info.
-    hs, err := b.cloud.GetHealth(name, backend.Backends[0].Group)
+    hs, err := b.cloud.GetGlobalBackendServiceHealth(name, backend.Backends[0].Group)
     if err != nil || len(hs.HealthStatus) == 0 || hs.HealthStatus[0] == nil {
         return "Unknown"
     }
@@ -453,7 +454,7 @@ func applyLegacyHCToHC(existing *compute.HttpHealthCheck, hc *healthchecks.Healt
         hc.UnhealthyThreshold = existing.UnhealthyThreshold
     }
 }
 
-func applyProbeSettingsToHC(p *api_v1.Probe, hc *healthchecks.HealthCheck) {
+func applyProbeSettingsToHC(p *v1.Probe, hc *healthchecks.HealthCheck) {
     healthPath := p.Handler.HTTPGet.Path
     // GCE requires a leading "/" for health check urls.
     if !strings.HasPrefix(healthPath, "/") {

@@ -23,9 +23,9 @@ import (
 
     compute "google.golang.org/api/compute/v1"
     "google.golang.org/api/googleapi"
+    api_v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
 
     "k8s.io/ingress/controllers/gce/healthchecks"
     "k8s.io/ingress/controllers/gce/instances"
@@ -87,7 +87,7 @@ func TestBackendPoolAdd(t *testing.T) {
     beName := namer.BeName(nodePort.Port)
 
     // Check that the new backend has the right port
-    be, err := f.GetBackendService(beName)
+    be, err := f.GetGlobalBackendService(beName)
     if err != nil {
         t.Fatalf("Did not find expected backend %v", beName)
     }
@@ -171,7 +171,7 @@ func TestBackendPoolUpdate(t *testing.T) {
     pool.Add(p)
     beName := namer.BeName(p.Port)
 
-    be, err := f.GetBackendService(beName)
+    be, err := f.GetGlobalBackendService(beName)
     if err != nil {
         t.Fatalf("Unexpected err: %v", err)
     }
@@ -190,7 +190,7 @@ func TestBackendPoolUpdate(t *testing.T) {
     p.Protocol = utils.ProtocolHTTPS
     pool.Sync([]ServicePort{p})
 
-    be, err = f.GetBackendService(beName)
+    be, err = f.GetGlobalBackendService(beName)
     if err != nil {
         t.Fatalf("Unexpected err retrieving backend service after update: %v", err)
     }
@@ -217,7 +217,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
     pool.Add(nodePort)
     beName := namer.BeName(nodePort.Port)
 
-    be, _ := f.GetBackendService(beName)
+    be, _ := f.GetGlobalBackendService(beName)
 
     // Mess up the link between backend service and instance group.
     // This simulates a user doing foolish things through the UI.
@@ -225,7 +225,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
         {Group: "test edge hop"},
     }
     f.calls = []int{}
-    f.UpdateBackendService(be)
+    f.UpdateGlobalBackendService(be)
 
     pool.Add(nodePort)
     for _, call := range f.calls {
@@ -233,7 +233,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
             t.Fatalf("Unexpected create for existing backend service")
         }
     }
-    gotBackend, err := f.GetBackendService(beName)
+    gotBackend, err := f.GetGlobalBackendService(beName)
     if err != nil {
         t.Fatalf("Failed to find a backend with name %v: %v", beName, err)
     }
@@ -296,12 +296,12 @@ func TestBackendPoolSync(t *testing.T) {
     // k8s-be-3001--uid - another cluster tagged with uid
     unrelatedBackends := sets.NewString([]string{"foo", "k8s-be-foo", "k8s--bar--foo", "k8s-be-30001--uid"}...)
     for _, name := range unrelatedBackends.List() {
-        f.CreateBackendService(&compute.BackendService{Name: name})
+        f.CreateGlobalBackendService(&compute.BackendService{Name: name})
     }
 
     namer := &utils.Namer{}
     // This backend should get deleted again since it is managed by this cluster.
-    f.CreateBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)})
+    f.CreateGlobalBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)})
 
     // TODO: Avoid casting.
     // Repopulate the pool with a cloud list, which now includes the 82 port
@@ -311,7 +311,7 @@ func TestBackendPoolSync(t *testing.T) {
 
     pool.GC(svcNodePorts)
 
-    currBackends, _ := f.ListBackendServices()
+    currBackends, _ := f.ListGlobalBackendServices()
     currSet := sets.NewString()
     for _, b := range currBackends.Items {
         currSet.Insert(b.Name)
@@ -355,7 +355,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
     }
 
     // Create backend service with expected name and link to legacy health check
-    f.CreateBackendService(&compute.BackendService{
+    f.CreateGlobalBackendService(&compute.BackendService{
         Name:         beName,
         HealthChecks: []string{hc.SelfLink},
     })
@@ -390,7 +390,7 @@ func TestBackendPoolShutdown(t *testing.T) {
     // Add a backend-service and verify that it doesn't exist after Shutdown()
     pool.Add(ServicePort{Port: 80})
     pool.Shutdown()
-    if _, err := f.GetBackendService(namer.BeName(80)); err == nil {
+    if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil {
         t.Fatalf("%v", err)
     }
 }
@@ -404,7 +404,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
     // This will add the instance group k8s-ig to the instance pool
     pool.Add(ServicePort{Port: 80})
 
-    be, err := f.GetBackendService(namer.BeName(80))
+    be, err := f.GetGlobalBackendService(namer.BeName(80))
     if err != nil {
         t.Fatalf("%v", err)
     }
@@ -415,13 +415,13 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
         {Group: "k8s-ig-foo"},
     }
     be.Backends = append(be.Backends, newGroups...)
-    if err = f.UpdateBackendService(be); err != nil {
+    if err = f.UpdateGlobalBackendService(be); err != nil {
         t.Fatalf("Failed to update backend service %v", be.Name)
     }
 
     // Make sure repeated adds don't clobber the inserted instance group
     pool.Add(ServicePort{Port: 80})
-    be, err = f.GetBackendService(namer.BeName(80))
+    be, err = f.GetGlobalBackendService(namer.BeName(80))
     if err != nil {
         t.Fatalf("%v", err)
     }
@@ -463,7 +463,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
     }
 
     pool.Add(nodePort)
-    be, err := f.GetBackendService(namer.BeName(nodePort.Port))
+    be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port))
     if err != nil {
         t.Fatalf("%v", err)
     }

@@ -20,7 +20,7 @@ import (
     "fmt"
 
     compute "google.golang.org/api/compute/v1"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
+    api_v1 "k8s.io/api/core/v1"
     "k8s.io/client-go/tools/cache"
 
     "k8s.io/ingress/controllers/gce/utils"
@@ -44,8 +44,8 @@ type FakeBackendServices struct {
     errFunc func(op int, be *compute.BackendService) error
 }
 
-// GetBackendService fakes getting a backend service from the cloud.
-func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendService, error) {
+// GetGlobalBackendService fakes getting a backend service from the cloud.
+func (f *FakeBackendServices) GetGlobalBackendService(name string) (*compute.BackendService, error) {
     f.calls = append(f.calls, utils.Get)
     obj, exists, err := f.backendServices.GetByKey(name)
     if !exists {
@@ -62,8 +62,8 @@ func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendSe
     return nil, fmt.Errorf("backend service %v not found", name)
 }
 
-// CreateBackendService fakes backend service creation.
-func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) error {
+// CreateGlobalBackendService fakes backend service creation.
+func (f *FakeBackendServices) CreateGlobalBackendService(be *compute.BackendService) error {
     if f.errFunc != nil {
         if err := f.errFunc(utils.Create, be); err != nil {
             return err
@@ -74,8 +74,8 @@ func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) e
     return f.backendServices.Update(be)
 }
 
-// DeleteBackendService fakes backend service deletion.
-func (f *FakeBackendServices) DeleteBackendService(name string) error {
+// DeleteGlobalBackendService fakes backend service deletion.
+func (f *FakeBackendServices) DeleteGlobalBackendService(name string) error {
     f.calls = append(f.calls, utils.Delete)
     svc, exists, err := f.backendServices.GetByKey(name)
     if !exists {
@@ -87,8 +87,8 @@ func (f *FakeBackendServices) DeleteBackendService(name string) error {
     return f.backendServices.Delete(svc)
 }
 
-// ListBackendServices fakes backend service listing.
-func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList, error) {
+// ListGlobalBackendServices fakes backend service listing.
+func (f *FakeBackendServices) ListGlobalBackendServices() (*compute.BackendServiceList, error) {
     var svcs []*compute.BackendService
     for _, s := range f.backendServices.List() {
         svc := s.(*compute.BackendService)
@@ -97,8 +97,8 @@ func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList
     return &compute.BackendServiceList{Items: svcs}, nil
 }
 
-// UpdateBackendService fakes updating a backend service.
-func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) error {
+// UpdateGlobalBackendService fakes updating a backend service.
+func (f *FakeBackendServices) UpdateGlobalBackendService(be *compute.BackendService) error {
     if f.errFunc != nil {
         if err := f.errFunc(utils.Update, be); err != nil {
             return err
@@ -108,9 +108,9 @@ func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) e
     return f.backendServices.Update(be)
 }
 
-// GetHealth fakes getting backend service health.
-func (f *FakeBackendServices) GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
-    be, err := f.GetBackendService(name)
+// GetGlobalBackendServiceHealth fakes getting backend service health.
+func (f *FakeBackendServices) GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
+    be, err := f.GetGlobalBackendService(name)
     if err != nil {
         return nil, err
     }

@@ -18,7 +18,7 @@ package backends
 
 import (
     compute "google.golang.org/api/compute/v1"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
+    api_v1 "k8s.io/api/core/v1"
 )
 
 // ProbeProvider retrieves a probe struct given a nodePort
@@ -42,10 +42,10 @@ type BackendPool interface {
 
 // BackendServices is an interface for managing gce backend services.
 type BackendServices interface {
-    GetBackendService(name string) (*compute.BackendService, error)
-    UpdateBackendService(bg *compute.BackendService) error
-    CreateBackendService(bg *compute.BackendService) error
-    DeleteBackendService(name string) error
-    ListBackendServices() (*compute.BackendServiceList, error)
-    GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
+    GetGlobalBackendService(name string) (*compute.BackendService, error)
+    UpdateGlobalBackendService(bg *compute.BackendService) error
+    CreateGlobalBackendService(bg *compute.BackendService) error
+    DeleteGlobalBackendService(name string) error
+    ListGlobalBackendServices() (*compute.BackendServiceList, error)
+    GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
 }

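The interface change above is a pure rename: every method gains a "Global" prefix (and GetHealth becomes GetGlobalBackendServiceHealth) while the signatures stay the same. A minimal sketch of a caller against the renamed interface follows; ensureBackend, its name, and its fallback logic are hypothetical and only illustrate the new method names in use.

// Hypothetical helper, not part of this commit: exercises the renamed methods.
// "cloud" can be any BackendServices implementation, e.g. FakeBackendServices above.
func ensureBackend(cloud BackendServices, name string) error {
    be, err := cloud.GetGlobalBackendService(name)
    if err != nil {
        // Assume the error means "not found" and create the service.
        return cloud.CreateGlobalBackendService(&compute.BackendService{Name: name})
    }
    be.Description = "managed by the GCE ingress controller"
    return cloud.UpdateGlobalBackendService(be)
}
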
@@ -224,7 +224,7 @@ func getGCEClient(config io.Reader) *gce.GCECloud {
         // user has no need for Ingress in this case. If they grant
         // permissions to the node they will have to restart the controller
         // manually to re-create the client.
-        if _, err = cloud.ListBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {
+        if _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {
             return cloud
         }
         glog.Warningf("Failed to list backend services, retrying: %v", err)

@@ -24,14 +24,14 @@ import (
 
     "github.com/golang/glog"
 
+    api_v1 "k8s.io/api/core/v1"
+    extensions "k8s.io/api/extensions/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/client-go/kubernetes"
     scheme "k8s.io/client-go/kubernetes/scheme"
     unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
     listers "k8s.io/client-go/listers/core/v1"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
-    extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"
 

@@ -24,14 +24,14 @@ import (
 
     compute "google.golang.org/api/compute/v1"
 
+    api_v1 "k8s.io/api/core/v1"
+    extensions "k8s.io/api/extensions/v1beta1"
     meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/client-go/kubernetes/fake"
-    "k8s.io/client-go/pkg/api"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
-    extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+    "k8s.io/kubernetes/pkg/api"
 
     "k8s.io/ingress/controllers/gce/firewalls"
     "k8s.io/ingress/controllers/gce/loadbalancers"
@@ -428,7 +428,7 @@ func TestLbChangeStaticIP(t *testing.T) {
     }
 
     ing.Annotations = map[string]string{staticIPNameKey: "testip"}
-    cm.fakeLbs.ReserveGlobalStaticIP("testip", "1.2.3.4")
+    cm.fakeLbs.ReserveGlobalAddress(&compute.Address{Name: "testip", Address: "1.2.3.4"})
 
     // Second sync reassigns 1.2.3.4 to existing forwarding rule (by recreating it)
     lbc.sync(ingStoreKey)

@@ -65,7 +65,7 @@ func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager
         testDefaultBeNodePort,
         namer,
     )
-    frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(namer), namer)
+    frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(), namer)
     cm := &ClusterManager{
         ClusterNamer: namer,
         instancePool: nodePool,

@@ -21,10 +21,10 @@ import (
 
     "github.com/golang/glog"
 
+    api_v1 "k8s.io/api/core/v1"
+    extensions "k8s.io/api/extensions/v1beta1"
     meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
-    extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
 
     "k8s.io/ingress/controllers/gce/loadbalancers"
 )

@@ -21,10 +21,10 @@ import (
     "testing"
     "time"
 
+    api_v1 "k8s.io/api/core/v1"
     meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
     "k8s.io/ingress/controllers/gce/backends"
     "k8s.io/ingress/controllers/gce/utils"
 )
@@ -76,20 +76,19 @@ func TestInstancesAddedToZones(t *testing.T) {
     lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
     gotZonesToNode := cm.fakeIGs.GetInstancesByZone()
 
-    i := 0
+    if cm.fakeIGs.Ports[0] != testPort {
+        t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
+    }
+
     for z, nodeNames := range zoneToNode {
         if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
             t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
         }
-        if cm.fakeIGs.Ports[i] != testPort {
-            t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
-        }
         expNodes := sets.NewString(nodeNames...)
         gotNodes := sets.NewString(gotZonesToNode[z]...)
         if !gotNodes.Equal(expNodes) {
             t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
         }
-        i++
     }
 }
 

@@ -26,6 +26,8 @@ import (
     "github.com/golang/glog"
 
     compute "google.golang.org/api/compute/v1"
+    api_v1 "k8s.io/api/core/v1"
+    extensions "k8s.io/api/extensions/v1beta1"
     "k8s.io/apimachinery/pkg/api/meta"
     meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -34,8 +36,6 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
     listers "k8s.io/client-go/listers/core/v1"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
-    extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/util/workqueue"
 
@@ -595,8 +595,8 @@ func (t *GCETranslator) getHTTPProbe(svc api_v1.Service, targetPort intstr.IntOr
 // - has no special host or headers fields, except for possibly an HTTP Host header
 func isSimpleHTTPProbe(probe *api_v1.Probe) bool {
     return (probe != nil && probe.Handler.HTTPGet != nil && probe.Handler.HTTPGet.Host == "" &&
         (len(probe.Handler.HTTPGet.HTTPHeaders) == 0 ||
             (len(probe.Handler.HTTPGet.HTTPHeaders) == 1 && probe.Handler.HTTPGet.HTTPHeaders[0].Name == "Host")))
 }
 
 // GetProbe returns a probe that's used for the given nodeport

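The isSimpleHTTPProbe context above spells out what the translator accepts: an HTTPGet handler with no Host field and at most a single "Host" header. A sketch of a probe that satisfies it, written with the api_v1 alias from the new k8s.io/api/core/v1 import; the path and header value are made up.

// Illustrative probe accepted by isSimpleHTTPProbe (values are hypothetical).
probe := &api_v1.Probe{
    Handler: api_v1.Handler{
        HTTPGet: &api_v1.HTTPGetAction{
            Path: "/healthz",
            HTTPHeaders: []api_v1.HTTPHeader{
                {Name: "Host", Value: "example.com"},
            },
        },
    },
}
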
@@ -26,13 +26,13 @@ import (
     "io/ioutil"
     "log"
 
+    api_v1 "k8s.io/api/core/v1"
     registered "k8s.io/apimachinery/pkg/apimachinery/registered"
     meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/client-go/pkg/api"
-    api_v1 "k8s.io/client-go/pkg/api/v1"
 
     // This installs the legacy v1 API
+    "k8s.io/kubernetes/pkg/api"
     _ "k8s.io/kubernetes/pkg/api/install"
 )
 

@@ -18,86 +18,66 @@ package firewalls
 
 import (
     "fmt"
-    "strconv"
 
     compute "google.golang.org/api/compute/v1"
-    netset "k8s.io/kubernetes/pkg/util/net/sets"
 
     "k8s.io/ingress/controllers/gce/utils"
 )
 
 type fakeFirewallsProvider struct {
     fw         map[string]*compute.Firewall
-    namer      *utils.Namer
+    networkUrl string
 }
 
 // NewFakeFirewallsProvider creates a fake for firewall rules.
-func NewFakeFirewallsProvider(namer *utils.Namer) *fakeFirewallsProvider {
+func NewFakeFirewallsProvider() *fakeFirewallsProvider {
     return &fakeFirewallsProvider{
         fw: make(map[string]*compute.Firewall),
-        namer: namer,
     }
 }
 
-func (f *fakeFirewallsProvider) GetFirewall(prefixedName string) (*compute.Firewall, error) {
-    rule, exists := f.fw[prefixedName]
+func (ff *fakeFirewallsProvider) GetFirewall(name string) (*compute.Firewall, error) {
+    rule, exists := ff.fw[name]
     if exists {
         return rule, nil
     }
     return nil, utils.FakeGoogleAPINotFoundErr()
 }
 
-func (f *fakeFirewallsProvider) CreateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error {
-    prefixedName := f.namer.FrName(name)
-    strPorts := []string{}
-    for _, p := range ports {
-        strPorts = append(strPorts, strconv.FormatInt(p, 10))
-    }
-    if _, exists := f.fw[prefixedName]; exists {
-        return fmt.Errorf("firewall rule %v already exists", prefixedName)
-    }
-
-    f.fw[prefixedName] = &compute.Firewall{
-        // To accurately mimic the cloudprovider we need to add the k8s-fw
-        // prefix to the given rule name.
-        Name:         prefixedName,
-        SourceRanges: srcRange.StringSlice(),
-        Allowed:      []*compute.FirewallAllowed{{Ports: strPorts}},
-        TargetTags:   hosts, // WARNING: This is actually not correct, but good enough for testing this package
+func (ff *fakeFirewallsProvider) CreateFirewall(f *compute.Firewall) error {
+    if _, exists := ff.fw[f.Name]; exists {
+        return fmt.Errorf("firewall rule %v already exists", f.Name)
     }
+    ff.fw[f.Name] = f
     return nil
 }
 
-func (f *fakeFirewallsProvider) DeleteFirewall(name string) error {
+func (ff *fakeFirewallsProvider) DeleteFirewall(name string) error {
     // We need the full name for the same reason as CreateFirewall.
-    prefixedName := f.namer.FrName(name)
-    _, exists := f.fw[prefixedName]
+    _, exists := ff.fw[name]
     if !exists {
         return utils.FakeGoogleAPINotFoundErr()
     }
 
-    delete(f.fw, prefixedName)
+    delete(ff.fw, name)
     return nil
 }
 
-func (f *fakeFirewallsProvider) UpdateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error {
-    strPorts := []string{}
-    for _, p := range ports {
-        strPorts = append(strPorts, strconv.FormatInt(p, 10))
-    }
-
+func (ff *fakeFirewallsProvider) UpdateFirewall(f *compute.Firewall) error {
     // We need the full name for the same reason as CreateFirewall.
-    prefixedName := f.namer.FrName(name)
-    _, exists := f.fw[prefixedName]
+    _, exists := ff.fw[f.Name]
     if !exists {
-        return fmt.Errorf("update failed for rule %v, srcRange %v ports %v, rule not found", prefixedName, srcRange, ports)
+        return fmt.Errorf("update failed for rule %v, srcRange %v ports %+v, rule not found", f.Name, f.SourceRanges, f.Allowed)
     }
 
-    f.fw[prefixedName] = &compute.Firewall{
-        Name:         name,
-        SourceRanges: srcRange.StringSlice(),
-        Allowed:      []*compute.FirewallAllowed{{Ports: strPorts}},
-        TargetTags:   hosts, // WARNING: This is actually not correct, but good enough for testing this package
-    }
+    ff.fw[f.Name] = f
     return nil
 }
+
+func (ff *fakeFirewallsProvider) NetworkURL() string {
+    return ff.networkUrl
+}
+
+func (ff *fakeFirewallsProvider) GetNodeTags(nodeNames []string) ([]string, error) {
+    return nodeNames, nil
+}

@ -35,18 +35,18 @@ var l7SrcRanges = []string{"130.211.0.0/22", "35.191.0.0/16"}
|
||||||
type FirewallRules struct {
|
type FirewallRules struct {
|
||||||
cloud Firewall
|
cloud Firewall
|
||||||
namer *utils.Namer
|
namer *utils.Namer
|
||||||
srcRanges netset.IPNet
|
srcRanges []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFirewallPool creates a new firewall rule manager.
|
// NewFirewallPool creates a new firewall rule manager.
|
||||||
// cloud: the cloud object implementing Firewall.
|
// cloud: the cloud object implementing Firewall.
|
||||||
// namer: cluster namer.
|
// namer: cluster namer.
|
||||||
func NewFirewallPool(cloud Firewall, namer *utils.Namer) SingleFirewallPool {
|
func NewFirewallPool(cloud Firewall, namer *utils.Namer) SingleFirewallPool {
|
||||||
srcNetSet, err := netset.ParseIPNets(l7SrcRanges...)
|
_, err := netset.ParseIPNets(l7SrcRanges...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Fatalf("Could not parse L7 src ranges %v for firewall rule: %v", l7SrcRanges, err)
|
glog.Fatalf("Could not parse L7 src ranges %v for firewall rule: %v", l7SrcRanges, err)
|
||||||
}
|
}
|
||||||
return &FirewallRules{cloud: cloud, namer: namer, srcRanges: srcNetSet}
|
return &FirewallRules{cloud: cloud, namer: namer, srcRanges: l7SrcRanges}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync sync firewall rules with the cloud.
|
// Sync sync firewall rules with the cloud.
|
||||||
|
@ -60,9 +60,15 @@ func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
|
||||||
// instead of the whole name.
|
// instead of the whole name.
|
||||||
name := fr.namer.FrName(suffix)
|
name := fr.namer.FrName(suffix)
|
||||||
rule, _ := fr.cloud.GetFirewall(name)
|
rule, _ := fr.cloud.GetFirewall(name)
|
||||||
|
|
||||||
|
firewall, err := fr.createFirewallObject(name, "GCE L7 firewall rule", nodePorts, nodeNames)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if rule == nil {
|
if rule == nil {
|
||||||
glog.Infof("Creating global l7 firewall rule %v", name)
|
glog.Infof("Creating global l7 firewall rule %v", name)
|
||||||
return fr.cloud.CreateFirewall(suffix, "GCE L7 firewall rule", fr.srcRanges, nodePorts, nodeNames)
|
return fr.cloud.CreateFirewall(firewall)
|
||||||
}
|
}
|
||||||
|
|
||||||
requiredPorts := sets.NewString()
|
requiredPorts := sets.NewString()
|
||||||
|
@ -85,17 +91,17 @@ func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
|
||||||
glog.V(4).Info("Firewall does not need update of ports or source ranges")
|
glog.V(4).Info("Firewall does not need update of ports or source ranges")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(3).Infof("Firewall %v already exists, updating nodeports %v", name, nodePorts)
|
glog.V(3).Infof("Firewall %v already exists, updating nodeports %v", name, nodePorts)
|
||||||
return fr.cloud.UpdateFirewall(suffix, "GCE L7 firewall", fr.srcRanges, nodePorts, nodeNames)
|
return fr.cloud.UpdateFirewall(firewall)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown shuts down this firewall rules manager.
|
// Shutdown shuts down this firewall rules manager.
|
||||||
func (fr *FirewallRules) Shutdown() error {
|
func (fr *FirewallRules) Shutdown() error {
|
||||||
glog.Infof("Deleting firewall with suffix %v", fr.namer.FrSuffix())
|
name := fr.namer.FrName(fr.namer.FrSuffix())
|
||||||
err := fr.cloud.DeleteFirewall(fr.namer.FrSuffix())
|
glog.Infof("Deleting firewall %v", name)
|
||||||
|
err := fr.cloud.DeleteFirewall(name)
|
||||||
if err != nil && utils.IsHTTPErrorCode(err, 404) {
|
if err != nil && utils.IsHTTPErrorCode(err, 404) {
|
||||||
glog.Infof("Firewall with suffix %v didn't exist at Shutdown", fr.namer.FrSuffix())
|
glog.Infof("Firewall with name %v didn't exist at Shutdown", name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
@ -107,3 +113,31 @@ func (fr *FirewallRules) Shutdown() error {
|
||||||
func (fr *FirewallRules) GetFirewall(name string) (*compute.Firewall, error) {
|
func (fr *FirewallRules) GetFirewall(name string) (*compute.Firewall, error) {
|
||||||
return fr.cloud.GetFirewall(name)
|
return fr.cloud.GetFirewall(name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (fr *FirewallRules) createFirewallObject(firewallName, description string, nodePorts []int64, nodeNames []string) (*compute.Firewall, error) {
|
||||||
|
ports := make([]string, len(nodePorts))
|
||||||
|
for ix := range nodePorts {
|
||||||
|
ports[ix] = strconv.Itoa(int(nodePorts[ix]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the node tags to be used for this cluster have been predefined in the
|
||||||
|
// provider config, just use them. Otherwise, invoke computeHostTags method to get the tags.
|
||||||
|
targetTags, err := fr.cloud.GetNodeTags(nodeNames)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &compute.Firewall{
|
||||||
|
Name: firewallName,
|
||||||
|
Description: description,
|
||||||
|
SourceRanges: fr.srcRanges,
|
||||||
|
Network: fr.cloud.NetworkURL(),
|
||||||
|
Allowed: []*compute.FirewallAllowed{
|
||||||
|
{
|
||||||
|
IPProtocol: "tcp",
|
||||||
|
Ports: ports,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
TargetTags: targetTags,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
|
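createFirewallObject above centralizes construction of the *compute.Firewall that Sync now hands to CreateFirewall and UpdateFirewall. As a rough sketch, for node ports 30001 and 30002 it would return an object shaped like the literal below; the rule name, network URL, and target tag are illustrative stand-ins for what the namer and the cloud provider actually report.

// Approximate shape of the object createFirewallObject builds (illustrative values).
fw := &compute.Firewall{
    Name:         "k8s-fw-l7--uid1",                             // fr.namer.FrName(suffix), hypothetical
    Description:  "GCE L7 firewall rule",
    SourceRanges: []string{"130.211.0.0/22", "35.191.0.0/16"},    // fr.srcRanges (l7SrcRanges)
    Network:      "projects/example/global/networks/default",     // fr.cloud.NetworkURL(), hypothetical
    Allowed: []*compute.FirewallAllowed{
        {IPProtocol: "tcp", Ports: []string{"30001", "30002"}},
    },
    TargetTags: []string{"example-node-tag"}, // from fr.cloud.GetNodeTags(nodeNames), hypothetical
}
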
@@ -22,14 +22,11 @@ import (
 
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/ingress/controllers/gce/utils"
-    netset "k8s.io/kubernetes/pkg/util/net/sets"
 )
 
-const allCIDR = "0.0.0.0/0"
-
 func TestSyncFirewallPool(t *testing.T) {
     namer := utils.NewNamer("ABC", "XYZ")
-    fwp := NewFakeFirewallsProvider(namer)
+    fwp := NewFakeFirewallsProvider()
     fp := NewFirewallPool(fwp, namer)
     ruleName := namer.FrName(namer.FrSuffix())
 
@@ -50,12 +47,16 @@ func TestSyncFirewallPool(t *testing.T) {
     }
     verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
 
-    srcRanges, _ := netset.ParseIPNets(allCIDR)
-    err = fwp.UpdateFirewall(namer.FrSuffix(), "", srcRanges, nodePorts, nodes)
+    firewall, err := fp.(*FirewallRules).createFirewallObject(namer.FrName(namer.FrSuffix()), "", nodePorts, nodes)
+    if err != nil {
+        t.Errorf("unexpected err when creating firewall object, err: %v", err)
+    }
+
+    err = fwp.UpdateFirewall(firewall)
     if err != nil {
         t.Errorf("failed to update firewall rule, err: %v", err)
     }
-    verifyFirewallRule(fwp, ruleName, nodePorts, nodes, []string{allCIDR}, t)
+    verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
 
     // Run Sync and expect l7 src ranges to be returned
     err = fp.Sync(nodePorts, nodes)

@@ -18,7 +18,6 @@ package firewalls
 
 import (
     compute "google.golang.org/api/compute/v1"
-    netset "k8s.io/kubernetes/pkg/util/net/sets"
 )
 
 // SingleFirewallPool syncs the firewall rule for L7 traffic.
@@ -32,8 +31,10 @@ type SingleFirewallPool interface {
 // This interface is a little different from the rest because it dovetails into
 // the same firewall methods used by the TCPLoadBalancer.
 type Firewall interface {
-    CreateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error
+    CreateFirewall(f *compute.Firewall) error
     GetFirewall(name string) (*compute.Firewall, error)
     DeleteFirewall(name string) error
-    UpdateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error
+    UpdateFirewall(f *compute.Firewall) error
+    GetNodeTags(nodeNames []string) ([]string, error)
+    NetworkURL() string
 }

@@ -18,6 +18,7 @@ package instances
 
 import (
     "fmt"
+    "strings"
 
     compute "google.golang.org/api/compute/v1"
     "k8s.io/apimachinery/pkg/util/sets"
@@ -110,7 +111,8 @@ func (f *FakeInstanceGroups) ListInstancesInInstanceGroup(name, zone string, sta
 }
 
 // AddInstancesToInstanceGroup fakes adding instances to an instance group.
-func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error {
+func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error {
+    instanceNames := toInstanceNames(instanceRefs)
     f.calls = append(f.calls, utils.AddInstances)
     f.instances.Insert(instanceNames...)
     if _, ok := f.zonesToInstances[zone]; !ok {
@@ -126,7 +128,8 @@ func (f *FakeInstanceGroups) GetInstancesByZone() map[string][]string {
 }
 
 // RemoveInstancesFromInstanceGroup fakes removing instances from an instance group.
-func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceNames []string) error {
+func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error {
+    instanceNames := toInstanceNames(instanceRefs)
     f.calls = append(f.calls, utils.RemoveInstances)
     f.instances.Delete(instanceNames...)
     l, ok := f.zonesToInstances[zone]
@@ -145,10 +148,23 @@ func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string,
     return nil
 }
 
-// AddPortToInstanceGroup fakes adding ports to an Instance Group.
-func (f *FakeInstanceGroups) AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error) {
-    f.Ports = append(f.Ports, port)
-    return &compute.NamedPort{Name: f.namer.BeName(port), Port: port}, nil
+func (f *FakeInstanceGroups) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error {
+    found := false
+    for _, ig := range f.instanceGroups {
+        if ig.Name == igName && ig.Zone == zone {
+            found = true
+            break
+        }
+    }
+    if !found {
+        return fmt.Errorf("Failed to find instance group %q in zone %q", igName, zone)
+    }
+
+    f.Ports = f.Ports[:0]
+    for _, port := range namedPorts {
+        f.Ports = append(f.Ports, port.Port)
+    }
+    return nil
 }
 
 // getInstanceList returns an instance list based on the given names.
@@ -157,9 +173,7 @@ func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances
     instanceNames := nodeNames.List()
     computeInstances := []*compute.InstanceWithNamedPorts{}
     for _, name := range instanceNames {
-        instanceLink := fmt.Sprintf(
-            "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
-            "project", "zone", name)
+        instanceLink := getInstanceUrl(name)
         computeInstances = append(
             computeInstances, &compute.InstanceWithNamedPorts{
                 Instance: instanceLink})
@@ -168,3 +182,26 @@ func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances
         Items: computeInstances,
     }
 }
+
+func (f *FakeInstanceGroups) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) {
+    for _, ins := range instanceNames {
+        instanceLink := getInstanceUrl(ins)
+        refs = append(refs, &compute.InstanceReference{Instance: instanceLink})
+    }
+    return refs
+}
+
+func getInstanceUrl(instanceName string) string {
+    return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
+        "project", "zone", instanceName)
+}
+
+func toInstanceNames(instanceRefs []*compute.InstanceReference) []string {
+    instanceNames := make([]string, len(instanceRefs))
+    for ix := range instanceRefs {
+        url := instanceRefs[ix].Instance
+        parts := strings.Split(url, "/")
+        instanceNames[ix] = parts[len(parts)-1]
+    }
+    return instanceNames
+}

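The fake's new ToInstanceReferences and toInstanceNames helpers convert between bare node names and the *compute.InstanceReference values the instance-group methods now take. A quick illustrative round trip is sketched below; igs is assumed to be a *FakeInstanceGroups, and the project and zone segments come from the hard-coded values in getInstanceUrl.

// Illustrative only: round-trip node names through the new reference helpers.
refs := igs.ToInstanceReferences("zone", []string{"n1", "n2"})
// refs[0].Instance == "https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/n1"
names := toInstanceNames(refs) // back to []string{"n1", "n2"}
_ = names
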
@@ -63,13 +63,15 @@ func (i *Instances) Init(zl zoneLister) {
 // all of which have the exact same named port.
 func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
     igs := []*compute.InstanceGroup{}
-    namedPort := &compute.NamedPort{}
+    // TODO: move port naming to namer
+    namedPort := &compute.NamedPort{Name: fmt.Sprintf("port%v", port), Port: port}
 
     zones, err := i.ListZones()
     if err != nil {
         return igs, namedPort, err
     }
 
+    defer i.snapshotter.Add(name, struct{}{})
     for _, zone := range zones {
         ig, _ := i.Get(name, zone)
         var err error
@@ -82,10 +84,19 @@ func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.Instan
         } else {
             glog.V(3).Infof("Instance group %v already exists in zone %v, adding port %d to it", name, zone, port)
         }
-        defer i.snapshotter.Add(name, struct{}{})
-        namedPort, err = i.cloud.AddPortToInstanceGroup(ig, port)
-        if err != nil {
-            return nil, nil, err
+
+        found := false
+        for _, np := range ig.NamedPorts {
+            if np.Port == port {
+                glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np)
+                found = true
+                break
+            }
+        }
+        if !found {
+            if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPort)); err != nil {
+                return nil, nil, err
+            }
         }
         igs = append(igs, ig)
     }
@@ -173,7 +184,7 @@ func (i *Instances) Add(groupName string, names []string) error {
     errs := []error{}
     for zone, nodeNames := range i.splitNodesByZone(names) {
         glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone)
-        if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, nodeNames); err != nil {
+        if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil {
            errs = append(errs, err)
         }
     }
@@ -187,8 +198,8 @@ func (i *Instances) Add(groupName string, names []string) error {
 func (i *Instances) Remove(groupName string, names []string) error {
     errs := []error{}
     for zone, nodeNames := range i.splitNodesByZone(names) {
-        glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone)
-        if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, nodeNames); err != nil {
+        glog.V(1).Infof("Removing nodes %v from %v in zone %v", nodeNames, groupName, zone)
+        if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil {
            errs = append(errs, err)
         }
     }

@@ -50,7 +50,8 @@ type InstanceGroups interface {
 
     // TODO: Refactor for modulatiry.
     ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error)
-    AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error
-    RemoveInstancesFromInstanceGroup(name, zone string, instanceName []string) error
-    AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error)
+    AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
+    RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
+    ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference)
+    SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error
 }

@@ -132,10 +132,10 @@ func (f *FakeLoadBalancers) CreateGlobalForwardingRule(proxyLink, ip, name, port
 }
 
 // SetProxyForGlobalForwardingRule fakes setting a global forwarding rule.
-func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxyLink string) error {
+func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(forwardingRuleName, proxyLink string) error {
 	f.calls = append(f.calls, "SetProxyForGlobalForwardingRule")
 	for i := range f.Fw {
-		if f.Fw[i].Name == fw.Name {
+		if f.Fw[i].Name == forwardingRuleName {
 			f.Fw[i].Target = proxyLink
 		}
 	}
@@ -397,20 +397,16 @@ func (f *FakeLoadBalancers) CheckURLMap(t *testing.T, l7 *L7, expectedMap map[st
 
 // Static IP fakes
 
-// ReserveGlobalStaticIP fakes out static IP reservation.
-func (f *FakeLoadBalancers) ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error) {
-	f.calls = append(f.calls, "ReserveGlobalStaticIP")
-	ip := &compute.Address{
-		Name:    name,
-		Address: IPAddress,
-	}
-	f.IP = append(f.IP, ip)
-	return ip, nil
+// ReserveGlobalAddress fakes out static IP reservation.
+func (f *FakeLoadBalancers) ReserveGlobalAddress(addr *compute.Address) error {
+	f.calls = append(f.calls, "ReserveGlobalAddress")
+	f.IP = append(f.IP, addr)
+	return nil
 }
 
-// GetGlobalStaticIP fakes out static IP retrieval.
-func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, error) {
-	f.calls = append(f.calls, "GetGlobalStaticIP")
+// GetGlobalAddress fakes out static IP retrieval.
+func (f *FakeLoadBalancers) GetGlobalAddress(name string) (*compute.Address, error) {
+	f.calls = append(f.calls, "GetGlobalAddress")
 	for i := range f.IP {
 		if f.IP[i].Name == name {
 			return f.IP[i], nil
@@ -419,9 +415,9 @@ func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, er
 	return nil, fmt.Errorf("static IP %v not found", name)
 }
 
-// DeleteGlobalStaticIP fakes out static IP deletion.
-func (f *FakeLoadBalancers) DeleteGlobalStaticIP(name string) error {
-	f.calls = append(f.calls, "DeleteGlobalStaticIP")
+// DeleteGlobalAddress fakes out static IP deletion.
+func (f *FakeLoadBalancers) DeleteGlobalAddress(name string) error {
+	f.calls = append(f.calls, "DeleteGlobalAddress")
 	ip := []*compute.Address{}
 	for i := range f.IP {
 		if f.IP[i].Name != name {
@@ -30,7 +30,7 @@ type LoadBalancers interface {
 	GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error)
 	CreateGlobalForwardingRule(proxyLink, ip, name, portRange string) (*compute.ForwardingRule, error)
 	DeleteGlobalForwardingRule(name string) error
-	SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxy string) error
+	SetProxyForGlobalForwardingRule(fw, proxy string) error
 
 	// UrlMaps
 	GetUrlMap(name string) (*compute.UrlMap, error)
@@ -57,9 +57,10 @@ type LoadBalancers interface {
 	DeleteSslCertificate(name string) error
 
 	// Static IP
-	ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error)
-	GetGlobalStaticIP(name string) (*compute.Address, error)
-	DeleteGlobalStaticIP(name string) error
+	ReserveGlobalAddress(addr *compute.Address) error
+	GetGlobalAddress(name string) (*compute.Address, error)
+	DeleteGlobalAddress(name string) error
 }
 
 // LoadBalancerPool is an interface to manage the cloud resources associated
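The reshaped static-IP methods above no longer return the reserved *compute.Address, so callers reserve and then read the address back by name — the same flow the later checkStaticIP hunk adopts. A hedged sketch against this interface (the helper name and the trimmed error handling are illustrative only):

// ensureGlobalAddress reserves name/ip if it does not exist yet and returns the resulting address.
func ensureGlobalAddress(cloud LoadBalancers, name, ip string) (*compute.Address, error) {
	if addr, _ := cloud.GetGlobalAddress(name); addr != nil {
		return addr, nil
	}
	if err := cloud.ReserveGlobalAddress(&compute.Address{Name: name, Address: ip}); err != nil {
		return nil, err
	}
	return cloud.GetGlobalAddress(name)
}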
@@ -544,7 +544,7 @@ func (l *L7) checkForwardingRule(name, proxyLink, ip, portRange string) (fw *com
 	} else {
 		glog.Infof("Forwarding rule %v has the wrong proxy, setting %v overwriting %v",
 			fw.Name, fw.Target, proxyLink)
-		if err := l.cloud.SetProxyForGlobalForwardingRule(fw, proxyLink); err != nil {
+		if err := l.cloud.SetProxyForGlobalForwardingRule(fw.Name, proxyLink); err != nil {
 			return nil, err
 		}
 	}
@@ -576,7 +576,7 @@ func (l *L7) getEffectiveIP() (string, bool) {
 	if l.runtimeInfo.StaticIPName != "" {
 		// Existing static IPs allocated to forwarding rules will get orphaned
 		// till the Ingress is torn down.
-		if ip, err := l.cloud.GetGlobalStaticIP(l.runtimeInfo.StaticIPName); err != nil || ip == nil {
+		if ip, err := l.cloud.GetGlobalAddress(l.runtimeInfo.StaticIPName); err != nil || ip == nil {
 			glog.Warningf("The given static IP name %v doesn't translate to an existing global static IP, ignoring it and allocating a new IP: %v",
 				l.runtimeInfo.StaticIPName, err)
 		} else {
@@ -629,10 +629,10 @@ func (l *L7) checkStaticIP() (err error) {
 		return nil
 	}
 	staticIPName := l.namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, l.Name))
-	ip, _ := l.cloud.GetGlobalStaticIP(staticIPName)
+	ip, _ := l.cloud.GetGlobalAddress(staticIPName)
 	if ip == nil {
 		glog.Infof("Creating static ip %v", staticIPName)
-		ip, err = l.cloud.ReserveGlobalStaticIP(staticIPName, l.fw.IPAddress)
+		err = l.cloud.ReserveGlobalAddress(&compute.Address{Name: staticIPName, Address: l.fw.IPAddress})
 		if err != nil {
 			if utils.IsHTTPErrorCode(err, http.StatusConflict) ||
 				utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
@@ -642,6 +642,10 @@ func (l *L7) checkStaticIP() (err error) {
 			}
 			return err
 		}
+		ip, err = l.cloud.GetGlobalAddress(staticIPName)
+		if err != nil {
+			return err
+		}
 	}
 	l.ip = ip
 	return nil
@@ -903,7 +907,7 @@ func (l *L7) Cleanup() error {
 	}
 	if l.ip != nil {
 		glog.V(2).Infof("Deleting static IP %v(%v)", l.ip.Name, l.ip.Address)
-		if err := utils.IgnoreHTTPNotFound(l.cloud.DeleteGlobalStaticIP(l.ip.Name)); err != nil {
+		if err := utils.IgnoreHTTPNotFound(l.cloud.DeleteGlobalAddress(l.ip.Name)); err != nil {
 			return err
 		}
 		l.ip = nil
@@ -289,7 +289,7 @@ func TestCreateBothLoadBalancers(t *testing.T) {
 	if err != nil || fw.Target != tp.SelfLink {
 		t.Fatalf("%v", err)
 	}
-	ip, err := f.GetGlobalStaticIP(f.fwName(false))
+	ip, err := f.GetGlobalAddress(f.fwName(false))
 	if err != nil || ip.Address != fw.IPAddress || ip.Address != fws.IPAddress {
 		t.Fatalf("%v", err)
 	}
@@ -30,14 +30,13 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	flag "github.com/spf13/pflag"
-	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/pkg/api"
-	api_v1 "k8s.io/client-go/pkg/api/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
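Note: the import rewrite above is the pattern that repeats through the rest of this commit — types that used to be vendored under k8s.io/client-go/pkg/... are now consumed from the split-out k8s.io/api repository. A representative before/after sketch (the commented lines mark the retired paths; this is illustrative, not a line taken from the commit):

import (
	// retired: api "k8s.io/client-go/pkg/api/v1"
	// retired: extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
	api "k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
)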
@@ -122,7 +121,7 @@ var (
 		`Path used to health-check a backend service. All Services must serve
 a 200 page on this path. Currently this is only configurable globally.`)
 
-	watchNamespace = flags.String("watch-namespace", api.NamespaceAll,
+	watchNamespace = flags.String("watch-namespace", v1.NamespaceAll,
 		`Namespace to watch for Ingress/Services/Endpoints.`)
 
 	verbose = flags.Bool("verbose", false,
@@ -287,7 +286,7 @@ func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string
 	}
 
 	namer := utils.NewNamer(name, fw_name)
-	uidVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	uidVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
 
 	// Start a goroutine to poll the cluster UID config map
 	// We don't watch because we know exactly which configmap we want and this
@@ -359,7 +358,7 @@ func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_n
 // Use getFlagOrLookupVault to obtain a stored or overridden value for the firewall name.
 // else, use the cluster UID as a backup (this retains backwards compatibility).
 func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) {
-	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
 	if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
 		return "", err
 	} else if fw_name != "" {
@@ -377,7 +376,7 @@ func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string)
 // - remember that "" is the cluster uid
 // else, allocate a new uid
 func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) {
-	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
 	if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
 		return "", err
 	} else if name != "" {
@@ -385,7 +384,7 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error)
 	}
 
 	// Check if the cluster has an Ingress with ip
-	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{
+	ings, err := kubeClient.Extensions().Ingresses(metav1.NamespaceAll).List(metav1.ListOptions{
 		LabelSelector: labels.Everything().String(),
 	})
 	if err != nil {
@@ -419,10 +418,10 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error)
 
 // getNodePort waits for the Service, and returns it's first node port.
 func getNodePort(client kubernetes.Interface, ns, name string) (port, nodePort int32, err error) {
-	var svc *api_v1.Service
+	var svc *v1.Service
 	glog.V(3).Infof("Waiting for %v/%v", ns, name)
 	wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
-		svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
+		svc, err = client.Core().Services(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
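The getNodePort change above keeps the same retry idiom; as a minimal sketch of that wait.Poll pattern (the client/ns/name variables are illustrative and error handling is trimmed):

var svc *v1.Service
pollErr := wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
	s, getErr := client.Core().Services(ns).Get(name, metav1.GetOptions{})
	if getErr != nil {
		return false, nil // Service not visible yet; keep polling
	}
	svc = s
	return true, nil
})
if pollErr != nil {
	glog.Warningf("timed out waiting for service %v/%v", ns, name)
}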
@@ -23,10 +23,10 @@ import (
 
 	"github.com/golang/glog"
 
+	api_v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
-	api_v1 "k8s.io/client-go/pkg/api/v1"
 	"k8s.io/client-go/tools/cache"
 )
 
@@ -19,7 +19,7 @@ package storage
 import (
 	"testing"
 
-	"k8s.io/client-go/pkg/api"
+	api "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func TestConfigMapUID(t *testing.T) {
@@ -34,7 +34,7 @@ import (
 	"github.com/spf13/pflag"
 
 	proxyproto "github.com/armon/go-proxyproto"
-	api_v1 "k8s.io/client-go/pkg/api/v1"
+	api_v1 "k8s.io/api/core/v1"
 
 	"k8s.io/ingress/controllers/nginx/pkg/config"
 	ngx_template "k8s.io/ingress/controllers/nginx/pkg/template"
@@ -335,39 +335,39 @@ func NewDefault() Configuration {
 		HTTP2MaxFieldSize:          "4k",
 		HTTP2MaxHeaderSize:         "16k",
 		HSTS:                       true,
 		HSTSIncludeSubdomains:      true,
 		HSTSMaxAge:                 hstsMaxAge,
 		HSTSPreload:                false,
 		IgnoreInvalidHeaders:       true,
 		GzipTypes:                  gzipTypes,
 		KeepAlive:                  75,
 		KeepAliveRequests:          100,
 		LargeClientHeaderBuffers:   "4 8k",
 		LogFormatEscapeJSON:        false,
 		LogFormatStream:            logFormatStream,
 		LogFormatUpstream:          logFormatUpstream,
 		MaxWorkerConnections:       16384,
 		MapHashBucketSize:          64,
 		ProxyRealIPCIDR:            defIPCIDR,
 		ServerNameHashMaxSize:      1024,
 		ProxyHeadersHashMaxSize:    512,
 		ProxyHeadersHashBucketSize: 64,
 		ShowServerTokens:           true,
 		SSLBufferSize:              sslBufferSize,
 		SSLCiphers:                 sslCiphers,
 		SSLECDHCurve:               "secp384r1",
 		SSLProtocols:               sslProtocols,
 		SSLSessionCache:            true,
 		SSLSessionCacheSize:        sslSessionCacheSize,
 		SSLSessionTickets:          true,
 		SSLSessionTimeout:          sslSessionTimeout,
 		UseGzip:                    true,
 		WorkerProcesses:            strconv.Itoa(runtime.NumCPU()),
 		LoadBalanceAlgorithm:       defaultLoadBalancerAlgorithm,
 		VtsStatusZoneSize:          "10m",
 		VariablesHashBucketSize:    64,
 		VariablesHashMaxSize:       2048,
 		UseHTTP2:                   true,
 		Backend: defaults.Backend{
 			ProxyBodySize:       bodySize,
 			ProxyConnectTimeout: 5,
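The block above is unchanged context from config.NewDefault(); a hedged usage sketch of how a caller can start from those defaults and override individual fields (the field choices here are illustrative):

cfg := config.NewDefault()
cfg.KeepAlive = 120  // seconds, instead of the default 75
cfg.UseHTTP2 = false // opt out of the default-on HTTP/2 support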
@@ -24,8 +24,8 @@ import (
 	"regexp"
 
 	"github.com/pkg/errors"
-	api "k8s.io/client-go/pkg/api/v1"
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	api "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 
 	"k8s.io/ingress/core/pkg/ingress/annotations/parser"
 	ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
@@ -25,10 +25,10 @@ import (
 
 	"github.com/pkg/errors"
 
+	api "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	api "k8s.io/client-go/pkg/api/v1"
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
 )
 
 func buildIngress() *extensions.Ingress {
@ -21,7 +21,7 @@ import (
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
|
ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
|
||||||
|
|
|
@ -21,9 +21,9 @@ import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
)
|
)
|
||||||
|
|
|
@ -18,7 +18,7 @@ package authtls
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
|
ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
|
||||||
|
|
|
@ -19,10 +19,10 @@ package authtls
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func buildIngress() *extensions.Ingress {
|
func buildIngress() *extensions.Ingress {
|
||||||
|
|
|
@ -18,7 +18,7 @@ package class
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
"k8s.io/ingress/core/pkg/ingress/errors"
|
"k8s.io/ingress/core/pkg/ingress/errors"
|
||||||
|
|
|
@ -19,9 +19,9 @@ package class
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestIsValidClass(t *testing.T) {
|
func TestIsValidClass(t *testing.T) {
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package cors
|
package cors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,9 +19,9 @@ package cors
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package healthcheck
|
package healthcheck
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
"k8s.io/ingress/core/pkg/ingress/resolver"
|
"k8s.io/ingress/core/pkg/ingress/resolver"
|
||||||
|
|
|
@ -19,10 +19,10 @@ package healthcheck
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||||
)
|
)
|
||||||
|
|
|
@ -22,7 +22,7 @@ import (
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
"k8s.io/kubernetes/pkg/util/net/sets"
|
"k8s.io/kubernetes/pkg/util/net/sets"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
|
|
|
@ -20,10 +20,10 @@ import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||||
"k8s.io/ingress/core/pkg/ingress/errors"
|
"k8s.io/ingress/core/pkg/ingress/errors"
|
||||||
|
|
|
@ -19,7 +19,7 @@ package parser
|
||||||
import (
|
import (
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/errors"
|
"k8s.io/ingress/core/pkg/ingress/errors"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,9 +19,9 @@ package parser
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func buildIngress() *extensions.Ingress {
|
func buildIngress() *extensions.Ingress {
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package portinredirect
|
package portinredirect
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
"k8s.io/ingress/core/pkg/ingress/resolver"
|
"k8s.io/ingress/core/pkg/ingress/resolver"
|
||||||
|
|
|
@ -19,10 +19,10 @@ package portinredirect
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package proxy
|
package proxy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
"k8s.io/ingress/core/pkg/ingress/resolver"
|
"k8s.io/ingress/core/pkg/ingress/resolver"
|
||||||
|
|
|
@ -19,10 +19,10 @@ package proxy
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,7 +19,7 @@ package ratelimit
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,9 +19,9 @@ package ratelimit
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
)
|
)
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package rewrite
|
package rewrite
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
"k8s.io/ingress/core/pkg/ingress/resolver"
|
"k8s.io/ingress/core/pkg/ingress/resolver"
|
||||||
|
|
|
@ -19,10 +19,10 @@ package rewrite
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||||
)
|
)
|
||||||
|
|
|
@ -20,7 +20,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
"k8s.io/ingress/core/pkg/ingress/resolver"
|
"k8s.io/ingress/core/pkg/ingress/resolver"
|
||||||
|
|
|
@ -19,9 +19,9 @@ package secureupstream
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package serviceupstream
|
package serviceupstream
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -21,8 +21,8 @@ import (
|
||||||
|
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
api "k8s.io/api/core/v1"
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func buildIngress() *extensions.Ingress {
|
func buildIngress() *extensions.Ingress {
|
||||||
|
|
|
@ -21,7 +21,7 @@ import (
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,10 +19,10 @@ package sessionaffinity
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func buildIngress() *extensions.Ingress {
|
func buildIngress() *extensions.Ingress {
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package snippet
|
package snippet
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
)
|
)
|
||||||
|
|
|
@ -19,9 +19,9 @@ package snippet
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParse(t *testing.T) {
|
func TestParse(t *testing.T) {
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package sslpassthrough
|
package sslpassthrough
|
||||||
|
|
||||||
import (
|
import (
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
"k8s.io/ingress/core/pkg/ingress/annotations/parser"
|
||||||
ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
|
ing_errors "k8s.io/ingress/core/pkg/ingress/errors"
|
||||||
|
|
|
@ -19,9 +19,9 @@ package sslpassthrough
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
)
|
)
|
||||||
|
|
|
@ -18,7 +18,7 @@ package controller
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/auth"
|
"k8s.io/ingress/core/pkg/ingress/annotations/auth"
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/authreq"
|
"k8s.io/ingress/core/pkg/ingress/annotations/authreq"
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/authtls"
|
"k8s.io/ingress/core/pkg/ingress/annotations/authtls"
|
||||||
|
|
|
@ -19,10 +19,10 @@ package controller
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||||
"k8s.io/ingress/core/pkg/ingress/resolver"
|
"k8s.io/ingress/core/pkg/ingress/resolver"
|
||||||
|
|
|
@ -23,8 +23,8 @@ import (
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
api "k8s.io/api/core/v1"
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress"
|
"k8s.io/ingress/core/pkg/ingress"
|
||||||
|
|
|
@ -23,9 +23,9 @@ import (
|
||||||
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
api_v1 "k8s.io/api/core/v1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
testclient "k8s.io/client-go/kubernetes/fake"
|
testclient "k8s.io/client-go/kubernetes/fake"
|
||||||
api_v1 "k8s.io/client-go/pkg/api/v1"
|
|
||||||
cache_client "k8s.io/client-go/tools/cache"
|
cache_client "k8s.io/client-go/tools/cache"
|
||||||
"k8s.io/ingress/core/pkg/ingress"
|
"k8s.io/ingress/core/pkg/ingress"
|
||||||
"k8s.io/ingress/core/pkg/ingress/store"
|
"k8s.io/ingress/core/pkg/ingress/store"
|
||||||
|
|
|
@ -29,14 +29,14 @@ import (
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
"k8s.io/apimachinery/pkg/util/runtime"
|
"k8s.io/apimachinery/pkg/util/runtime"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
|
unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
"k8s.io/client-go/util/flowcontrol"
|
"k8s.io/client-go/util/flowcontrol"
|
||||||
|
|
|
@ -14,9 +14,9 @@ import (
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
"k8s.io/apiserver/pkg/server/healthz"
|
"k8s.io/apiserver/pkg/server/healthz"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
clientcmd_api "k8s.io/client-go/tools/clientcmd/api"
|
clientcmd_api "k8s.io/client-go/tools/clientcmd/api"
|
||||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
||||||
package resolver
|
package resolver
|
||||||
|
|
||||||
import (
|
import (
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
api "k8s.io/api/core/v1"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/defaults"
|
"k8s.io/ingress/core/pkg/ingress/defaults"
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,122 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright 2015 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package status
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/golang/glog"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
client "k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
"k8s.io/client-go/tools/record"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/status/leaderelection"
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getCurrentLeader(electionID, namespace string, c client.Interface) (string, *api.Endpoints, error) {
|
|
||||||
endpoints, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{})
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
val, found := endpoints.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]
|
|
||||||
if !found {
|
|
||||||
return "", endpoints, nil
|
|
||||||
}
|
|
||||||
electionRecord := resourcelock.LeaderElectionRecord{}
|
|
||||||
if err = json.Unmarshal([]byte(val), &electionRecord); err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
return electionRecord.HolderIdentity, endpoints, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewElection creates an election. 'namespace'/'election' should be an existing Kubernetes Service
|
|
||||||
// 'id' is the id if this leader, should be unique.
|
|
||||||
func NewElection(electionID,
|
|
||||||
id,
|
|
||||||
namespace string,
|
|
||||||
ttl time.Duration,
|
|
||||||
callback func(leader string),
|
|
||||||
c client.Interface) (*leaderelection.LeaderElector, error) {
|
|
||||||
|
|
||||||
_, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{})
|
|
||||||
if err != nil {
|
|
||||||
if errors.IsNotFound(err) {
|
|
||||||
_, err = c.Core().Endpoints(namespace).Create(&api.Endpoints{
|
|
||||||
ObjectMeta: meta_v1.ObjectMeta{
|
|
||||||
Name: electionID,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil && !errors.IsConflict(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
callbacks := leaderelection.LeaderCallbacks{
|
|
||||||
OnStartedLeading: func(stop <-chan struct{}) {
|
|
||||||
callback(id)
|
|
||||||
},
|
|
||||||
OnStoppedLeading: func() {
|
|
||||||
leader, _, err := getCurrentLeader(electionID, namespace, c)
|
|
||||||
if err != nil {
|
|
||||||
glog.Errorf("failed to get leader: %v", err)
|
|
||||||
// empty string means leader is unknown
|
|
||||||
callback("")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
callback(leader)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
broadcaster := record.NewBroadcaster()
|
|
||||||
hostname, err := os.Hostname()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
recorder := broadcaster.NewRecorder(scheme.Scheme, api.EventSource{
|
|
||||||
Component: "ingress-leader-elector",
|
|
||||||
Host: hostname,
|
|
||||||
})
|
|
||||||
|
|
||||||
lock := resourcelock.ConfigMapLock{
|
|
||||||
ConfigMapMeta: meta_v1.ObjectMeta{Namespace: namespace, Name: electionID},
|
|
||||||
Client: c,
|
|
||||||
LockConfig: resourcelock.ResourceLockConfig{
|
|
||||||
Identity: id,
|
|
||||||
EventRecorder: recorder,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
config := leaderelection.LeaderElectionConfig{
|
|
||||||
Lock: &lock,
|
|
||||||
LeaseDuration: ttl,
|
|
||||||
RenewDeadline: ttl / 2,
|
|
||||||
RetryPeriod: ttl / 4,
|
|
||||||
Callbacks: callbacks,
|
|
||||||
}
|
|
||||||
|
|
||||||
return leaderelection.NewLeaderElector(config)
|
|
||||||
}
|
|
|
@ -1,132 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package status
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
|
||||||
"k8s.io/client-go/pkg/api"
|
|
||||||
api_v1 "k8s.io/client-go/pkg/api/v1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGetCurrentLeaderLeaderExist(t *testing.T) {
|
|
||||||
fkER := resourcelock.LeaderElectionRecord{
|
|
||||||
HolderIdentity: "currentLeader",
|
|
||||||
LeaseDurationSeconds: 30,
|
|
||||||
AcquireTime: meta_v1.NewTime(time.Now()),
|
|
||||||
RenewTime: meta_v1.NewTime(time.Now()),
|
|
||||||
LeaderTransitions: 3,
|
|
||||||
}
|
|
||||||
leaderInfo, _ := json.Marshal(fkER)
|
|
||||||
fkEndpoints := api_v1.Endpoints{
|
|
||||||
ObjectMeta: meta_v1.ObjectMeta{
|
|
||||||
Name: "ingress-controller-test",
|
|
||||||
Namespace: api.NamespaceSystem,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
resourcelock.LeaderElectionRecordAnnotationKey: string(leaderInfo),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}})
|
|
||||||
identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected identitiy and endpoints but returned error %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if endpoints == nil {
|
|
||||||
t.Fatalf("returned nil but expected an endpoints")
|
|
||||||
}
|
|
||||||
|
|
||||||
if identity != "currentLeader" {
|
|
||||||
t.Fatalf("returned %v but expected %v", identity, "currentLeader")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetCurrentLeaderLeaderNotExist(t *testing.T) {
|
|
||||||
fkEndpoints := api_v1.Endpoints{
|
|
||||||
ObjectMeta: meta_v1.ObjectMeta{
|
|
||||||
Name: "ingress-controller-test",
|
|
||||||
Namespace: api.NamespaceSystem,
|
|
||||||
Annotations: map[string]string{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}})
|
|
||||||
identity, endpoints, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpeted error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if endpoints == nil {
|
|
||||||
t.Fatalf("returned nil but expected an endpoints")
|
|
||||||
}
|
|
||||||
|
|
||||||
if identity != "" {
|
|
||||||
t.Fatalf("returned %s but expected %s", identity, "")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetCurrentLeaderAnnotationError(t *testing.T) {
|
|
||||||
fkEndpoints := api_v1.Endpoints{
|
|
||||||
ObjectMeta: meta_v1.ObjectMeta{
|
|
||||||
Name: "ingress-controller-test",
|
|
||||||
Namespace: api.NamespaceSystem,
|
|
||||||
Annotations: map[string]string{
|
|
||||||
resourcelock.LeaderElectionRecordAnnotationKey: "just-test-error-leader-annotation",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{fkEndpoints}})
|
|
||||||
_, _, err := getCurrentLeader("ingress-controller-test", api.NamespaceSystem, fk)
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("expected error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewElection(t *testing.T) {
|
|
||||||
fk := fake.NewSimpleClientset(&api_v1.EndpointsList{Items: []api_v1.Endpoints{
|
|
||||||
{
|
|
||||||
ObjectMeta: meta_v1.ObjectMeta{
|
|
||||||
Name: "ingress-controller-test",
|
|
||||||
Namespace: api.NamespaceSystem,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ObjectMeta: meta_v1.ObjectMeta{
|
|
||||||
Name: "ingress-controller-test-020",
|
|
||||||
Namespace: api.NamespaceSystem,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}})
|
|
||||||
|
|
||||||
ne, err := NewElection("ingress-controller-test", "startLeader", api.NamespaceSystem, 4*time.Second, func(leader string) {
|
|
||||||
// do nothing
|
|
||||||
go t.Logf("execute callback fun, leader is: %s", leader)
|
|
||||||
}, fk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if ne == nil {
|
|
||||||
t.Fatalf("unexpected nil")
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -19,21 +19,25 @@ package status
 import (
 	"fmt"
 	"net"
+	"os"
 	"sort"
 	"sync"
 	"time"
 
 	"github.com/golang/glog"
 
+	v1 "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	api_v1 "k8s.io/client-go/pkg/api/v1"
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	"k8s.io/client-go/tools/record"
 
 	"k8s.io/ingress/core/pkg/ingress/annotations/class"
-	"k8s.io/ingress/core/pkg/ingress/status/leaderelection"
 	"k8s.io/ingress/core/pkg/ingress/store"
 	"k8s.io/ingress/core/pkg/k8s"
 	"k8s.io/ingress/core/pkg/strings"
@@ -126,7 +130,7 @@ func (s statusSync) Shutdown() {
 	}
 
 	glog.Infof("removing address from ingress status (%v)", addrs)
-	s.updateStatus([]api_v1.LoadBalancerIngress{})
+	s.updateStatus([]v1.LoadBalancerIngress{})
 }
 
 func (s *statusSync) run() {
@@ -198,17 +202,49 @@ func NewStatusSyncer(config Config) Sync {
 
 	// we need to use the defined ingress class to allow multiple leaders
 	// in order to update information about ingress status
-	id := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass)
+	electionID := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass)
 	if config.IngressClass != "" {
-		id = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass)
+		electionID = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass)
 	}
 
-	le, err := NewElection(id,
-		pod.Name, pod.Namespace, 30*time.Second,
-		st.callback, config.Client)
+	callbacks := leaderelection.LeaderCallbacks{
+		OnStartedLeading: func(stop <-chan struct{}) {
+			st.callback(pod.Name)
+		},
+		OnStoppedLeading: func() {
+			st.callback("")
+		},
+	}
+
+	broadcaster := record.NewBroadcaster()
+	hostname, _ := os.Hostname()
+
+	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{
+		Component: "ingress-leader-elector",
+		Host:      hostname,
+	})
+
+	lock := resourcelock.ConfigMapLock{
+		ConfigMapMeta: meta_v1.ObjectMeta{Namespace: pod.Namespace, Name: electionID},
+		Client:        config.Client.Core(),
+		LockConfig: resourcelock.ResourceLockConfig{
+			Identity:      electionID,
+			EventRecorder: recorder,
+		},
+	}
+
+	le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
+		Lock:          &lock,
+		LeaseDuration: 30 * time.Second,
+		RenewDeadline: 15 * time.Second,
+		RetryPeriod:   5 * time.Second,
+		Callbacks:     callbacks,
+	})
+
 	if err != nil {
 		glog.Fatalf("unexpected error starting leader election: %v", err)
 	}
 
 	st.elector = le
 	return st
 }
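The hunk above swaps the controller's bundled leader-election helper for client-go's leaderelection package: callbacks, an event recorder and a ConfigMapLock are wired into a LeaderElector. A hedged sketch of how the stored elector is then typically driven (in client-go releases of this era LeaderElector.Run() takes no arguments; newer releases take a context.Context, so adjust to the vendored version):

// illustrative only; st.elector is the *leaderelection.LeaderElector built above,
// and stopCh is a hypothetical stop channel owned by the syncer.
go func() {
	for {
		st.elector.Run() // blocks while campaigning for and holding the lease
		select {
		case <-stopCh:
			return
		default:
			// lease lost; loop and campaign again
		}
	}
}()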
@@ -265,13 +301,13 @@ func (s *statusSync) isRunningMultiplePods() bool {
 }
 
 // sliceToStatus converts a slice of IP and/or hostnames to LoadBalancerIngress
-func sliceToStatus(endpoints []string) []api_v1.LoadBalancerIngress {
-	lbi := []api_v1.LoadBalancerIngress{}
+func sliceToStatus(endpoints []string) []v1.LoadBalancerIngress {
+	lbi := []v1.LoadBalancerIngress{}
 	for _, ep := range endpoints {
 		if net.ParseIP(ep) == nil {
-			lbi = append(lbi, api_v1.LoadBalancerIngress{Hostname: ep})
+			lbi = append(lbi, v1.LoadBalancerIngress{Hostname: ep})
 		} else {
-			lbi = append(lbi, api_v1.LoadBalancerIngress{IP: ep})
+			lbi = append(lbi, v1.LoadBalancerIngress{IP: ep})
 		}
 	}
 
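A small usage note for the converted sliceToStatus above: entries that parse as IPs populate the IP field, anything else is treated as a hostname.

ingress := sliceToStatus([]string{"10.0.0.1", "lb.example.com"})
// ingress[0] == v1.LoadBalancerIngress{IP: "10.0.0.1"}
// ingress[1] == v1.LoadBalancerIngress{Hostname: "lb.example.com"}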
@@ -279,7 +315,7 @@ func sliceToStatus(endpoints []string) []api_v1.LoadBalancerIngress {
 	return lbi
 }
 
-func (s *statusSync) updateStatus(newIPs []api_v1.LoadBalancerIngress) {
+func (s *statusSync) updateStatus(newIPs []v1.LoadBalancerIngress) {
 	ings := s.IngressLister.List()
 	var wg sync.WaitGroup
 	wg.Add(len(ings))
@@ -319,7 +355,7 @@ func (s *statusSync) updateStatus(newIPs []api_v1.LoadBalancerIngress) {
 	wg.Wait()
 }
 
-func ingressSliceEqual(lhs, rhs []api_v1.LoadBalancerIngress) bool {
+func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool {
 	if len(lhs) != len(rhs) {
 		return false
 	}
@ -336,7 +372,7 @@ func ingressSliceEqual(lhs, rhs []api_v1.LoadBalancerIngress) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadBalancerIngressByIP sorts LoadBalancerIngress using the field IP
|
// loadBalancerIngressByIP sorts LoadBalancerIngress using the field IP
|
||||||
type loadBalancerIngressByIP []api_v1.LoadBalancerIngress
|
type loadBalancerIngressByIP []v1.LoadBalancerIngress
|
||||||
|
|
||||||
func (c loadBalancerIngressByIP) Len() int { return len(c) }
|
func (c loadBalancerIngressByIP) Len() int { return len(c) }
|
||||||
func (c loadBalancerIngressByIP) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
|
func (c loadBalancerIngressByIP) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
|
||||||
|
|
|
@ -23,12 +23,12 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
api_v1 "k8s.io/api/core/v1"
|
||||||
|
extensions "k8s.io/api/extensions/v1beta1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
testclient "k8s.io/client-go/kubernetes/fake"
|
testclient "k8s.io/client-go/kubernetes/fake"
|
||||||
"k8s.io/client-go/pkg/api"
|
|
||||||
api_v1 "k8s.io/client-go/pkg/api/v1"
|
|
||||||
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
|
"k8s.io/kubernetes/pkg/api"
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/class"
|
"k8s.io/ingress/core/pkg/ingress/annotations/class"
|
||||||
cache_store "k8s.io/ingress/core/pkg/ingress/store"
|
cache_store "k8s.io/ingress/core/pkg/ingress/store"
|
||||||
|
|
|
@ -19,7 +19,7 @@ package store
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
api "k8s.io/api/core/v1"
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -21,9 +21,9 @@ import (
|
||||||
|
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
"k8s.io/apiserver/pkg/server/healthz"
|
"k8s.io/apiserver/pkg/server/healthz"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
|
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/auth"
|
"k8s.io/ingress/core/pkg/ingress/annotations/auth"
|
||||||
"k8s.io/ingress/core/pkg/ingress/annotations/authreq"
|
"k8s.io/ingress/core/pkg/ingress/annotations/authreq"
|
||||||
|
|
|
@ -21,9 +21,9 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// IsValidService checks if exists a service with the specified name
|
// IsValidService checks if exists a service with the specified name
|
||||||
|
|
|
@ -20,9 +20,9 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
api "k8s.io/api/core/v1"
|
||||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
testclient "k8s.io/client-go/kubernetes/fake"
|
testclient "k8s.io/client-go/kubernetes/fake"
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseNameNS(t *testing.T) {
|
func TestParseNameNS(t *testing.T) {
|
||||||
|
|
|
@ -24,7 +24,7 @@ import (
|
||||||
|
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
|
|
||||||
api "k8s.io/client-go/pkg/api/v1"
|
api "k8s.io/api/core/v1"
|
||||||
|
|
||||||
nginxconfig "k8s.io/ingress/controllers/nginx/pkg/config"
|
nginxconfig "k8s.io/ingress/controllers/nginx/pkg/config"
|
||||||
"k8s.io/ingress/core/pkg/ingress"
|
"k8s.io/ingress/core/pkg/ingress"
|
||||||
|
|
13
vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
13
vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
|
@ -1,13 +0,0 @@
|
||||||
include $(GOROOT)/src/Make.inc
|
|
||||||
|
|
||||||
TARG=bitbucket.org/ww/goautoneg
|
|
||||||
GOFILES=autoneg.go
|
|
||||||
|
|
||||||
include $(GOROOT)/src/Make.pkg
|
|
||||||
|
|
||||||
format:
|
|
||||||
gofmt -w *.go
|
|
||||||
|
|
||||||
docs:
|
|
||||||
gomake clean
|
|
||||||
godoc ${TARG} > README.txt
|
|
67
vendor/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
67
vendor/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
|
@ -1,67 +0,0 @@
|
||||||
PACKAGE
|
|
||||||
|
|
||||||
package goautoneg
|
|
||||||
import "bitbucket.org/ww/goautoneg"
|
|
||||||
|
|
||||||
HTTP Content-Type Autonegotiation.
|
|
||||||
|
|
||||||
The functions in this package implement the behaviour specified in
|
|
||||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
|
||||||
|
|
||||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
Redistributions in binary form must reproduce the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer in
|
|
||||||
the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
|
|
||||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
|
||||||
names of its contributors may be used to endorse or promote
|
|
||||||
products derived from this software without specific prior written
|
|
||||||
permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
|
|
||||||
FUNCTIONS
|
|
||||||
|
|
||||||
func Negotiate(header string, alternatives []string) (content_type string)
|
|
||||||
Negotiate the most appropriate content_type given the accept header
|
|
||||||
and a list of alternatives.
|
|
||||||
|
|
||||||
func ParseAccept(header string) (accept []Accept)
|
|
||||||
Parse an Accept Header string returning a sorted list
|
|
||||||
of clauses
|
|
||||||
|
|
||||||
|
|
||||||
TYPES
|
|
||||||
|
|
||||||
type Accept struct {
|
|
||||||
Type, SubType string
|
|
||||||
Q float32
|
|
||||||
Params map[string]string
|
|
||||||
}
|
|
||||||
Structure to represent a clause in an HTTP Accept Header
|
|
||||||
|
|
||||||
|
|
||||||
SUBDIRECTORIES
|
|
||||||
|
|
||||||
.hg
|
|
162
vendor/bitbucket.org/ww/goautoneg/autoneg.go
generated
vendored
162
vendor/bitbucket.org/ww/goautoneg/autoneg.go
generated
vendored
|
@ -1,162 +0,0 @@
|
||||||
/*
|
|
||||||
HTTP Content-Type Autonegotiation.
|
|
||||||
|
|
||||||
The functions in this package implement the behaviour specified in
|
|
||||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
|
||||||
|
|
||||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
Redistributions in binary form must reproduce the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer in
|
|
||||||
the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
|
|
||||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
|
||||||
names of its contributors may be used to endorse or promote
|
|
||||||
products derived from this software without specific prior written
|
|
||||||
permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
|
|
||||||
*/
|
|
||||||
package goautoneg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Structure to represent a clause in an HTTP Accept Header
|
|
||||||
type Accept struct {
|
|
||||||
Type, SubType string
|
|
||||||
Q float64
|
|
||||||
Params map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// For internal use, so that we can use the sort interface
|
|
||||||
type accept_slice []Accept
|
|
||||||
|
|
||||||
func (accept accept_slice) Len() int {
|
|
||||||
slice := []Accept(accept)
|
|
||||||
return len(slice)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (accept accept_slice) Less(i, j int) bool {
|
|
||||||
slice := []Accept(accept)
|
|
||||||
ai, aj := slice[i], slice[j]
|
|
||||||
if ai.Q > aj.Q {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ai.Type != "*" && aj.Type == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ai.SubType != "*" && aj.SubType == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (accept accept_slice) Swap(i, j int) {
|
|
||||||
slice := []Accept(accept)
|
|
||||||
slice[i], slice[j] = slice[j], slice[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse an Accept Header string returning a sorted list
|
|
||||||
// of clauses
|
|
||||||
func ParseAccept(header string) (accept []Accept) {
|
|
||||||
parts := strings.Split(header, ",")
|
|
||||||
accept = make([]Accept, 0, len(parts))
|
|
||||||
for _, part := range parts {
|
|
||||||
part := strings.Trim(part, " ")
|
|
||||||
|
|
||||||
a := Accept{}
|
|
||||||
a.Params = make(map[string]string)
|
|
||||||
a.Q = 1.0
|
|
||||||
|
|
||||||
mrp := strings.Split(part, ";")
|
|
||||||
|
|
||||||
media_range := mrp[0]
|
|
||||||
sp := strings.Split(media_range, "/")
|
|
||||||
a.Type = strings.Trim(sp[0], " ")
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case len(sp) == 1 && a.Type == "*":
|
|
||||||
a.SubType = "*"
|
|
||||||
case len(sp) == 2:
|
|
||||||
a.SubType = strings.Trim(sp[1], " ")
|
|
||||||
default:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(mrp) == 1 {
|
|
||||||
accept = append(accept, a)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, param := range mrp[1:] {
|
|
||||||
sp := strings.SplitN(param, "=", 2)
|
|
||||||
if len(sp) != 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
token := strings.Trim(sp[0], " ")
|
|
||||||
if token == "q" {
|
|
||||||
a.Q, _ = strconv.ParseFloat(sp[1], 32)
|
|
||||||
} else {
|
|
||||||
a.Params[token] = strings.Trim(sp[1], " ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
accept = append(accept, a)
|
|
||||||
}
|
|
||||||
|
|
||||||
slice := accept_slice(accept)
|
|
||||||
sort.Sort(slice)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Negotiate the most appropriate content_type given the accept header
|
|
||||||
// and a list of alternatives.
|
|
||||||
func Negotiate(header string, alternatives []string) (content_type string) {
|
|
||||||
asp := make([][]string, 0, len(alternatives))
|
|
||||||
for _, ctype := range alternatives {
|
|
||||||
asp = append(asp, strings.SplitN(ctype, "/", 2))
|
|
||||||
}
|
|
||||||
for _, clause := range ParseAccept(header) {
|
|
||||||
for i, ctsp := range asp {
|
|
||||||
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
|
|
||||||
content_type = alternatives[i]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if clause.Type == ctsp[0] && clause.SubType == "*" {
|
|
||||||
content_type = alternatives[i]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if clause.Type == "*" && clause.SubType == "*" {
|
|
||||||
content_type = alternatives[i]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
36
vendor/github.com/go-openapi/analysis/.drone.yml
generated
vendored
36
vendor/github.com/go-openapi/analysis/.drone.yml
generated
vendored
|
@ -1,36 +0,0 @@
|
||||||
clone:
|
|
||||||
path: github.com/go-openapi/analysis
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
GO_VERSION:
|
|
||||||
- "1.6"
|
|
||||||
|
|
||||||
build:
|
|
||||||
integration:
|
|
||||||
image: golang:$$GO_VERSION
|
|
||||||
pull: true
|
|
||||||
commands:
|
|
||||||
- go get -u github.com/stretchr/testify/assert
|
|
||||||
- go get -u gopkg.in/yaml.v2
|
|
||||||
- go get -u github.com/go-openapi/swag
|
|
||||||
- go get -u github.com/go-openapi/jsonpointer
|
|
||||||
- go get -u github.com/go-openapi/spec
|
|
||||||
- go get -u github.com/go-openapi/loads/fmts
|
|
||||||
- go test -race ./...
|
|
||||||
- go test -v -cover -coverprofile=coverage.out -covermode=count ./...
|
|
||||||
|
|
||||||
notify:
|
|
||||||
slack:
|
|
||||||
channel: bots
|
|
||||||
webhook_url: $$SLACK_URL
|
|
||||||
username: drone
|
|
||||||
|
|
||||||
publish:
|
|
||||||
coverage:
|
|
||||||
server: https://coverage.vmware.run
|
|
||||||
token: $$GITHUB_TOKEN
|
|
||||||
# threshold: 70
|
|
||||||
# must_increase: true
|
|
||||||
when:
|
|
||||||
matrix:
|
|
||||||
GO_VERSION: "1.6"
|
|
2
vendor/github.com/go-openapi/analysis/.gitignore
generated
vendored
2
vendor/github.com/go-openapi/analysis/.gitignore
generated
vendored
|
@ -1,2 +0,0 @@
|
||||||
secrets.yml
|
|
||||||
coverage.out
|
|
12
vendor/github.com/go-openapi/analysis/.pullapprove.yml
generated
vendored
12
vendor/github.com/go-openapi/analysis/.pullapprove.yml
generated
vendored
|
@ -1,12 +0,0 @@
|
||||||
approve_by_comment: true
|
|
||||||
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
|
|
||||||
reject_regex: ^[Rr]ejected
|
|
||||||
reset_on_push: false
|
|
||||||
reviewers:
|
|
||||||
members:
|
|
||||||
- casualjim
|
|
||||||
- frapposelli
|
|
||||||
- vburenin
|
|
||||||
- pytlesk4
|
|
||||||
name: pullapprove
|
|
||||||
required: 1
|
|
74
vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
generated
vendored
74
vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
generated
vendored
|
@ -1,74 +0,0 @@
|
||||||
# Contributor Covenant Code of Conduct
|
|
||||||
|
|
||||||
## Our Pledge
|
|
||||||
|
|
||||||
In the interest of fostering an open and welcoming environment, we as
|
|
||||||
contributors and maintainers pledge to making participation in our project and
|
|
||||||
our community a harassment-free experience for everyone, regardless of age, body
|
|
||||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
|
||||||
nationality, personal appearance, race, religion, or sexual identity and
|
|
||||||
orientation.
|
|
||||||
|
|
||||||
## Our Standards
|
|
||||||
|
|
||||||
Examples of behavior that contributes to creating a positive environment
|
|
||||||
include:
|
|
||||||
|
|
||||||
* Using welcoming and inclusive language
|
|
||||||
* Being respectful of differing viewpoints and experiences
|
|
||||||
* Gracefully accepting constructive criticism
|
|
||||||
* Focusing on what is best for the community
|
|
||||||
* Showing empathy towards other community members
|
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
|
||||||
advances
|
|
||||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing others' private information, such as a physical or electronic
|
|
||||||
address, without explicit permission
|
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
|
||||||
professional setting
|
|
||||||
|
|
||||||
## Our Responsibilities
|
|
||||||
|
|
||||||
Project maintainers are responsible for clarifying the standards of acceptable
|
|
||||||
behavior and are expected to take appropriate and fair corrective action in
|
|
||||||
response to any instances of unacceptable behavior.
|
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or
|
|
||||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
|
||||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
|
||||||
permanently any contributor for other behaviors that they deem inappropriate,
|
|
||||||
threatening, offensive, or harmful.
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
This Code of Conduct applies both within project spaces and in public spaces
|
|
||||||
when an individual is representing the project or its community. Examples of
|
|
||||||
representing a project or community include using an official project e-mail
|
|
||||||
address, posting via an official social media account, or acting as an appointed
|
|
||||||
representative at an online or offline event. Representation of a project may be
|
|
||||||
further defined and clarified by project maintainers.
|
|
||||||
|
|
||||||
## Enforcement
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
|
||||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
|
||||||
complaints will be reviewed and investigated and will result in a response that
|
|
||||||
is deemed necessary and appropriate to the circumstances. The project team is
|
|
||||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
|
||||||
Further details of specific enforcement policies may be posted separately.
|
|
||||||
|
|
||||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
|
||||||
faith may face temporary or permanent repercussions as determined by other
|
|
||||||
members of the project's leadership.
|
|
||||||
|
|
||||||
## Attribution
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
|
||||||
available at [http://contributor-covenant.org/version/1/4][version]
|
|
||||||
|
|
||||||
[homepage]: http://contributor-covenant.org
|
|
||||||
[version]: http://contributor-covenant.org/version/1/4/
|
|
6
vendor/github.com/go-openapi/analysis/README.md
generated
vendored
6
vendor/github.com/go-openapi/analysis/README.md
generated
vendored
|
@ -1,6 +0,0 @@
|
||||||
# OpenAPI initiative analysis [](https://ci.vmware.run/go-openapi/analysis) [](https://coverage.vmware.run/go-openapi/analysis) [](https://slackin.goswagger.io)
|
|
||||||
|
|
||||||
[](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [](http://godoc.org/github.com/go-openapi/analysis)
|
|
||||||
|
|
||||||
|
|
||||||
A foundational library to analyze an OAI specification document for easier reasoning about the content.
|
|
614
vendor/github.com/go-openapi/analysis/analyzer.go
generated
vendored
614
vendor/github.com/go-openapi/analysis/analyzer.go
generated
vendored
|
@ -1,614 +0,0 @@
|
||||||
// Copyright 2015 go-swagger maintainers
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package analysis
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
slashpath "path"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/go-openapi/jsonpointer"
|
|
||||||
"github.com/go-openapi/spec"
|
|
||||||
"github.com/go-openapi/swag"
|
|
||||||
)
|
|
||||||
|
|
||||||
type referenceAnalysis struct {
|
|
||||||
schemas map[string]spec.Ref
|
|
||||||
responses map[string]spec.Ref
|
|
||||||
parameters map[string]spec.Ref
|
|
||||||
items map[string]spec.Ref
|
|
||||||
allRefs map[string]spec.Ref
|
|
||||||
referenced struct {
|
|
||||||
schemas map[string]SchemaRef
|
|
||||||
responses map[string]*spec.Response
|
|
||||||
parameters map[string]*spec.Parameter
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
|
|
||||||
r.allRefs["#"+key] = ref
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
|
|
||||||
r.items["#"+key] = items.Ref
|
|
||||||
r.addRef(key, items.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
|
|
||||||
r.schemas["#"+key] = ref.Schema.Ref
|
|
||||||
r.addRef(key, ref.Schema.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
|
|
||||||
r.responses["#"+key] = resp.Ref
|
|
||||||
r.addRef(key, resp.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
|
|
||||||
r.parameters["#"+key] = param.Ref
|
|
||||||
r.addRef(key, param.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New takes a swagger spec object and returns an analyzed spec document.
|
|
||||||
// The analyzed document contains a number of indices that make it easier to
|
|
||||||
// reason about semantics of a swagger specification for use in code generation
|
|
||||||
// or validation etc.
|
|
||||||
func New(doc *spec.Swagger) *Spec {
|
|
||||||
a := &Spec{
|
|
||||||
spec: doc,
|
|
||||||
consumes: make(map[string]struct{}, 150),
|
|
||||||
produces: make(map[string]struct{}, 150),
|
|
||||||
authSchemes: make(map[string]struct{}, 150),
|
|
||||||
operations: make(map[string]map[string]*spec.Operation, 150),
|
|
||||||
allSchemas: make(map[string]SchemaRef, 150),
|
|
||||||
allOfs: make(map[string]SchemaRef, 150),
|
|
||||||
references: referenceAnalysis{
|
|
||||||
schemas: make(map[string]spec.Ref, 150),
|
|
||||||
responses: make(map[string]spec.Ref, 150),
|
|
||||||
parameters: make(map[string]spec.Ref, 150),
|
|
||||||
items: make(map[string]spec.Ref, 150),
|
|
||||||
allRefs: make(map[string]spec.Ref, 150),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
a.references.referenced.schemas = make(map[string]SchemaRef, 150)
|
|
||||||
a.references.referenced.responses = make(map[string]*spec.Response, 150)
|
|
||||||
a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
|
|
||||||
a.initialize()
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
// Spec takes a swagger spec object and turns it into a registry
|
|
||||||
// with a bunch of utility methods to act on the information in the spec
|
|
||||||
type Spec struct {
|
|
||||||
spec *spec.Swagger
|
|
||||||
consumes map[string]struct{}
|
|
||||||
produces map[string]struct{}
|
|
||||||
authSchemes map[string]struct{}
|
|
||||||
operations map[string]map[string]*spec.Operation
|
|
||||||
references referenceAnalysis
|
|
||||||
allSchemas map[string]SchemaRef
|
|
||||||
allOfs map[string]SchemaRef
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) initialize() {
|
|
||||||
for _, c := range s.spec.Consumes {
|
|
||||||
s.consumes[c] = struct{}{}
|
|
||||||
}
|
|
||||||
for _, c := range s.spec.Produces {
|
|
||||||
s.produces[c] = struct{}{}
|
|
||||||
}
|
|
||||||
for _, ss := range s.spec.Security {
|
|
||||||
for k := range ss {
|
|
||||||
s.authSchemes[k] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for path, pathItem := range s.AllPaths() {
|
|
||||||
s.analyzeOperations(path, &pathItem)
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, parameter := range s.spec.Parameters {
|
|
||||||
refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
|
|
||||||
if parameter.Items != nil {
|
|
||||||
s.analyzeItems("items", parameter.Items, refPref)
|
|
||||||
}
|
|
||||||
if parameter.In == "body" && parameter.Schema != nil {
|
|
||||||
s.analyzeSchema("schema", *parameter.Schema, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, response := range s.spec.Responses {
|
|
||||||
refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
|
|
||||||
for _, v := range response.Headers {
|
|
||||||
if v.Items != nil {
|
|
||||||
s.analyzeItems("items", v.Items, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if response.Schema != nil {
|
|
||||||
s.analyzeSchema("schema", *response.Schema, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, schema := range s.spec.Definitions {
|
|
||||||
s.analyzeSchema(name, schema, "/definitions")
|
|
||||||
}
|
|
||||||
// TODO: after analyzing all things and flattening schemas etc
|
|
||||||
// resolve all the collected references to their final representations
|
|
||||||
// best put in a separate method because this could get expensive
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
|
|
||||||
// TODO: resolve refs here?
|
|
||||||
op := pi
|
|
||||||
s.analyzeOperation("GET", path, op.Get)
|
|
||||||
s.analyzeOperation("PUT", path, op.Put)
|
|
||||||
s.analyzeOperation("POST", path, op.Post)
|
|
||||||
s.analyzeOperation("PATCH", path, op.Patch)
|
|
||||||
s.analyzeOperation("DELETE", path, op.Delete)
|
|
||||||
s.analyzeOperation("HEAD", path, op.Head)
|
|
||||||
s.analyzeOperation("OPTIONS", path, op.Options)
|
|
||||||
for i, param := range op.Parameters {
|
|
||||||
refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
|
|
||||||
if param.Ref.String() != "" {
|
|
||||||
s.references.addParamRef(refPref, ¶m)
|
|
||||||
}
|
|
||||||
if param.Items != nil {
|
|
||||||
s.analyzeItems("items", param.Items, refPref)
|
|
||||||
}
|
|
||||||
if param.Schema != nil {
|
|
||||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
|
|
||||||
if items == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
refPref := slashpath.Join(prefix, name)
|
|
||||||
s.analyzeItems(name, items.Items, refPref)
|
|
||||||
if items.Ref.String() != "" {
|
|
||||||
s.references.addItemsRef(refPref, items)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
|
|
||||||
if op == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, c := range op.Consumes {
|
|
||||||
s.consumes[c] = struct{}{}
|
|
||||||
}
|
|
||||||
for _, c := range op.Produces {
|
|
||||||
s.produces[c] = struct{}{}
|
|
||||||
}
|
|
||||||
for _, ss := range op.Security {
|
|
||||||
for k := range ss {
|
|
||||||
s.authSchemes[k] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if _, ok := s.operations[method]; !ok {
|
|
||||||
s.operations[method] = make(map[string]*spec.Operation)
|
|
||||||
}
|
|
||||||
s.operations[method][path] = op
|
|
||||||
prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
|
|
||||||
for i, param := range op.Parameters {
|
|
||||||
refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
|
|
||||||
if param.Ref.String() != "" {
|
|
||||||
s.references.addParamRef(refPref, ¶m)
|
|
||||||
}
|
|
||||||
s.analyzeItems("items", param.Items, refPref)
|
|
||||||
if param.In == "body" && param.Schema != nil {
|
|
||||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if op.Responses != nil {
|
|
||||||
if op.Responses.Default != nil {
|
|
||||||
refPref := slashpath.Join(prefix, "responses", "default")
|
|
||||||
if op.Responses.Default.Ref.String() != "" {
|
|
||||||
s.references.addResponseRef(refPref, op.Responses.Default)
|
|
||||||
}
|
|
||||||
for _, v := range op.Responses.Default.Headers {
|
|
||||||
s.analyzeItems("items", v.Items, refPref)
|
|
||||||
}
|
|
||||||
if op.Responses.Default.Schema != nil {
|
|
||||||
s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for k, res := range op.Responses.StatusCodeResponses {
|
|
||||||
refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
|
|
||||||
if res.Ref.String() != "" {
|
|
||||||
s.references.addResponseRef(refPref, &res)
|
|
||||||
}
|
|
||||||
for _, v := range res.Headers {
|
|
||||||
s.analyzeItems("items", v.Items, refPref)
|
|
||||||
}
|
|
||||||
if res.Schema != nil {
|
|
||||||
s.analyzeSchema("schema", *res.Schema, refPref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
|
|
||||||
refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
|
|
||||||
schRef := SchemaRef{
|
|
||||||
Name: name,
|
|
||||||
Schema: &schema,
|
|
||||||
Ref: spec.MustCreateRef("#" + refURI),
|
|
||||||
}
|
|
||||||
s.allSchemas["#"+refURI] = schRef
|
|
||||||
if schema.Ref.String() != "" {
|
|
||||||
s.references.addSchemaRef(refURI, schRef)
|
|
||||||
}
|
|
||||||
for k, v := range schema.Definitions {
|
|
||||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
|
|
||||||
}
|
|
||||||
for k, v := range schema.Properties {
|
|
||||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
|
|
||||||
}
|
|
||||||
for k, v := range schema.PatternProperties {
|
|
||||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
|
|
||||||
}
|
|
||||||
for i, v := range schema.AllOf {
|
|
||||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
|
|
||||||
}
|
|
||||||
if len(schema.AllOf) > 0 {
|
|
||||||
s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
|
|
||||||
}
|
|
||||||
for i, v := range schema.AnyOf {
|
|
||||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
|
|
||||||
}
|
|
||||||
for i, v := range schema.OneOf {
|
|
||||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
|
|
||||||
}
|
|
||||||
if schema.Not != nil {
|
|
||||||
s.analyzeSchema("not", *schema.Not, refURI)
|
|
||||||
}
|
|
||||||
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
|
|
||||||
s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
|
|
||||||
}
|
|
||||||
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
|
|
||||||
s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
|
|
||||||
}
|
|
||||||
if schema.Items != nil {
|
|
||||||
if schema.Items.Schema != nil {
|
|
||||||
s.analyzeSchema("items", *schema.Items.Schema, refURI)
|
|
||||||
}
|
|
||||||
for i, sch := range schema.Items.Schemas {
|
|
||||||
s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecurityRequirement is a representation of a security requirement for an operation
|
|
||||||
type SecurityRequirement struct {
|
|
||||||
Name string
|
|
||||||
Scopes []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecurityRequirementsFor gets the security requirements for the operation
|
|
||||||
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
|
|
||||||
if s.spec.Security == nil && operation.Security == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
schemes := s.spec.Security
|
|
||||||
if operation.Security != nil {
|
|
||||||
schemes = operation.Security
|
|
||||||
}
|
|
||||||
|
|
||||||
unique := make(map[string]SecurityRequirement)
|
|
||||||
for _, scheme := range schemes {
|
|
||||||
for k, v := range scheme {
|
|
||||||
if _, ok := unique[k]; !ok {
|
|
||||||
unique[k] = SecurityRequirement{Name: k, Scopes: v}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var result []SecurityRequirement
|
|
||||||
for _, v := range unique {
|
|
||||||
result = append(result, v)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
|
|
||||||
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
|
|
||||||
requirements := s.SecurityRequirementsFor(operation)
|
|
||||||
if len(requirements) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
result := make(map[string]spec.SecurityScheme)
|
|
||||||
for _, v := range requirements {
|
|
||||||
if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
|
|
||||||
if definition != nil {
|
|
||||||
result[v.Name] = *definition
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsumesFor gets the mediatypes for the operation
|
|
||||||
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
|
|
||||||
|
|
||||||
if len(operation.Consumes) == 0 {
|
|
||||||
cons := make(map[string]struct{}, len(s.spec.Consumes))
|
|
||||||
for _, k := range s.spec.Consumes {
|
|
||||||
cons[k] = struct{}{}
|
|
||||||
}
|
|
||||||
return s.structMapKeys(cons)
|
|
||||||
}
|
|
||||||
|
|
||||||
cons := make(map[string]struct{}, len(operation.Consumes))
|
|
||||||
for _, c := range operation.Consumes {
|
|
||||||
cons[c] = struct{}{}
|
|
||||||
}
|
|
||||||
return s.structMapKeys(cons)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProducesFor gets the mediatypes for the operation
|
|
||||||
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
|
|
||||||
if len(operation.Produces) == 0 {
|
|
||||||
prod := make(map[string]struct{}, len(s.spec.Produces))
|
|
||||||
for _, k := range s.spec.Produces {
|
|
||||||
prod[k] = struct{}{}
|
|
||||||
}
|
|
||||||
return s.structMapKeys(prod)
|
|
||||||
}
|
|
||||||
|
|
||||||
prod := make(map[string]struct{}, len(operation.Produces))
|
|
||||||
for _, c := range operation.Produces {
|
|
||||||
prod[c] = struct{}{}
|
|
||||||
}
|
|
||||||
return s.structMapKeys(prod)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mapKeyFromParam(param *spec.Parameter) string {
|
|
||||||
return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
|
|
||||||
}
|
|
||||||
|
|
||||||
func fieldNameFromParam(param *spec.Parameter) string {
|
|
||||||
if nm, ok := param.Extensions.GetString("go-name"); ok {
|
|
||||||
return nm
|
|
||||||
}
|
|
||||||
return swag.ToGoName(param.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
|
|
||||||
for _, param := range parameters {
|
|
||||||
pr := param
|
|
||||||
if pr.Ref.String() != "" {
|
|
||||||
obj, _, err := pr.Ref.GetPointer().Get(s.spec)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
pr = obj.(spec.Parameter)
|
|
||||||
}
|
|
||||||
res[mapKeyFromParam(&pr)] = pr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParametersFor the specified operation id
|
|
||||||
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
|
|
||||||
gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
|
|
||||||
bag := make(map[string]spec.Parameter)
|
|
||||||
s.paramsAsMap(pi.Parameters, bag)
|
|
||||||
s.paramsAsMap(op.Parameters, bag)
|
|
||||||
|
|
||||||
var res []spec.Parameter
|
|
||||||
for _, v := range bag {
|
|
||||||
res = append(res, v)
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
for _, pi := range s.spec.Paths.Paths {
|
|
||||||
if pi.Get != nil && pi.Get.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Get)
|
|
||||||
}
|
|
||||||
if pi.Head != nil && pi.Head.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Head)
|
|
||||||
}
|
|
||||||
if pi.Options != nil && pi.Options.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Options)
|
|
||||||
}
|
|
||||||
if pi.Post != nil && pi.Post.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Post)
|
|
||||||
}
|
|
||||||
if pi.Patch != nil && pi.Patch.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Patch)
|
|
||||||
}
|
|
||||||
if pi.Put != nil && pi.Put.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Put)
|
|
||||||
}
|
|
||||||
if pi.Delete != nil && pi.Delete.ID == operationID {
|
|
||||||
return gatherParams(&pi, pi.Delete)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
|
|
||||||
// apply for the method and path.
|
|
||||||
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
|
|
||||||
res := make(map[string]spec.Parameter)
|
|
||||||
if pi, ok := s.spec.Paths.Paths[path]; ok {
|
|
||||||
s.paramsAsMap(pi.Parameters, res)
|
|
||||||
s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// OperationForName gets the operation for the given id
|
|
||||||
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
|
|
||||||
for method, pathItem := range s.operations {
|
|
||||||
for path, op := range pathItem {
|
|
||||||
if operationID == op.ID {
|
|
||||||
return method, path, op, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", "", nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// OperationFor the given method and path
|
|
||||||
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
|
|
||||||
if mp, ok := s.operations[strings.ToUpper(method)]; ok {
|
|
||||||
op, fn := mp[path]
|
|
||||||
return op, fn
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Operations gathers all the operations specified in the spec document
|
|
||||||
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
|
|
||||||
return s.operations
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
|
|
||||||
if len(mp) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result := make([]string, 0, len(mp))
|
|
||||||
for k := range mp {
|
|
||||||
result = append(result, k)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllPaths returns all the paths in the swagger spec
|
|
||||||
func (s *Spec) AllPaths() map[string]spec.PathItem {
|
|
||||||
if s.spec == nil || s.spec.Paths == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return s.spec.Paths.Paths
|
|
||||||
}
|
|
||||||
|
|
||||||
// OperationIDs gets all the operation ids based on method an dpath
|
|
||||||
func (s *Spec) OperationIDs() []string {
|
|
||||||
if len(s.operations) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
result := make([]string, 0, len(s.operations))
|
|
||||||
for method, v := range s.operations {
|
|
||||||
for p, o := range v {
|
|
||||||
if o.ID != "" {
|
|
||||||
result = append(result, o.ID)
|
|
||||||
} else {
|
|
||||||
result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequiredConsumes gets all the distinct consumes that are specified in the specification document
|
|
||||||
func (s *Spec) RequiredConsumes() []string {
|
|
||||||
return s.structMapKeys(s.consumes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequiredProduces gets all the distinct produces that are specified in the specification document
|
|
||||||
func (s *Spec) RequiredProduces() []string {
|
|
||||||
return s.structMapKeys(s.produces)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
|
|
||||||
func (s *Spec) RequiredSecuritySchemes() []string {
|
|
||||||
return s.structMapKeys(s.authSchemes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SchemaRef is a reference to a schema
|
|
||||||
type SchemaRef struct {
|
|
||||||
Name string
|
|
||||||
Ref spec.Ref
|
|
||||||
Schema *spec.Schema
|
|
||||||
}
|
|
||||||
|
|
||||||
// SchemasWithAllOf returns schema references to all schemas that are defined
|
|
||||||
// with an allOf key
|
|
||||||
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
|
|
||||||
for _, v := range s.allOfs {
|
|
||||||
result = append(result, v)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllDefinitions returns schema references for all the definitions that were discovered
|
|
||||||
func (s *Spec) AllDefinitions() (result []SchemaRef) {
|
|
||||||
for _, v := range s.allSchemas {
|
|
||||||
result = append(result, v)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllDefinitionReferences returns json refs for all the discovered schemas
|
|
||||||
func (s *Spec) AllDefinitionReferences() (result []string) {
|
|
||||||
for _, v := range s.references.schemas {
|
|
||||||
result = append(result, v.String())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllParameterReferences returns json refs for all the discovered parameters
|
|
||||||
func (s *Spec) AllParameterReferences() (result []string) {
|
|
||||||
for _, v := range s.references.parameters {
|
|
||||||
result = append(result, v.String())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllResponseReferences returns json refs for all the discovered responses
|
|
||||||
func (s *Spec) AllResponseReferences() (result []string) {
|
|
||||||
for _, v := range s.references.responses {
|
|
||||||
result = append(result, v.String())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllItemsReferences returns the references for all the items
|
|
||||||
func (s *Spec) AllItemsReferences() (result []string) {
|
|
||||||
for _, v := range s.references.items {
|
|
||||||
result = append(result, v.String())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllReferences returns all the references found in the document
|
|
||||||
func (s *Spec) AllReferences() (result []string) {
|
|
||||||
for _, v := range s.references.allRefs {
|
|
||||||
result = append(result, v.String())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllRefs returns all the unique references found in the document
|
|
||||||
func (s *Spec) AllRefs() (result []spec.Ref) {
|
|
||||||
set := make(map[string]struct{})
|
|
||||||
for _, v := range s.references.allRefs {
|
|
||||||
a := v.String()
|
|
||||||
if a == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := set[a]; !ok {
|
|
||||||
set[a] = struct{}{}
|
|
||||||
result = append(result, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
39
vendor/github.com/go-openapi/loads/.drone.yml
generated
vendored
39
vendor/github.com/go-openapi/loads/.drone.yml
generated
vendored
|
@ -1,39 +0,0 @@
|
||||||
clone:
|
|
||||||
path: github.com/go-openapi/loads
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
GO_VERSION:
|
|
||||||
- "1.6"
|
|
||||||
|
|
||||||
build:
|
|
||||||
integration:
|
|
||||||
image: golang:$$GO_VERSION
|
|
||||||
pull: true
|
|
||||||
environment:
|
|
||||||
GOCOVMODE: "count"
|
|
||||||
commands:
|
|
||||||
- go get -u github.com/axw/gocov/gocov
|
|
||||||
- go get -u gopkg.in/matm/v1/gocov-html
|
|
||||||
- go get -u github.com/cee-dub/go-junit-report
|
|
||||||
- go get -u github.com/stretchr/testify/assert
|
|
||||||
- go get -u gopkg.in/yaml.v2
|
|
||||||
- go get -u github.com/go-openapi/swag
|
|
||||||
- go get -u github.com/go-openapi/analysis
|
|
||||||
- go get -u github.com/go-openapi/spec
|
|
||||||
- ./hack/build-drone.sh
|
|
||||||
|
|
||||||
notify:
|
|
||||||
slack:
|
|
||||||
channel: bots
|
|
||||||
webhook_url: $$SLACK_URL
|
|
||||||
username: drone
|
|
||||||
|
|
||||||
publish:
|
|
||||||
coverage:
|
|
||||||
server: https://coverage.vmware.run
|
|
||||||
token: $$GITHUB_TOKEN
|
|
||||||
# threshold: 70
|
|
||||||
# must_increase: true
|
|
||||||
when:
|
|
||||||
matrix:
|
|
||||||
GO_VERSION: "1.6"
|
|
4
vendor/github.com/go-openapi/loads/.gitignore
generated
vendored
4
vendor/github.com/go-openapi/loads/.gitignore
generated
vendored
|
@ -1,4 +0,0 @@
|
||||||
secrets.yml
|
|
||||||
coverage.out
|
|
||||||
profile.cov
|
|
||||||
profile.out
|
|
13
vendor/github.com/go-openapi/loads/.pullapprove.yml
generated
vendored
13
vendor/github.com/go-openapi/loads/.pullapprove.yml
generated
vendored
|
@ -1,13 +0,0 @@
|
||||||
approve_by_comment: true
|
|
||||||
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
|
|
||||||
reject_regex: ^[Rr]ejected
|
|
||||||
reset_on_push: false
|
|
||||||
reviewers:
|
|
||||||
members:
|
|
||||||
- casualjim
|
|
||||||
- chancez
|
|
||||||
- frapposelli
|
|
||||||
- vburenin
|
|
||||||
- pytlesk4
|
|
||||||
name: pullapprove
|
|
||||||
required: 1
|
|
74
vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
generated
vendored
74
vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
generated
vendored
|
@ -1,74 +0,0 @@
|
||||||
# Contributor Covenant Code of Conduct
|
|
||||||
|
|
||||||
## Our Pledge
|
|
||||||
|
|
||||||
In the interest of fostering an open and welcoming environment, we as
|
|
||||||
contributors and maintainers pledge to making participation in our project and
|
|
||||||
our community a harassment-free experience for everyone, regardless of age, body
|
|
||||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
|
||||||
nationality, personal appearance, race, religion, or sexual identity and
|
|
||||||
orientation.
|
|
||||||
|
|
||||||
## Our Standards
|
|
||||||
|
|
||||||
Examples of behavior that contributes to creating a positive environment
|
|
||||||
include:
|
|
||||||
|
|
||||||
* Using welcoming and inclusive language
|
|
||||||
* Being respectful of differing viewpoints and experiences
|
|
||||||
* Gracefully accepting constructive criticism
|
|
||||||
* Focusing on what is best for the community
|
|
||||||
* Showing empathy towards other community members
|
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
|
||||||
advances
|
|
||||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing others' private information, such as a physical or electronic
|
|
||||||
address, without explicit permission
|
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
|
||||||
professional setting
|
|
||||||
|
|
||||||
## Our Responsibilities
|
|
||||||
|
|
||||||
Project maintainers are responsible for clarifying the standards of acceptable
|
|
||||||
behavior and are expected to take appropriate and fair corrective action in
|
|
||||||
response to any instances of unacceptable behavior.
|
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or
|
|
||||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
|
||||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
|
||||||
permanently any contributor for other behaviors that they deem inappropriate,
|
|
||||||
threatening, offensive, or harmful.
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
This Code of Conduct applies both within project spaces and in public spaces
|
|
||||||
when an individual is representing the project or its community. Examples of
|
|
||||||
representing a project or community include using an official project e-mail
|
|
||||||
address, posting via an official social media account, or acting as an appointed
|
|
||||||
representative at an online or offline event. Representation of a project may be
|
|
||||||
further defined and clarified by project maintainers.
|
|
||||||
|
|
||||||
## Enforcement
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
|
||||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
|
||||||
complaints will be reviewed and investigated and will result in a response that
|
|
||||||
is deemed necessary and appropriate to the circumstances. The project team is
|
|
||||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
|
||||||
Further details of specific enforcement policies may be posted separately.
|
|
||||||
|
|
||||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
|
||||||
faith may face temporary or permanent repercussions as determined by other
|
|
||||||
members of the project's leadership.
|
|
||||||
|
|
||||||
## Attribution
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
|
||||||
available at [http://contributor-covenant.org/version/1/4][version]
|
|
||||||
|
|
||||||
[homepage]: http://contributor-covenant.org
|
|
||||||
[version]: http://contributor-covenant.org/version/1/4/
|
|
5
vendor/github.com/go-openapi/loads/README.md
generated
vendored
5
vendor/github.com/go-openapi/loads/README.md
generated
vendored
|
@ -1,5 +0,0 @@
|
||||||
# Loads OAI specs [](https://ci.vmware.run/go-openapi/loads) [](https://coverage.vmware.run/go-openapi/loads) [](https://slackin.goswagger.io)
|
|
||||||
|
|
||||||
[](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [](http://godoc.org/github.com/go-openapi/loads)
|
|
||||||
|
|
||||||
Loading of OAI specification documents from local or remote locations.
|
|
203
vendor/github.com/go-openapi/loads/spec.go
generated
vendored
203
vendor/github.com/go-openapi/loads/spec.go
generated
vendored
|
@ -1,203 +0,0 @@
|
||||||
// Copyright 2015 go-swagger maintainers
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package loads
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/go-openapi/analysis"
|
|
||||||
"github.com/go-openapi/spec"
|
|
||||||
"github.com/go-openapi/swag"
|
|
||||||
)
|
|
||||||
|
|
||||||
// JSONDoc loads a json document from either a file or a remote url
|
|
||||||
func JSONDoc(path string) (json.RawMessage, error) {
|
|
||||||
data, err := swag.LoadFromFileOrHTTP(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return json.RawMessage(data), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DocLoader represents a doc loader type
|
|
||||||
type DocLoader func(string) (json.RawMessage, error)
|
|
||||||
|
|
||||||
// DocMatcher represents a predicate to check if a loader matches
|
|
||||||
type DocMatcher func(string) bool
|
|
||||||
|
|
||||||
var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}
|
|
||||||
|
|
||||||
// AddLoader for a document
|
|
||||||
func AddLoader(predicate DocMatcher, load DocLoader) {
|
|
||||||
prev := loaders
|
|
||||||
loaders = &loader{
|
|
||||||
Match: predicate,
|
|
||||||
Fn: load,
|
|
||||||
Next: prev,
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
type loader struct {
	Fn    DocLoader
	Match DocMatcher
	Next  *loader
}

// JSONSpec loads a spec from a json document
func JSONSpec(path string) (*Document, error) {
	data, err := JSONDoc(path)
	if err != nil {
		return nil, err
	}
	// convert to json
	return Analyzed(json.RawMessage(data), "")
}

// Document represents a swagger spec document
type Document struct {
	// specAnalyzer
	Analyzer *analysis.Spec
	spec     *spec.Swagger
	origSpec *spec.Swagger
	schema   *spec.Schema
	raw      json.RawMessage
}

// Spec loads a new spec document
func Spec(path string) (*Document, error) {
	specURL, err := url.Parse(path)
	if err != nil {
		return nil, err
	}
	for l := loaders.Next; l != nil; l = l.Next {
		if loaders.Match(specURL.Path) {
			b, err2 := loaders.Fn(path)
			if err2 != nil {
				return nil, err2
			}
			return Analyzed(b, "")
		}
	}
	b, err := loaders.Fn(path)
	if err != nil {
		return nil, err
	}
	return Analyzed(b, "")
}

var swag20Schema = spec.MustLoadSwagger20Schema()

// Analyzed creates a new analyzed spec document
func Analyzed(data json.RawMessage, version string) (*Document, error) {
	if version == "" {
		version = "2.0"
	}
	if version != "2.0" {
		return nil, fmt.Errorf("spec version %q is not supported", version)
	}

	swspec := new(spec.Swagger)
	if err := json.Unmarshal(data, swspec); err != nil {
		return nil, err
	}

	origsqspec := new(spec.Swagger)
	if err := json.Unmarshal(data, origsqspec); err != nil {
		return nil, err
	}

	d := &Document{
		Analyzer: analysis.New(swspec),
		schema:   swag20Schema,
		spec:     swspec,
		raw:      data,
		origSpec: origsqspec,
	}
	return d, nil
}

// Expanded expands the ref fields in the spec document and returns a new spec document
func (d *Document) Expanded() (*Document, error) {
	swspec := new(spec.Swagger)
	if err := json.Unmarshal(d.raw, swspec); err != nil {
		return nil, err
	}
	if err := spec.ExpandSpec(swspec); err != nil {
		return nil, err
	}

	dd := &Document{
		Analyzer: analysis.New(swspec),
		spec:     swspec,
		schema:   swag20Schema,
		raw:      d.raw,
		origSpec: d.origSpec,
	}
	return dd, nil
}

// BasePath the base path for this spec
func (d *Document) BasePath() string {
	return d.spec.BasePath
}

// Version returns the version of this spec
func (d *Document) Version() string {
	return d.spec.Swagger
}

// Schema returns the swagger 2.0 schema
func (d *Document) Schema() *spec.Schema {
	return d.schema
}

// Spec returns the swagger spec object model
func (d *Document) Spec() *spec.Swagger {
	return d.spec
}

// Host returns the host for the API
func (d *Document) Host() string {
	return d.spec.Host
}

// Raw returns the raw swagger spec as json bytes
func (d *Document) Raw() json.RawMessage {
	return d.raw
}

func (d *Document) OrigSpec() *spec.Swagger {
	return d.origSpec
}

// ResetDefinitions gives a shallow copy with the models reset
func (d *Document) ResetDefinitions() *Document {
	defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
	for k, v := range d.origSpec.Definitions {
		defs[k] = v
	}

	d.spec.Definitions = defs
	return d
}

// Pristine creates a new pristine document instance based on the input data
func (d *Document) Pristine() *Document {
	dd, _ := Analyzed(d.Raw(), d.Version())
	return dd
}
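For reference, a minimal sketch of how this loading API is typically driven from client code. It assumes the file above belongs to the vendored go-openapi loads package; the import path and the swagger.json path are illustrative and not taken from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads" // assumed import path for the package shown above
)

func main() {
	// Spec picks a document loader based on the path and delegates to Analyzed.
	doc, err := loads.Spec("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("swagger version:", doc.Version(), "basePath:", doc.BasePath())

	// Expanded resolves $ref fields and returns a new, independent document.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("host:", expanded.Host())
}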

136 vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file
@@ -0,0 +1,136 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package ptypes

// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
//   var x ptypes.DynamicAny
//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//   fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
	aname, err := AnyMessageName(any)
	if err != nil {
		return false
	}

	return aname == proto.MessageName(pb)
}
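A short usage sketch (not part of this commit) for the helpers above: it packs a google.protobuf.Duration into an Any and unpacks it again. It assumes the vendored duration message package and its generated registration are available alongside this one.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durpb.Duration{Seconds: 90}

	// MarshalAny records the fully qualified type name in TypeUrl and the
	// serialized bytes in Value.
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack into a known concrete type.
	var dst durpb.Duration
	if err := ptypes.UnmarshalAny(a, &dst); err != nil {
		log.Fatal(err)
	}

	// Or let DynamicAny allocate the right type from the proto registry
	// (this requires the Duration type to be linked in, which its generated
	// code normally guarantees).
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", dyn.Message)
}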

155 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file
@@ -0,0 +1,155 @@
// Code generated by protoc-gen-go.
// source: github.com/golang/protobuf/ptypes/any/any.proto
// DO NOT EDIT!

/*
Package any is a generated protocol buffer package.

It is generated from these files:
	github.com/golang/protobuf/ptypes/any/any.proto

It has these top-level messages:
	Any
*/
package any

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
//     Foo foo = ...;
//     Any any;
//     any.PackFrom(foo);
//     ...
//     if (any.UnpackTo(&foo)) {
//       ...
//     }
//
// Example 2: Pack and unpack a message in Java.
//
//     Foo foo = ...;
//     Any any = Any.pack(foo);
//     ...
//     if (any.is(Foo.class)) {
//       foo = any.unpack(Foo.class);
//     }
//
// Example 3: Pack and unpack a message in Python.
//
//     foo = Foo(...)
//     any = Any()
//     any.Pack(foo)
//     ...
//     if any.Is(Foo.DESCRIPTOR):
//       any.Unpack(foo)
//       ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
//     package google.profile;
//     message Person {
//       string first_name = 1;
//       string last_name = 2;
//     }
//
//     {
//       "@type": "type.googleapis.com/google.profile.Person",
//       "firstName": <string>,
//       "lastName": <string>
//     }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
//     {
//       "@type": "type.googleapis.com/google.protobuf.Duration",
//       "value": "1.212s"
//     }
//
type Any struct {
	// A URL/resource name whose content describes the type of the
	// serialized protocol buffer message.
	//
	// For URLs which use the scheme `http`, `https`, or no scheme, the
	// following restrictions and interpretations apply:
	//
	// * If no scheme is provided, `https` is assumed.
	// * The last segment of the URL's path must represent the fully
	//   qualified name of the type (as in `path/google.protobuf.Duration`).
	//   The name should be in a canonical form (e.g., leading "." is
	//   not accepted).
	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
	//   value in binary format, or produce an error.
	// * Applications are allowed to cache lookup results based on the
	//   URL, or have them precompiled into a binary to avoid any
	//   lookup. Therefore, binary compatibility needs to be preserved
	//   on changes to types. (Use versioned type names to manage
	//   breaking changes.)
	//
	// Schemes other than `http`, `https` (or the empty scheme) might be
	// used with implementation specific semantics.
	//
	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
	// Must be a valid serialized protocol buffer of the above specified type.
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *Any) Reset()                    { *m = Any{} }
func (m *Any) String() string            { return proto.CompactTextString(m) }
func (*Any) ProtoMessage()               {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string   { return "Any" }

func init() {
	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}

func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
	// 187 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
	0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
	0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
	0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
	0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
	0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
	0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
	0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
	0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
	0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
	0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
}

140 vendor/github.com/golang/protobuf/ptypes/any/any.proto generated vendored Normal file
@@ -0,0 +1,140 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option java_generate_equals_and_hash = true;
option objc_class_prefix = "GPB";

// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
//     Foo foo = ...;
//     Any any;
//     any.PackFrom(foo);
//     ...
//     if (any.UnpackTo(&foo)) {
//       ...
//     }
//
// Example 2: Pack and unpack a message in Java.
//
//     Foo foo = ...;
//     Any any = Any.pack(foo);
//     ...
//     if (any.is(Foo.class)) {
//       foo = any.unpack(Foo.class);
//     }
//
// Example 3: Pack and unpack a message in Python.
//
//     foo = Foo(...)
//     any = Any()
//     any.Pack(foo)
//     ...
//     if any.Is(Foo.DESCRIPTOR):
//       any.Unpack(foo)
//       ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
//     package google.profile;
//     message Person {
//       string first_name = 1;
//       string last_name = 2;
//     }
//
//     {
//       "@type": "type.googleapis.com/google.profile.Person",
//       "firstName": <string>,
//       "lastName": <string>
//     }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
//     {
//       "@type": "type.googleapis.com/google.protobuf.Duration",
//       "value": "1.212s"
//     }
//
message Any {
  // A URL/resource name whose content describes the type of the
  // serialized protocol buffer message.
  //
  // For URLs which use the scheme `http`, `https`, or no scheme, the
  // following restrictions and interpretations apply:
  //
  // * If no scheme is provided, `https` is assumed.
  // * The last segment of the URL's path must represent the fully
  //   qualified name of the type (as in `path/google.protobuf.Duration`).
  //   The name should be in a canonical form (e.g., leading "." is
  //   not accepted).
  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
  //   value in binary format, or produce an error.
  // * Applications are allowed to cache lookup results based on the
  //   URL, or have them precompiled into a binary to avoid any
  //   lookup. Therefore, binary compatibility needs to be preserved
  //   on changes to types. (Use versioned type names to manage
  //   breaking changes.)
  //
  // Schemes other than `http`, `https` (or the empty scheme) might be
  // used with implementation specific semantics.
  //
  string type_url = 1;

  // Must be a valid serialized protocol buffer of the above specified type.
  bytes value = 2;
}

35 vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored Normal file
@@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes

102 vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored Normal file
@@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package ptypes

// This file implements conversions between google.protobuf.Duration
// and time.Duration.

import (
	"errors"
	"fmt"
	"time"

	durpb "github.com/golang/protobuf/ptypes/duration"
)

const (
	// Range of a durpb.Duration in seconds, as specified in
	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
	minSeconds = -maxSeconds
)

// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290).
func validateDuration(d *durpb.Duration) error {
	if d == nil {
		return errors.New("duration: nil Duration")
	}
	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
		return fmt.Errorf("duration: %v: seconds out of range", d)
	}
	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
		return fmt.Errorf("duration: %v: nanos out of range", d)
	}
	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
	}
	return nil
}

// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
	if err := validateDuration(p); err != nil {
		return 0, err
	}
	d := time.Duration(p.Seconds) * time.Second
	if int64(d/time.Second) != p.Seconds {
		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
	}
	if p.Nanos != 0 {
		d += time.Duration(p.Nanos)
		if (d < 0) != (p.Nanos < 0) {
			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
		}
	}
	return d, nil
}

// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	nanos -= secs * 1e9
	return &durpb.Duration{
		Seconds: secs,
		Nanos:   int32(nanos),
	}
}
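A small sketch (again, not part of this commit) of the round trip these helpers provide between time.Duration and the proto Duration message:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// google.protobuf.Duration -> time.Duration, with range validation
	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30.5s
}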
Some files were not shown because too many files have changed in this diff.