adapt gce controller to godep updates

commit ee3054dd52, parent 4fb61c73d1
25 changed files with 256 additions and 188 deletions
.gitignore (vendored, 3 changes)

@@ -2,6 +2,9 @@
 ._*
 .DS_Store
 
 # intellij files
 .idea/*
 
+# Eclipse files
+.classpath
+.project
@@ -26,7 +26,8 @@ import (
 	"github.com/golang/glog"
 
 	compute "google.golang.org/api/compute/v1"
-	api_v1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"

@@ -161,7 +162,7 @@ func (b *Backends) Init(pp probeProvider) {
 
 // Get returns a single backend.
 func (b *Backends) Get(port int64) (*compute.BackendService, error) {
-	be, err := b.cloud.GetBackendService(b.namer.BeName(port))
+	be, err := b.cloud.GetGlobalBackendService(b.namer.BeName(port))
 	if err != nil {
 		return nil, err
 	}

@@ -203,7 +204,7 @@ func (b *Backends) create(namedPort *compute.NamedPort, hcLink string, sp Servic
 		Port:     namedPort.Port,
 		PortName: namedPort.Name,
 	}
-	if err := b.cloud.CreateBackendService(bs); err != nil {
+	if err := b.cloud.CreateGlobalBackendService(bs); err != nil {
 		return nil, err
 	}
 	return b.Get(namedPort.Port)

@@ -248,7 +249,7 @@ func (b *Backends) Add(p ServicePort) error {
 		be.Protocol = string(p.Protocol)
 		be.HealthChecks = []string{hcLink}
 		be.Description = p.Description()
-		if err = b.cloud.UpdateBackendService(be); err != nil {
+		if err = b.cloud.UpdateGlobalBackendService(be); err != nil {
 			return err
 		}
 	}

@@ -282,7 +283,7 @@ func (b *Backends) Delete(port int64) (err error) {
 		}
 	}()
 	// Try deleting health checks even if a backend is not found.
-	if err = b.cloud.DeleteBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
+	if err = b.cloud.DeleteGlobalBackendService(name); err != nil && !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
 		return err
 	}

@@ -294,7 +295,7 @@ func (b *Backends) List() ([]interface{}, error) {
 	// TODO: for consistency with the rest of this sub-package this method
 	// should return a list of backend ports.
 	interList := []interface{}{}
-	be, err := b.cloud.ListBackendServices()
+	be, err := b.cloud.ListGlobalBackendServices()
 	if err != nil {
 		return interList, err
 	}

@@ -361,7 +362,7 @@ func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGr
 	newBackends := getBackendsForIGs(addIGs, bm)
 	be.Backends = append(originalBackends, newBackends...)
 
-	if err := b.cloud.UpdateBackendService(be); err != nil {
+	if err := b.cloud.UpdateGlobalBackendService(be); err != nil {
 		if utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
 			glog.V(2).Infof("Updating backend service backends with balancing mode %v failed, will try another mode. err:%v", bm, err)
 			errs = append(errs, err.Error())

@@ -427,14 +428,14 @@ func (b *Backends) Shutdown() error {
 
 // Status returns the status of the given backend by name.
 func (b *Backends) Status(name string) string {
-	backend, err := b.cloud.GetBackendService(name)
+	backend, err := b.cloud.GetGlobalBackendService(name)
 	if err != nil || len(backend.Backends) == 0 {
 		return "Unknown"
 	}
 
 	// TODO: Look at more than one backend's status
 	// TODO: Include port, ip in the status, since it's in the health info.
-	hs, err := b.cloud.GetHealth(name, backend.Backends[0].Group)
+	hs, err := b.cloud.GetGlobalBackendServiceHealth(name, backend.Backends[0].Group)
 	if err != nil || len(hs.HealthStatus) == 0 || hs.HealthStatus[0] == nil {
 		return "Unknown"
 	}

@@ -453,7 +454,7 @@ func applyLegacyHCToHC(existing *compute.HttpHealthCheck, hc *healthchecks.Healt
 	hc.UnhealthyThreshold = existing.UnhealthyThreshold
 }
 
-func applyProbeSettingsToHC(p *api_v1.Probe, hc *healthchecks.HealthCheck) {
+func applyProbeSettingsToHC(p *v1.Probe, hc *healthchecks.HealthCheck) {
 	healthPath := p.Handler.HTTPGet.Path
 	// GCE requires a leading "/" for health check urls.
 	if !strings.HasPrefix(healthPath, "/") {

@@ -87,7 +87,7 @@ func TestBackendPoolAdd(t *testing.T) {
 	beName := namer.BeName(nodePort.Port)
 
 	// Check that the new backend has the right port
-	be, err := f.GetBackendService(beName)
+	be, err := f.GetGlobalBackendService(beName)
 	if err != nil {
 		t.Fatalf("Did not find expected backend %v", beName)
 	}

@@ -171,7 +171,7 @@ func TestBackendPoolUpdate(t *testing.T) {
 	pool.Add(p)
 	beName := namer.BeName(p.Port)
 
-	be, err := f.GetBackendService(beName)
+	be, err := f.GetGlobalBackendService(beName)
 	if err != nil {
 		t.Fatalf("Unexpected err: %v", err)
 	}

@@ -190,7 +190,7 @@ func TestBackendPoolUpdate(t *testing.T) {
 	p.Protocol = utils.ProtocolHTTPS
 	pool.Sync([]ServicePort{p})
 
-	be, err = f.GetBackendService(beName)
+	be, err = f.GetGlobalBackendService(beName)
 	if err != nil {
 		t.Fatalf("Unexpected err retrieving backend service after update: %v", err)
 	}

@@ -217,7 +217,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
 	pool.Add(nodePort)
 	beName := namer.BeName(nodePort.Port)
 
-	be, _ := f.GetBackendService(beName)
+	be, _ := f.GetGlobalBackendService(beName)
 
 	// Mess up the link between backend service and instance group.
 	// This simulates a user doing foolish things through the UI.

@@ -225,7 +225,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
 		{Group: "test edge hop"},
 	}
 	f.calls = []int{}
-	f.UpdateBackendService(be)
+	f.UpdateGlobalBackendService(be)
 
 	pool.Add(nodePort)
 	for _, call := range f.calls {

@@ -233,7 +233,7 @@ func TestBackendPoolChaosMonkey(t *testing.T) {
 			t.Fatalf("Unexpected create for existing backend service")
 		}
 	}
-	gotBackend, err := f.GetBackendService(beName)
+	gotBackend, err := f.GetGlobalBackendService(beName)
 	if err != nil {
 		t.Fatalf("Failed to find a backend with name %v: %v", beName, err)
 	}

@@ -296,12 +296,12 @@ func TestBackendPoolSync(t *testing.T) {
 	// k8s-be-3001--uid - another cluster tagged with uid
 	unrelatedBackends := sets.NewString([]string{"foo", "k8s-be-foo", "k8s--bar--foo", "k8s-be-30001--uid"}...)
 	for _, name := range unrelatedBackends.List() {
-		f.CreateBackendService(&compute.BackendService{Name: name})
+		f.CreateGlobalBackendService(&compute.BackendService{Name: name})
 	}
 
 	namer := &utils.Namer{}
 	// This backend should get deleted again since it is managed by this cluster.
-	f.CreateBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)})
+	f.CreateGlobalBackendService(&compute.BackendService{Name: namer.BeName(deletedPorts[0].Port)})
 
 	// TODO: Avoid casting.
 	// Repopulate the pool with a cloud list, which now includes the 82 port

@@ -311,7 +311,7 @@ func TestBackendPoolSync(t *testing.T) {
 
 	pool.GC(svcNodePorts)
 
-	currBackends, _ := f.ListBackendServices()
+	currBackends, _ := f.ListGlobalBackendServices()
 	currSet := sets.NewString()
 	for _, b := range currBackends.Items {
 		currSet.Insert(b.Name)

@@ -355,7 +355,7 @@ func TestBackendPoolDeleteLegacyHealthChecks(t *testing.T) {
 	}
 
 	// Create backend service with expected name and link to legacy health check
-	f.CreateBackendService(&compute.BackendService{
+	f.CreateGlobalBackendService(&compute.BackendService{
 		Name:         beName,
 		HealthChecks: []string{hc.SelfLink},
 	})

@@ -390,7 +390,7 @@ func TestBackendPoolShutdown(t *testing.T) {
 	// Add a backend-service and verify that it doesn't exist after Shutdown()
 	pool.Add(ServicePort{Port: 80})
 	pool.Shutdown()
-	if _, err := f.GetBackendService(namer.BeName(80)); err == nil {
+	if _, err := f.GetGlobalBackendService(namer.BeName(80)); err == nil {
 		t.Fatalf("%v", err)
 	}
 }

@@ -404,7 +404,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
 	// This will add the instance group k8s-ig to the instance pool
 	pool.Add(ServicePort{Port: 80})
 
-	be, err := f.GetBackendService(namer.BeName(80))
+	be, err := f.GetGlobalBackendService(namer.BeName(80))
 	if err != nil {
 		t.Fatalf("%v", err)
 	}

@@ -415,13 +415,13 @@ func TestBackendInstanceGroupClobbering(t *testing.T) {
 		{Group: "k8s-ig-foo"},
 	}
 	be.Backends = append(be.Backends, newGroups...)
-	if err = f.UpdateBackendService(be); err != nil {
+	if err = f.UpdateGlobalBackendService(be); err != nil {
 		t.Fatalf("Failed to update backend service %v", be.Name)
 	}
 
 	// Make sure repeated adds don't clobber the inserted instance group
 	pool.Add(ServicePort{Port: 80})
-	be, err = f.GetBackendService(namer.BeName(80))
+	be, err = f.GetGlobalBackendService(namer.BeName(80))
 	if err != nil {
 		t.Fatalf("%v", err)
 	}

@@ -463,7 +463,7 @@ func TestBackendCreateBalancingMode(t *testing.T) {
 	}
 
 	pool.Add(nodePort)
-	be, err := f.GetBackendService(namer.BeName(nodePort.Port))
+	be, err := f.GetGlobalBackendService(namer.BeName(nodePort.Port))
 	if err != nil {
 		t.Fatalf("%v", err)
 	}

@@ -44,8 +44,8 @@ type FakeBackendServices struct {
 	errFunc func(op int, be *compute.BackendService) error
 }
 
-// GetBackendService fakes getting a backend service from the cloud.
-func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendService, error) {
+// GetGlobalBackendService fakes getting a backend service from the cloud.
+func (f *FakeBackendServices) GetGlobalBackendService(name string) (*compute.BackendService, error) {
 	f.calls = append(f.calls, utils.Get)
 	obj, exists, err := f.backendServices.GetByKey(name)
 	if !exists {

@@ -62,8 +62,8 @@ func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendSe
 	return nil, fmt.Errorf("backend service %v not found", name)
 }
 
-// CreateBackendService fakes backend service creation.
-func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) error {
+// CreateGlobalBackendService fakes backend service creation.
+func (f *FakeBackendServices) CreateGlobalBackendService(be *compute.BackendService) error {
 	if f.errFunc != nil {
 		if err := f.errFunc(utils.Create, be); err != nil {
 			return err

@@ -74,8 +74,8 @@ func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) e
 	return f.backendServices.Update(be)
 }
 
-// DeleteBackendService fakes backend service deletion.
-func (f *FakeBackendServices) DeleteBackendService(name string) error {
+// DeleteGlobalBackendService fakes backend service deletion.
+func (f *FakeBackendServices) DeleteGlobalBackendService(name string) error {
 	f.calls = append(f.calls, utils.Delete)
 	svc, exists, err := f.backendServices.GetByKey(name)
 	if !exists {

@@ -87,8 +87,8 @@ func (f *FakeBackendServices) DeleteBackendService(name string) error {
 	return f.backendServices.Delete(svc)
 }
 
-// ListBackendServices fakes backend service listing.
-func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList, error) {
+// ListGlobalBackendServices fakes backend service listing.
+func (f *FakeBackendServices) ListGlobalBackendServices() (*compute.BackendServiceList, error) {
 	var svcs []*compute.BackendService
 	for _, s := range f.backendServices.List() {
 		svc := s.(*compute.BackendService)

@@ -97,8 +97,8 @@ func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList
 	return &compute.BackendServiceList{Items: svcs}, nil
 }
 
-// UpdateBackendService fakes updating a backend service.
-func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) error {
+// UpdateGlobalBackendService fakes updating a backend service.
+func (f *FakeBackendServices) UpdateGlobalBackendService(be *compute.BackendService) error {
 	if f.errFunc != nil {
 		if err := f.errFunc(utils.Update, be); err != nil {
 			return err

@@ -108,9 +108,9 @@ func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) e
 	return f.backendServices.Update(be)
 }
 
-// GetHealth fakes getting backend service health.
-func (f *FakeBackendServices) GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
-	be, err := f.GetBackendService(name)
+// GetGlobalBackendServiceHealth fakes getting backend service health.
+func (f *FakeBackendServices) GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
+	be, err := f.GetGlobalBackendService(name)
 	if err != nil {
 		return nil, err
 	}

@@ -42,10 +42,10 @@ type BackendPool interface {
 
 // BackendServices is an interface for managing gce backend services.
 type BackendServices interface {
-	GetBackendService(name string) (*compute.BackendService, error)
-	UpdateBackendService(bg *compute.BackendService) error
-	CreateBackendService(bg *compute.BackendService) error
-	DeleteBackendService(name string) error
-	ListBackendServices() (*compute.BackendServiceList, error)
-	GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
+	GetGlobalBackendService(name string) (*compute.BackendService, error)
+	UpdateGlobalBackendService(bg *compute.BackendService) error
+	CreateGlobalBackendService(bg *compute.BackendService) error
+	DeleteGlobalBackendService(name string) error
+	ListGlobalBackendServices() (*compute.BackendServiceList, error)
+	GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
 }
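The interfaces.go hunk above is the heart of the backends change: every BackendServices method gains a Global infix to track the renamed methods in the updated, vendored GCE cloudprovider. The sketch below is not part of the diff; it shows how a caller migrates to the new names. The interface body is copied from the hunk, while the ensureBackendService helper and its error handling are illustrative assumptions.

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// BackendServices mirrors the renamed interface from the hunk above.
type BackendServices interface {
	GetGlobalBackendService(name string) (*compute.BackendService, error)
	UpdateGlobalBackendService(bg *compute.BackendService) error
	CreateGlobalBackendService(bg *compute.BackendService) error
	DeleteGlobalBackendService(name string) error
	ListGlobalBackendServices() (*compute.BackendServiceList, error)
	GetGlobalBackendServiceHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
}

// ensureBackendService is a hypothetical helper, not part of the commit:
// fetch a service by name and create it if the lookup fails. The real
// callers in this diff (Backends.Get, Backends.create) follow the same
// get-then-create pattern with the new Global* method names.
func ensureBackendService(cloud BackendServices, name string, port int64) (*compute.BackendService, error) {
	if be, err := cloud.GetGlobalBackendService(name); err == nil {
		return be, nil
	}
	bs := &compute.BackendService{Name: name, Port: port}
	if err := cloud.CreateGlobalBackendService(bs); err != nil {
		return nil, fmt.Errorf("creating backend service %v: %v", name, err)
	}
	return cloud.GetGlobalBackendService(name)
}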
@@ -224,7 +224,7 @@ func getGCEClient(config io.Reader) *gce.GCECloud {
 		// user has no need for Ingress in this case. If they grant
 		// permissions to the node they will have to restart the controller
 		// manually to re-create the client.
-		if _, err = cloud.ListBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {
+		if _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {
 			return cloud
 		}
 		glog.Warningf("Failed to list backend services, retrying: %v", err)

@@ -428,7 +428,7 @@ func TestLbChangeStaticIP(t *testing.T) {
 	}
 
 	ing.Annotations = map[string]string{staticIPNameKey: "testip"}
-	cm.fakeLbs.ReserveGlobalStaticIP("testip", "1.2.3.4")
+	cm.fakeLbs.ReserveGlobalAddress(&compute.Address{Name: "testip", Address: "1.2.3.4"})
 
 	// Second sync reassigns 1.2.3.4 to existing forwarding rule (by recreating it)
 	lbc.sync(ingStoreKey)

@@ -65,7 +65,7 @@ func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager
 		testDefaultBeNodePort,
 		namer,
 	)
-	frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(namer), namer)
+	frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallsProvider(), namer)
 	cm := &ClusterManager{
 		ClusterNamer: namer,
 		instancePool: nodePool,

@@ -76,20 +76,19 @@ func TestInstancesAddedToZones(t *testing.T) {
 	lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
 	gotZonesToNode := cm.fakeIGs.GetInstancesByZone()
 
-	i := 0
+	if cm.fakeIGs.Ports[0] != testPort {
+		t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
+	}
+
 	for z, nodeNames := range zoneToNode {
 		if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
 			t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
 		}
-		if cm.fakeIGs.Ports[i] != testPort {
-			t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
-		}
 		expNodes := sets.NewString(nodeNames...)
 		gotNodes := sets.NewString(gotZonesToNode[z]...)
 		if !gotNodes.Equal(expNodes) {
 			t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
 		}
-		i++
 	}
 }

@@ -18,86 +18,66 @@ package firewalls
 
 import (
 	"fmt"
-	"strconv"
 
 	compute "google.golang.org/api/compute/v1"
-	netset "k8s.io/kubernetes/pkg/util/net/sets"
 
 	"k8s.io/ingress/controllers/gce/utils"
 )
 
 type fakeFirewallsProvider struct {
-	fw    map[string]*compute.Firewall
-	namer *utils.Namer
+	fw         map[string]*compute.Firewall
+	networkUrl string
 }
 
 // NewFakeFirewallsProvider creates a fake for firewall rules.
-func NewFakeFirewallsProvider(namer *utils.Namer) *fakeFirewallsProvider {
+func NewFakeFirewallsProvider() *fakeFirewallsProvider {
 	return &fakeFirewallsProvider{
-		fw:    make(map[string]*compute.Firewall),
-		namer: namer,
+		fw: make(map[string]*compute.Firewall),
 	}
 }
 
-func (f *fakeFirewallsProvider) GetFirewall(prefixedName string) (*compute.Firewall, error) {
-	rule, exists := f.fw[prefixedName]
+func (ff *fakeFirewallsProvider) GetFirewall(name string) (*compute.Firewall, error) {
+	rule, exists := ff.fw[name]
 	if exists {
 		return rule, nil
 	}
 	return nil, utils.FakeGoogleAPINotFoundErr()
 }
 
-func (f *fakeFirewallsProvider) CreateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error {
-	prefixedName := f.namer.FrName(name)
-	strPorts := []string{}
-	for _, p := range ports {
-		strPorts = append(strPorts, strconv.FormatInt(p, 10))
-	}
-	if _, exists := f.fw[prefixedName]; exists {
-		return fmt.Errorf("firewall rule %v already exists", prefixedName)
-	}
-
-	f.fw[prefixedName] = &compute.Firewall{
-		// To accurately mimic the cloudprovider we need to add the k8s-fw
-		// prefix to the given rule name.
-		Name:         prefixedName,
-		SourceRanges: srcRange.StringSlice(),
-		Allowed:      []*compute.FirewallAllowed{{Ports: strPorts}},
-		TargetTags:   hosts, // WARNING: This is actually not correct, but good enough for testing this package
+func (ff *fakeFirewallsProvider) CreateFirewall(f *compute.Firewall) error {
+	if _, exists := ff.fw[f.Name]; exists {
+		return fmt.Errorf("firewall rule %v already exists", f.Name)
 	}
+	ff.fw[f.Name] = f
 	return nil
 }
 
-func (f *fakeFirewallsProvider) DeleteFirewall(name string) error {
-	// We need the full name for the same reason as CreateFirewall.
-	prefixedName := f.namer.FrName(name)
-	_, exists := f.fw[prefixedName]
+func (ff *fakeFirewallsProvider) DeleteFirewall(name string) error {
+	_, exists := ff.fw[name]
 	if !exists {
 		return utils.FakeGoogleAPINotFoundErr()
 	}
 
-	delete(f.fw, prefixedName)
+	delete(ff.fw, name)
 	return nil
 }
 
-func (f *fakeFirewallsProvider) UpdateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error {
-	strPorts := []string{}
-	for _, p := range ports {
-		strPorts = append(strPorts, strconv.FormatInt(p, 10))
-	}
-
-	// We need the full name for the same reason as CreateFirewall.
-	prefixedName := f.namer.FrName(name)
-	_, exists := f.fw[prefixedName]
+func (ff *fakeFirewallsProvider) UpdateFirewall(f *compute.Firewall) error {
+	_, exists := ff.fw[f.Name]
 	if !exists {
-		return fmt.Errorf("update failed for rule %v, srcRange %v ports %v, rule not found", prefixedName, srcRange, ports)
+		return fmt.Errorf("update failed for rule %v, srcRange %v ports %+v, rule not found", f.Name, f.SourceRanges, f.Allowed)
 	}
 
-	f.fw[prefixedName] = &compute.Firewall{
-		Name:         name,
-		SourceRanges: srcRange.StringSlice(),
-		Allowed:      []*compute.FirewallAllowed{{Ports: strPorts}},
-		TargetTags:   hosts, // WARNING: This is actually not correct, but good enough for testing this package
-	}
+	ff.fw[f.Name] = f
 	return nil
 }
+
+func (ff *fakeFirewallsProvider) NetworkURL() string {
+	return ff.networkUrl
+}
+
+func (ff *fakeFirewallsProvider) GetNodeTags(nodeNames []string) ([]string, error) {
+	return nodeNames, nil
+}

@@ -35,18 +35,18 @@ var l7SrcRanges = []string{"130.211.0.0/22", "35.191.0.0/16"}
 type FirewallRules struct {
 	cloud     Firewall
 	namer     *utils.Namer
-	srcRanges netset.IPNet
+	srcRanges []string
 }
 
 // NewFirewallPool creates a new firewall rule manager.
 // cloud: the cloud object implementing Firewall.
 // namer: cluster namer.
 func NewFirewallPool(cloud Firewall, namer *utils.Namer) SingleFirewallPool {
-	srcNetSet, err := netset.ParseIPNets(l7SrcRanges...)
+	_, err := netset.ParseIPNets(l7SrcRanges...)
 	if err != nil {
 		glog.Fatalf("Could not parse L7 src ranges %v for firewall rule: %v", l7SrcRanges, err)
 	}
-	return &FirewallRules{cloud: cloud, namer: namer, srcRanges: srcNetSet}
+	return &FirewallRules{cloud: cloud, namer: namer, srcRanges: l7SrcRanges}
 }
 
 // Sync sync firewall rules with the cloud.

@@ -60,9 +60,15 @@ func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
 	// instead of the whole name.
 	name := fr.namer.FrName(suffix)
 	rule, _ := fr.cloud.GetFirewall(name)
+
+	firewall, err := fr.createFirewallObject(name, "GCE L7 firewall rule", nodePorts, nodeNames)
+	if err != nil {
+		return err
+	}
+
 	if rule == nil {
 		glog.Infof("Creating global l7 firewall rule %v", name)
-		return fr.cloud.CreateFirewall(suffix, "GCE L7 firewall rule", fr.srcRanges, nodePorts, nodeNames)
+		return fr.cloud.CreateFirewall(firewall)
 	}
 
 	requiredPorts := sets.NewString()

@@ -85,17 +91,17 @@ func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
 		glog.V(4).Info("Firewall does not need update of ports or source ranges")
 		return nil
 	}
 
 	glog.V(3).Infof("Firewall %v already exists, updating nodeports %v", name, nodePorts)
-	return fr.cloud.UpdateFirewall(suffix, "GCE L7 firewall", fr.srcRanges, nodePorts, nodeNames)
+	return fr.cloud.UpdateFirewall(firewall)
 }
 
 // Shutdown shuts down this firewall rules manager.
 func (fr *FirewallRules) Shutdown() error {
-	glog.Infof("Deleting firewall with suffix %v", fr.namer.FrSuffix())
-	err := fr.cloud.DeleteFirewall(fr.namer.FrSuffix())
+	name := fr.namer.FrName(fr.namer.FrSuffix())
+	glog.Infof("Deleting firewall %v", name)
+	err := fr.cloud.DeleteFirewall(name)
 	if err != nil && utils.IsHTTPErrorCode(err, 404) {
-		glog.Infof("Firewall with suffix %v didn't exist at Shutdown", fr.namer.FrSuffix())
+		glog.Infof("Firewall with name %v didn't exist at Shutdown", name)
 		return nil
 	}
 	return err

@@ -107,3 +113,31 @@ func (fr *FirewallRules) Shutdown() error {
 func (fr *FirewallRules) GetFirewall(name string) (*compute.Firewall, error) {
 	return fr.cloud.GetFirewall(name)
 }
+
+func (fr *FirewallRules) createFirewallObject(firewallName, description string, nodePorts []int64, nodeNames []string) (*compute.Firewall, error) {
+	ports := make([]string, len(nodePorts))
+	for ix := range nodePorts {
+		ports[ix] = strconv.Itoa(int(nodePorts[ix]))
+	}
+
+	// If the node tags to be used for this cluster have been predefined in the
+	// provider config, just use them. Otherwise, invoke computeHostTags method to get the tags.
+	targetTags, err := fr.cloud.GetNodeTags(nodeNames)
+	if err != nil {
+		return nil, err
+	}
+
+	return &compute.Firewall{
+		Name:         firewallName,
+		Description:  description,
+		SourceRanges: fr.srcRanges,
+		Network:      fr.cloud.NetworkURL(),
+		Allowed: []*compute.FirewallAllowed{
+			{
+				IPProtocol: "tcp",
+				Ports:      ports,
+			},
+		},
+		TargetTags: targetTags,
+	}, nil
+}

@@ -22,14 +22,11 @@ import (
 
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/ingress/controllers/gce/utils"
-	netset "k8s.io/kubernetes/pkg/util/net/sets"
 )
 
 const allCIDR = "0.0.0.0/0"
 
 func TestSyncFirewallPool(t *testing.T) {
 	namer := utils.NewNamer("ABC", "XYZ")
-	fwp := NewFakeFirewallsProvider(namer)
+	fwp := NewFakeFirewallsProvider()
 	fp := NewFirewallPool(fwp, namer)
 	ruleName := namer.FrName(namer.FrSuffix())
 

@@ -50,12 +47,16 @@
 	}
 	verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
 
-	srcRanges, _ := netset.ParseIPNets(allCIDR)
-	err = fwp.UpdateFirewall(namer.FrSuffix(), "", srcRanges, nodePorts, nodes)
+	firewall, err := fp.(*FirewallRules).createFirewallObject(namer.FrName(namer.FrSuffix()), "", nodePorts, nodes)
+	if err != nil {
+		t.Errorf("unexpected err when creating firewall object, err: %v", err)
+	}
+
+	err = fwp.UpdateFirewall(firewall)
 	if err != nil {
 		t.Errorf("failed to update firewall rule, err: %v", err)
 	}
-	verifyFirewallRule(fwp, ruleName, nodePorts, nodes, []string{allCIDR}, t)
+	verifyFirewallRule(fwp, ruleName, nodePorts, nodes, l7SrcRanges, t)
 
 	// Run Sync and expect l7 src ranges to be returned
 	err = fp.Sync(nodePorts, nodes)

@@ -18,7 +18,6 @@ package firewalls
 
 import (
 	compute "google.golang.org/api/compute/v1"
-	netset "k8s.io/kubernetes/pkg/util/net/sets"
 )
 
 // SingleFirewallPool syncs the firewall rule for L7 traffic.

@@ -32,8 +31,10 @@ type SingleFirewallPool interface {
 // This interface is a little different from the rest because it dovetails into
 // the same firewall methods used by the TCPLoadBalancer.
 type Firewall interface {
-	CreateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error
+	CreateFirewall(f *compute.Firewall) error
 	GetFirewall(name string) (*compute.Firewall, error)
 	DeleteFirewall(name string) error
-	UpdateFirewall(name, msgTag string, srcRange netset.IPNet, ports []int64, hosts []string) error
+	UpdateFirewall(f *compute.Firewall) error
+	GetNodeTags(nodeNames []string) ([]string, error)
+	NetworkURL() string
 }
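For the Firewall interface, the update goes beyond a rename: rules are now passed around as whole *compute.Firewall objects instead of (name, msgTag, srcRange, ports, hosts) tuples, which is why createFirewallObject and the GetNodeTags/NetworkURL accessors appear in the hunks above. The sketch below shows the new calling convention against only the interface from this hunk; syncRule itself is a hypothetical caller, not code from the commit.

package example

import (
	"strconv"

	compute "google.golang.org/api/compute/v1"
)

// Firewall mirrors the updated interface from the hunk above.
type Firewall interface {
	CreateFirewall(f *compute.Firewall) error
	GetFirewall(name string) (*compute.Firewall, error)
	DeleteFirewall(name string) error
	UpdateFirewall(f *compute.Firewall) error
	GetNodeTags(nodeNames []string) ([]string, error)
	NetworkURL() string
}

// syncRule is a hypothetical caller: the rule is assembled as a single
// *compute.Firewall first (much like createFirewallObject above), then
// created or updated wholesale.
func syncRule(cloud Firewall, name string, srcRanges []string, ports []int64, nodes []string) error {
	strPorts := make([]string, len(ports))
	for i, p := range ports {
		strPorts[i] = strconv.FormatInt(p, 10)
	}
	tags, err := cloud.GetNodeTags(nodes)
	if err != nil {
		return err
	}
	rule := &compute.Firewall{
		Name:         name,
		SourceRanges: srcRanges,
		Network:      cloud.NetworkURL(),
		Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: strPorts}},
		TargetTags:   tags,
	}
	if existing, _ := cloud.GetFirewall(name); existing == nil {
		return cloud.CreateFirewall(rule)
	}
	return cloud.UpdateFirewall(rule)
}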
@@ -18,6 +18,7 @@ package instances
 
 import (
 	"fmt"
+	"strings"
 
 	compute "google.golang.org/api/compute/v1"
 	"k8s.io/apimachinery/pkg/util/sets"

@@ -110,7 +111,8 @@ func (f *FakeInstanceGroups) ListInstancesInInstanceGroup(name, zone string, sta
 }
 
 // AddInstancesToInstanceGroup fakes adding instances to an instance group.
-func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error {
+func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error {
+	instanceNames := toInstanceNames(instanceRefs)
 	f.calls = append(f.calls, utils.AddInstances)
 	f.instances.Insert(instanceNames...)
 	if _, ok := f.zonesToInstances[zone]; !ok {

@@ -126,7 +128,8 @@ func (f *FakeInstanceGroups) GetInstancesByZone() map[string][]string {
 }
 
 // RemoveInstancesFromInstanceGroup fakes removing instances from an instance group.
-func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceNames []string) error {
+func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error {
+	instanceNames := toInstanceNames(instanceRefs)
 	f.calls = append(f.calls, utils.RemoveInstances)
 	f.instances.Delete(instanceNames...)
 	l, ok := f.zonesToInstances[zone]

@@ -145,10 +148,23 @@ func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string,
 	return nil
 }
 
-// AddPortToInstanceGroup fakes adding ports to an Instance Group.
-func (f *FakeInstanceGroups) AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error) {
-	f.Ports = append(f.Ports, port)
-	return &compute.NamedPort{Name: f.namer.BeName(port), Port: port}, nil
+func (f *FakeInstanceGroups) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error {
+	found := false
+	for _, ig := range f.instanceGroups {
+		if ig.Name == igName && ig.Zone == zone {
+			found = true
+			break
+		}
+	}
+	if !found {
+		return fmt.Errorf("Failed to find instance group %q in zone %q", igName, zone)
+	}
+
+	f.Ports = f.Ports[:0]
+	for _, port := range namedPorts {
+		f.Ports = append(f.Ports, port.Port)
+	}
+	return nil
 }
 
 // getInstanceList returns an instance list based on the given names.

@@ -157,9 +173,7 @@ func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances
 	instanceNames := nodeNames.List()
 	computeInstances := []*compute.InstanceWithNamedPorts{}
 	for _, name := range instanceNames {
-		instanceLink := fmt.Sprintf(
-			"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
-			"project", "zone", name)
+		instanceLink := getInstanceUrl(name)
 		computeInstances = append(
 			computeInstances, &compute.InstanceWithNamedPorts{
 				Instance: instanceLink})

@@ -168,3 +182,26 @@
 		Items: computeInstances,
 	}
 }
+
+func (f *FakeInstanceGroups) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) {
+	for _, ins := range instanceNames {
+		instanceLink := getInstanceUrl(ins)
+		refs = append(refs, &compute.InstanceReference{Instance: instanceLink})
+	}
+	return refs
+}
+
+func getInstanceUrl(instanceName string) string {
+	return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
+		"project", "zone", instanceName)
+}
+
+func toInstanceNames(instanceRefs []*compute.InstanceReference) []string {
+	instanceNames := make([]string, len(instanceRefs))
+	for ix := range instanceRefs {
+		url := instanceRefs[ix].Instance
+		parts := strings.Split(url, "/")
+		instanceNames[ix] = parts[len(parts)-1]
+	}
+	return instanceNames
+}

@@ -63,13 +63,15 @@ func (i *Instances) Init(zl zoneLister) {
 // all of which have the exact same named port.
 func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.InstanceGroup, *compute.NamedPort, error) {
 	igs := []*compute.InstanceGroup{}
-	namedPort := &compute.NamedPort{}
+	// TODO: move port naming to namer
+	namedPort := &compute.NamedPort{Name: fmt.Sprintf("port%v", port), Port: port}
 
 	zones, err := i.ListZones()
 	if err != nil {
 		return igs, namedPort, err
 	}
 
+	defer i.snapshotter.Add(name, struct{}{})
 	for _, zone := range zones {
 		ig, _ := i.Get(name, zone)
 		var err error

@@ -82,10 +84,19 @@ func (i *Instances) AddInstanceGroup(name string, port int64) ([]*compute.Instan
 		} else {
 			glog.V(3).Infof("Instance group %v already exists in zone %v, adding port %d to it", name, zone, port)
 		}
-		defer i.snapshotter.Add(name, struct{}{})
-		namedPort, err = i.cloud.AddPortToInstanceGroup(ig, port)
-		if err != nil {
-			return nil, nil, err
+
+		found := false
+		for _, np := range ig.NamedPorts {
+			if np.Port == port {
+				glog.V(3).Infof("Instance group %v already has named port %+v", ig.Name, np)
+				found = true
+				break
+			}
+		}
+		if !found {
+			if err := i.cloud.SetNamedPortsOfInstanceGroup(ig.Name, zone, append(ig.NamedPorts, namedPort)); err != nil {
+				return nil, nil, err
+			}
 		}
 		igs = append(igs, ig)
 	}

@@ -173,7 +184,7 @@ func (i *Instances) Add(groupName string, names []string) error {
 	errs := []error{}
 	for zone, nodeNames := range i.splitNodesByZone(names) {
 		glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone)
-		if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, nodeNames); err != nil {
+		if err := i.cloud.AddInstancesToInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil {
 			errs = append(errs, err)
 		}
 	}

@@ -187,8 +198,8 @@ func (i *Instances) Add(groupName string, names []string) error {
 func (i *Instances) Remove(groupName string, names []string) error {
 	errs := []error{}
 	for zone, nodeNames := range i.splitNodesByZone(names) {
-		glog.V(1).Infof("Adding nodes %v to %v in zone %v", nodeNames, groupName, zone)
-		if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, nodeNames); err != nil {
+		glog.V(1).Infof("Removing nodes %v from %v in zone %v", nodeNames, groupName, zone)
+		if err := i.cloud.RemoveInstancesFromInstanceGroup(groupName, zone, i.cloud.ToInstanceReferences(zone, nodeNames)); err != nil {
 			errs = append(errs, err)
 		}
 	}

@@ -50,7 +50,8 @@ type InstanceGroups interface {
 
 	// TODO: Refactor for modulatiry.
 	ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error)
-	AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error
-	RemoveInstancesFromInstanceGroup(name, zone string, instanceName []string) error
-	AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error)
+	AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
+	RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
+	ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference)
+	SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error
 }
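For instance groups, membership calls now take fully-qualified []*compute.InstanceReference values rather than bare node names, with ToInstanceReferences doing the conversion, and the incremental AddPortToInstanceGroup is replaced by the set-style SetNamedPortsOfInstanceGroup. A minimal sketch of the new add path follows; the interface subset is copied from the hunk above, while the addNodes wrapper is hypothetical.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// InstanceGroups mirrors the membership-related subset of the updated
// interface from the hunk above.
type InstanceGroups interface {
	AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
	RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error
	ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference)
	SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error
}

// addNodes is a hypothetical wrapper: node names must now be converted to
// instance-reference URLs before the add call, which is exactly what
// Instances.Add does above via i.cloud.ToInstanceReferences.
func addNodes(cloud InstanceGroups, group, zone string, nodeNames []string) error {
	refs := cloud.ToInstanceReferences(zone, nodeNames)
	return cloud.AddInstancesToInstanceGroup(group, zone, refs)
}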
@@ -132,10 +132,10 @@ func (f *FakeLoadBalancers) CreateGlobalForwardingRule(proxyLink, ip, name, port
 }
 
 // SetProxyForGlobalForwardingRule fakes setting a global forwarding rule.
-func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxyLink string) error {
+func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(forwardingRuleName, proxyLink string) error {
 	f.calls = append(f.calls, "SetProxyForGlobalForwardingRule")
 	for i := range f.Fw {
-		if f.Fw[i].Name == fw.Name {
+		if f.Fw[i].Name == forwardingRuleName {
 			f.Fw[i].Target = proxyLink
 		}
 	}

@@ -397,20 +397,16 @@ func (f *FakeLoadBalancers) CheckURLMap(t *testing.T, l7 *L7, expectedMap map[st
 
 // Static IP fakes
 
-// ReserveGlobalStaticIP fakes out static IP reservation.
-func (f *FakeLoadBalancers) ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error) {
-	f.calls = append(f.calls, "ReserveGlobalStaticIP")
-	ip := &compute.Address{
-		Name:    name,
-		Address: IPAddress,
-	}
-	f.IP = append(f.IP, ip)
-	return ip, nil
+// ReserveGlobalAddress fakes out static IP reservation.
+func (f *FakeLoadBalancers) ReserveGlobalAddress(addr *compute.Address) error {
+	f.calls = append(f.calls, "ReserveGlobalAddress")
+	f.IP = append(f.IP, addr)
+	return nil
 }
 
-// GetGlobalStaticIP fakes out static IP retrieval.
-func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, error) {
-	f.calls = append(f.calls, "GetGlobalStaticIP")
+// GetGlobalAddress fakes out static IP retrieval.
+func (f *FakeLoadBalancers) GetGlobalAddress(name string) (*compute.Address, error) {
+	f.calls = append(f.calls, "GetGlobalAddress")
 	for i := range f.IP {
 		if f.IP[i].Name == name {
 			return f.IP[i], nil

@@ -419,9 +415,9 @@ func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, er
 	return nil, fmt.Errorf("static IP %v not found", name)
 }
 
-// DeleteGlobalStaticIP fakes out static IP deletion.
-func (f *FakeLoadBalancers) DeleteGlobalStaticIP(name string) error {
-	f.calls = append(f.calls, "DeleteGlobalStaticIP")
+// DeleteGlobalAddress fakes out static IP deletion.
+func (f *FakeLoadBalancers) DeleteGlobalAddress(name string) error {
+	f.calls = append(f.calls, "DeleteGlobalAddress")
 	ip := []*compute.Address{}
 	for i := range f.IP {
 		if f.IP[i].Name != name {

@@ -30,7 +30,7 @@ type LoadBalancers interface {
 	GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error)
 	CreateGlobalForwardingRule(proxyLink, ip, name, portRange string) (*compute.ForwardingRule, error)
 	DeleteGlobalForwardingRule(name string) error
-	SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxy string) error
+	SetProxyForGlobalForwardingRule(fw, proxy string) error
 
 	// UrlMaps
 	GetUrlMap(name string) (*compute.UrlMap, error)

@@ -57,9 +57,10 @@ type LoadBalancers interface {
 	DeleteSslCertificate(name string) error
 
 	// Static IP
-	ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error)
-	GetGlobalStaticIP(name string) (*compute.Address, error)
-	DeleteGlobalStaticIP(name string) error
+
+	ReserveGlobalAddress(addr *compute.Address) error
+	GetGlobalAddress(name string) (*compute.Address, error)
+	DeleteGlobalAddress(name string) error
 }
 
 // LoadBalancerPool is an interface to manage the cloud resources associated
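The static-IP surface follows the same pattern: *GlobalStaticIP becomes *GlobalAddress, and reservation now takes a *compute.Address and returns only an error, so a caller that needs the reserved object must fetch it again — which is exactly the extra GetGlobalAddress call added to checkStaticIP below. A sketch of that two-step flow, assuming only the interface subset above (ensureStaticIP is hypothetical):

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// addresses mirrors the renamed static-IP subset of the LoadBalancers
// interface from the hunk above.
type addresses interface {
	ReserveGlobalAddress(addr *compute.Address) error
	GetGlobalAddress(name string) (*compute.Address, error)
	DeleteGlobalAddress(name string) error
}

// ensureStaticIP is a hypothetical helper: because ReserveGlobalAddress no
// longer returns the reserved object, the address is fetched again after
// reservation, mirroring what checkStaticIP does in the hunks below.
func ensureStaticIP(cloud addresses, name, ip string) (*compute.Address, error) {
	if addr, err := cloud.GetGlobalAddress(name); err == nil && addr != nil {
		return addr, nil
	}
	if err := cloud.ReserveGlobalAddress(&compute.Address{Name: name, Address: ip}); err != nil {
		return nil, err
	}
	return cloud.GetGlobalAddress(name)
}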
@@ -544,7 +544,7 @@ func (l *L7) checkForwardingRule(name, proxyLink, ip, portRange string) (fw *com
 	} else {
 		glog.Infof("Forwarding rule %v has the wrong proxy, setting %v overwriting %v",
 			fw.Name, fw.Target, proxyLink)
-		if err := l.cloud.SetProxyForGlobalForwardingRule(fw, proxyLink); err != nil {
+		if err := l.cloud.SetProxyForGlobalForwardingRule(fw.Name, proxyLink); err != nil {
 			return nil, err
 		}
 	}

@@ -576,7 +576,7 @@ func (l *L7) getEffectiveIP() (string, bool) {
 	if l.runtimeInfo.StaticIPName != "" {
 		// Existing static IPs allocated to forwarding rules will get orphaned
 		// till the Ingress is torn down.
-		if ip, err := l.cloud.GetGlobalStaticIP(l.runtimeInfo.StaticIPName); err != nil || ip == nil {
+		if ip, err := l.cloud.GetGlobalAddress(l.runtimeInfo.StaticIPName); err != nil || ip == nil {
 			glog.Warningf("The given static IP name %v doesn't translate to an existing global static IP, ignoring it and allocating a new IP: %v",
 				l.runtimeInfo.StaticIPName, err)
 		} else {

@@ -629,10 +629,10 @@ func (l *L7) checkStaticIP() (err error) {
 		return nil
 	}
 	staticIPName := l.namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, l.Name))
-	ip, _ := l.cloud.GetGlobalStaticIP(staticIPName)
+	ip, _ := l.cloud.GetGlobalAddress(staticIPName)
 	if ip == nil {
 		glog.Infof("Creating static ip %v", staticIPName)
-		ip, err = l.cloud.ReserveGlobalStaticIP(staticIPName, l.fw.IPAddress)
+		err = l.cloud.ReserveGlobalAddress(&compute.Address{Name: staticIPName, Address: l.fw.IPAddress})
 		if err != nil {
 			if utils.IsHTTPErrorCode(err, http.StatusConflict) ||
 				utils.IsHTTPErrorCode(err, http.StatusBadRequest) {

@@ -642,6 +642,10 @@ func (l *L7) checkStaticIP() (err error) {
 			}
 			return err
 		}
+		ip, err = l.cloud.GetGlobalAddress(staticIPName)
+		if err != nil {
+			return err
+		}
 	}
 	l.ip = ip
 	return nil

@@ -903,7 +907,7 @@ func (l *L7) Cleanup() error {
 	}
 	if l.ip != nil {
 		glog.V(2).Infof("Deleting static IP %v(%v)", l.ip.Name, l.ip.Address)
-		if err := utils.IgnoreHTTPNotFound(l.cloud.DeleteGlobalStaticIP(l.ip.Name)); err != nil {
+		if err := utils.IgnoreHTTPNotFound(l.cloud.DeleteGlobalAddress(l.ip.Name)); err != nil {
 			return err
 		}
 		l.ip = nil

@@ -289,7 +289,7 @@ func TestCreateBothLoadBalancers(t *testing.T) {
 	if err != nil || fw.Target != tp.SelfLink {
 		t.Fatalf("%v", err)
 	}
-	ip, err := f.GetGlobalStaticIP(f.fwName(false))
+	ip, err := f.GetGlobalAddress(f.fwName(false))
 	if err != nil || ip.Address != fw.IPAddress || ip.Address != fws.IPAddress {
 		t.Fatalf("%v", err)
 	}

@@ -30,14 +30,13 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	flag "github.com/spf13/pflag"
-	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/api/core/v1"
-	api_v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

@@ -122,7 +121,7 @@ var (
 		`Path used to health-check a backend service. All Services must serve
 		a 200 page on this path. Currently this is only configurable globally.`)
 
-	watchNamespace = flags.String("watch-namespace", api.NamespaceAll,
+	watchNamespace = flags.String("watch-namespace", v1.NamespaceAll,
 		`Namespace to watch for Ingress/Services/Endpoints.`)
 
 	verbose = flags.Bool("verbose", false,

@@ -287,7 +286,7 @@ func newNamer(kubeClient kubernetes.Interface, clusterName string, fwName string
 	}
 
 	namer := utils.NewNamer(name, fw_name)
-	uidVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	uidVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
 
 	// Start a goroutine to poll the cluster UID config map
 	// We don't watch because we know exactly which configmap we want and this

@@ -359,7 +358,7 @@ func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_n
 // Use getFlagOrLookupVault to obtain a stored or overridden value for the firewall name.
 // else, use the cluster UID as a backup (this retains backwards compatibility).
 func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string) (string, error) {
-	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
 	if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
 		return "", err
 	} else if fw_name != "" {

@@ -377,7 +376,7 @@ func getFirewallName(kubeClient kubernetes.Interface, name, cluster_uid string)
 // - remember that "" is the cluster uid
 // else, allocate a new uid
 func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error) {
-	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
+	cfgVault := storage.NewConfigMapVault(kubeClient, metav1.NamespaceSystem, uidConfigMapName)
 	if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
 		return "", err
 	} else if name != "" {

@@ -385,7 +384,7 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error)
 	}
 
 	// Check if the cluster has an Ingress with ip
-	ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(meta_v1.ListOptions{
+	ings, err := kubeClient.Extensions().Ingresses(metav1.NamespaceAll).List(metav1.ListOptions{
 		LabelSelector: labels.Everything().String(),
 	})
 	if err != nil {

@@ -419,10 +418,10 @@ func getClusterUID(kubeClient kubernetes.Interface, name string) (string, error)
 
 // getNodePort waits for the Service, and returns it's first node port.
 func getNodePort(client kubernetes.Interface, ns, name string) (port, nodePort int32, err error) {
-	var svc *api_v1.Service
+	var svc *v1.Service
 	glog.V(3).Infof("Waiting for %v/%v", ns, name)
 	wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
-		svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
+		svc, err = client.Core().Services(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}

@@ -17,7 +17,7 @@ limitations under the License.
 package serviceupstream
 
 import (
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/ingress/core/pkg/ingress/annotations/parser"
 )
 

@@ -21,8 +21,8 @@ import (
 
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	api "k8s.io/client-go/pkg/api/v1"
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	api "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 )
 
 func buildIngress() *extensions.Ingress {

@@ -23,8 +23,8 @@ import (
 
 	"github.com/golang/glog"
 
-	api "k8s.io/client-go/pkg/api/v1"
-	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	api "k8s.io/api/core/v1"
+	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/client-go/tools/cache"
 
 	"k8s.io/ingress/core/pkg/ingress"

@@ -26,7 +26,7 @@ import (
 
 	"github.com/golang/glog"
 
-	api "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"

@@ -130,7 +130,7 @@ func (s statusSync) Shutdown() {
 	}
 
 	glog.Infof("removing address from ingress status (%v)", addrs)
-	s.updateStatus([]api.LoadBalancerIngress{})
+	s.updateStatus([]v1.LoadBalancerIngress{})
 }
 
 func (s *statusSync) run() {

@@ -219,7 +219,7 @@ func NewStatusSyncer(config Config) Sync {
 	broadcaster := record.NewBroadcaster()
 	hostname, _ := os.Hostname()
 
-	recorder := broadcaster.NewRecorder(scheme.Scheme, api.EventSource{
+	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{
 		Component: "ingress-leader-elector",
 		Host:      hostname,
 	})

@@ -301,13 +301,13 @@ func (s *statusSync) isRunningMultiplePods() bool {
 }
 
 // sliceToStatus converts a slice of IP and/or hostnames to LoadBalancerIngress
-func sliceToStatus(endpoints []string) []api.LoadBalancerIngress {
-	lbi := []api.LoadBalancerIngress{}
+func sliceToStatus(endpoints []string) []v1.LoadBalancerIngress {
+	lbi := []v1.LoadBalancerIngress{}
 	for _, ep := range endpoints {
 		if net.ParseIP(ep) == nil {
-			lbi = append(lbi, api.LoadBalancerIngress{Hostname: ep})
+			lbi = append(lbi, v1.LoadBalancerIngress{Hostname: ep})
 		} else {
-			lbi = append(lbi, api.LoadBalancerIngress{IP: ep})
+			lbi = append(lbi, v1.LoadBalancerIngress{IP: ep})
 		}
 	}
 

@@ -315,7 +315,7 @@ func sliceToStatus(endpoints []string) []api.LoadBalancerIngress {
 	return lbi
 }
 
-func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) {
+func (s *statusSync) updateStatus(newIPs []v1.LoadBalancerIngress) {
 	ings := s.IngressLister.List()
 	var wg sync.WaitGroup
 	wg.Add(len(ings))

@@ -355,7 +355,7 @@ func (s *statusSync) updateStatus(newIPs []api.LoadBalancerIngress) {
 	wg.Wait()
 }
 
-func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool {
+func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool {
 	if len(lhs) != len(rhs) {
 		return false
 	}

@@ -372,7 +372,7 @@ func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool {
 }
 
 // loadBalancerIngressByIP sorts LoadBalancerIngress using the field IP
-type loadBalancerIngressByIP []api.LoadBalancerIngress
+type loadBalancerIngressByIP []v1.LoadBalancerIngress
 
 func (c loadBalancerIngressByIP) Len() int      { return len(c) }
 func (c loadBalancerIngressByIP) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
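The status package changes are purely mechanical: the api alias for k8s.io/api/core/v1 becomes v1. A small self-contained sketch of the sort helper in its new spelling — the Less method and the sortedStatus usage are assumptions for illustration; the commit itself only renames the alias:

package example

import (
	"sort"

	v1 "k8s.io/api/core/v1"
)

// loadBalancerIngressByIP mirrors the sort helper from the hunk above,
// expressed against the k8s.io/api/core/v1 types that the godep update
// switched the controller to.
type loadBalancerIngressByIP []v1.LoadBalancerIngress

func (c loadBalancerIngressByIP) Len() int           { return len(c) }
func (c loadBalancerIngressByIP) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c loadBalancerIngressByIP) Less(i, j int) bool { return c[i].IP < c[j].IP }

// sortedStatus sorts a status slice by IP so two equal ingress states
// compare equal regardless of order (illustrative usage, not from the diff).
func sortedStatus(eps []v1.LoadBalancerIngress) []v1.LoadBalancerIngress {
	out := append([]v1.LoadBalancerIngress(nil), eps...)
	sort.Sort(loadBalancerIngressByIP(out))
	return out
}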