explicitly pick a version
parent c5e30973e5
commit 33b8309c73
11 changed files with 35 additions and 35 deletions
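The change itself is mechanical: every call that went through the deprecated, unversioned Core() accessor of the client-go clientset now goes through the explicitly versioned CoreV1() accessor, so the core/v1 group-version is named at the call site instead of being implied. A minimal sketch of the resulting call pattern (not taken from this commit; the import paths and the context-free Get signature assume the client-go vintage vendored here):

package main

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Build an in-cluster clientset; a kubeconfig-based config works the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Before: client.Core().Services(ns).Get(...)   -- deprecated alias, version implicit
	// After:  client.CoreV1().Services(ns).Get(...) -- core/v1 picked explicitly
	svc, err := client.CoreV1().Services("default").Get("kubernetes", meta_v1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s/%s has cluster IP %s\n", svc.Namespace, svc.Name, svc.Spec.ClusterIP)
}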
|
@@ -96,7 +96,7 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
-		Interface: kubeClient.Core().Events(""),
+		Interface: kubeClient.CoreV1().Events(""),
 	})
 	lbc := LoadBalancerController{
 		client: kubeClient,
@@ -156,7 +156,7 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *
 	}

 	lbc.svcLister.Indexer, lbc.svcController = cache.NewIndexerInformer(
-		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "services", namespace, fields.Everything()),
+		cache.NewListWatchFromClient(lbc.client.CoreV1().RESTClient(), "services", namespace, fields.Everything()),
 		&api_v1.Service{},
 		resyncPeriod,
 		svcHandlers,
@@ -164,7 +164,7 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *
 	)

 	lbc.podLister.Indexer, lbc.podController = cache.NewIndexerInformer(
-		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "pods", namespace, fields.Everything()),
+		cache.NewListWatchFromClient(lbc.client.CoreV1().RESTClient(), "pods", namespace, fields.Everything()),
 		&api_v1.Pod{},
 		resyncPeriod,
 		cache.ResourceEventHandlerFuncs{},
@@ -173,7 +173,7 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *

 	// Node watch handlers
 	lbc.nodeLister.Indexer, lbc.nodeController = cache.NewIndexerInformer(
-		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "nodes", api_v1.NamespaceAll, fields.Everything()),
+		cache.NewListWatchFromClient(lbc.client.CoreV1().RESTClient(), "nodes", api_v1.NamespaceAll, fields.Everything()),
 		&api_v1.Node{},
 		resyncPeriod,
 		cache.ResourceEventHandlerFuncs{},
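The hunks above switch the GLBC controller's event sink and its service, pod, and node informers. A standalone sketch of that list/watch-plus-informer pattern with the versioned client (hedged: the resource, resync period, and empty handler set are illustrative only; older vendored trees import the core types from k8s.io/client-go/pkg/api/v1 rather than k8s.io/api/core/v1):

package main

import (
	"time"

	api_v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// List and watch services through the explicitly versioned REST client,
	// mirroring cache.NewListWatchFromClient(lbc.client.CoreV1().RESTClient(), ...).
	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "services", meta_v1.NamespaceAll, fields.Everything())

	// Wire the list/watch into an informer with an empty handler set.
	_, controller := cache.NewInformer(lw, &api_v1.Service{}, 30*time.Second,
		cache.ResourceEventHandlerFuncs{})

	stop := make(chan struct{})
	defer close(stop)
	controller.Run(stop)
}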
@@ -60,7 +60,7 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe
 	secretName := ing.Spec.TLS[0].SecretName
 	// TODO: Replace this for a secret watcher.
 	glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
-	secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{})
+	secret, err := t.client.CoreV1().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -422,7 +422,7 @@ func getNodePort(client kubernetes.Interface, ns, name string) (port, nodePort i
 	var svc *api_v1.Service
 	glog.V(3).Infof("Waiting for %v/%v", ns, name)
 	wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
-		svc, err = client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
+		svc, err = client.CoreV1().Services(ns).Get(name, meta_v1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
@@ -156,21 +156,21 @@ type APIServerConfigMapStore struct {
 // Add adds the given config map to the apiserver's store.
 func (a *APIServerConfigMapStore) Add(obj interface{}) error {
 	cfg := obj.(*api_v1.ConfigMap)
-	_, err := a.client.Core().ConfigMaps(cfg.Namespace).Create(cfg)
+	_, err := a.client.CoreV1().ConfigMaps(cfg.Namespace).Create(cfg)
 	return err
 }

 // Update updates the existing config map object.
 func (a *APIServerConfigMapStore) Update(obj interface{}) error {
 	cfg := obj.(*api_v1.ConfigMap)
-	_, err := a.client.Core().ConfigMaps(cfg.Namespace).Update(cfg)
+	_, err := a.client.CoreV1().ConfigMaps(cfg.Namespace).Update(cfg)
 	return err
 }

 // Delete deletes the existing config map object.
 func (a *APIServerConfigMapStore) Delete(obj interface{}) error {
 	cfg := obj.(*api_v1.ConfigMap)
-	return a.client.Core().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &metav1.DeleteOptions{})
+	return a.client.CoreV1().ConfigMaps(cfg.Namespace).Delete(cfg.Name, &metav1.DeleteOptions{})
 }

 // GetByKey returns the config map for a given key.
@@ -181,7 +181,7 @@ func (a *APIServerConfigMapStore) GetByKey(key string) (item interface{}, exists
 		return nil, false, fmt.Errorf("failed to get key %v, unexpecte format, expecting ns/name", key)
 	}
 	ns, name := nsName[0], nsName[1]
-	cfg, err := a.client.Core().ConfigMaps(ns).Get(name, metav1.GetOptions{})
+	cfg, err := a.client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{})
 	if err != nil {
 		// Translate not found errors to found=false, err=nil
 		if errors.IsNotFound(err) {
@@ -146,7 +146,7 @@ func newIngressController(config *Configuration) *GenericController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
-		Interface: config.Client.Core().Events(config.Namespace),
+		Interface: config.Client.CoreV1().Events(config.Namespace),
 	})

 	ic := GenericController{
@@ -277,23 +277,23 @@ func newIngressController(config *Configuration) *GenericController {
 		&extensions.Ingress{}, ic.cfg.ResyncPeriod, ingEventHandler)

 	ic.endpLister.Store, ic.endpController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "endpoints", ic.cfg.Namespace, fields.Everything()),
+		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "endpoints", ic.cfg.Namespace, fields.Everything()),
 		&api.Endpoints{}, ic.cfg.ResyncPeriod, eventHandler)

 	ic.secrLister.Store, ic.secrController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "secrets", watchNs, fields.Everything()),
+		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "secrets", watchNs, fields.Everything()),
 		&api.Secret{}, ic.cfg.ResyncPeriod, secrEventHandler)

 	ic.mapLister.Store, ic.mapController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "configmaps", watchNs, fields.Everything()),
+		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "configmaps", watchNs, fields.Everything()),
 		&api.ConfigMap{}, ic.cfg.ResyncPeriod, mapEventHandler)

 	ic.svcLister.Store, ic.svcController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()),
+		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "services", ic.cfg.Namespace, fields.Everything()),
 		&api.Service{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})

 	ic.nodeLister.Store, ic.nodeController = cache.NewInformer(
-		cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()),
+		cache.NewListWatchFromClient(ic.cfg.Client.CoreV1().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()),
 		&api.Node{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})

 	if config.UpdateStatus {
@@ -39,7 +39,7 @@ import (
 func (ic *GenericController) checkSvcForUpdate(svc *api_v1.Service) error {
 	// get the pods associated with the service
 	// TODO: switch this to a watch
-	pods, err := ic.cfg.Client.Core().Pods(svc.Namespace).List(meta_v1.ListOptions{
+	pods, err := ic.cfg.Client.CoreV1().Pods(svc.Namespace).List(meta_v1.ListOptions{
 		LabelSelector: labels.Set(svc.Spec.Selector).AsSelector().String(),
 	})

@@ -83,7 +83,7 @@ func (ic *GenericController) checkSvcForUpdate(svc *api_v1.Service) error {
 	if len(namedPorts) > 0 && !reflect.DeepEqual(curNamedPort, namedPorts) {
 		data, _ := json.Marshal(namedPorts)

-		newSvc, err := ic.cfg.Client.Core().Services(svc.Namespace).Get(svc.Name, meta_v1.GetOptions{})
+		newSvc, err := ic.cfg.Client.CoreV1().Services(svc.Namespace).Get(svc.Name, meta_v1.GetOptions{})
 		if err != nil {
 			return fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err)
 		}
@@ -94,7 +94,7 @@ func (ic *GenericController) checkSvcForUpdate(svc *api_v1.Service) error {

 		newSvc.ObjectMeta.Annotations[service.NamedPortAnnotation] = string(data)
 		glog.Infof("updating service %v with new named port mappings", svc.Name)
-		_, err = ic.cfg.Client.Core().Services(svc.Namespace).Update(newSvc)
+		_, err = ic.cfg.Client.CoreV1().Services(svc.Namespace).Update(newSvc)
 		if err != nil {
 			return fmt.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err)
 		}
@@ -184,7 +184,7 @@ func TestCheckSvcForUpdate(t *testing.T) {
 			t.Fatalf("unexpected error: %v", err)
 		}

-		rs, _ := gc.cfg.Client.Core().Services(api.NamespaceDefault).Get("named_port_test_service", meta_v1.GetOptions{})
+		rs, _ := gc.cfg.Client.CoreV1().Services(api.NamespaceDefault).Get("named_port_test_service", meta_v1.GetOptions{})
 		rr := rs.ObjectMeta.Annotations[service.NamedPortAnnotation]
 		if !reflect.DeepEqual(rr, foo.er) {
 			t.Errorf("Returned %s, but expected %s for %s", rr, foo.er, foo.n)
@@ -35,7 +35,7 @@ import (
 )

 func getCurrentLeader(electionID, namespace string, c client.Interface) (string, *api.Endpoints, error) {
-	endpoints, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{})
+	endpoints, err := c.CoreV1().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{})
 	if err != nil {
 		return "", nil, err
 	}
@@ -59,10 +59,10 @@ func NewElection(electionID,
 	callback func(leader string),
 	c client.Interface) (*leaderelection.LeaderElector, error) {

-	_, err := c.Core().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{})
+	_, err := c.CoreV1().Endpoints(namespace).Get(electionID, meta_v1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
-			_, err = c.Core().Endpoints(namespace).Create(&api.Endpoints{
+			_, err = c.CoreV1().Endpoints(namespace).Create(&api.Endpoints{
 				ObjectMeta: meta_v1.ObjectMeta{
 					Name: electionID,
 				},
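The hunks above adjust the election helpers; the ones below do the same for the EndpointsLock resource lock. For context, a hedged sketch of what getCurrentLeader reads off the election Endpoints object: the leader record is stored as a JSON annotation (the struct and annotation key here come from client-go's resourcelock package, which mirrors the vendored LeaderElectionRecord in this repo; the namespace and election ID are placeholders, and the context-free Get matches the older client-go used in this diff):

package main

import (
	"encoding/json"
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Fetch the Endpoints object that backs the election, via the versioned client.
	ep, err := client.CoreV1().Endpoints("kube-system").Get("ingress-controller-leader", meta_v1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// The record lives in a JSON annotation on the Endpoints object.
	var record resourcelock.LeaderElectionRecord
	raw := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]
	if err := json.Unmarshal([]byte(raw), &record); err != nil {
		panic(err)
	}
	fmt.Println("current leader:", record.HolderIdentity)
}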
@@ -38,7 +38,7 @@ type EndpointsLock struct {
 func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
 	var record LeaderElectionRecord
 	var err error
-	el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, meta_v1.GetOptions{})
+	el.e, err = el.Client.CoreV1().Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -59,7 +59,7 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error {
 	if err != nil {
 		return err
 	}
-	el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
+	el.e, err = el.Client.CoreV1().Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      el.EndpointsMeta.Name,
 			Namespace: el.EndpointsMeta.Namespace,
@@ -81,7 +81,7 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
 		return err
 	}
 	el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	el.e, err = el.Client.Core().Endpoints(el.EndpointsMeta.Namespace).Update(el.e)
+	el.e, err = el.Client.CoreV1().Endpoints(el.EndpointsMeta.Namespace).Update(el.e)
 	return err
 }

@@ -210,7 +210,7 @@ func NewStatusSyncer(config Config) Sync {
 func (s *statusSync) runningAddresess() ([]string, error) {
 	if s.PublishService != "" {
 		ns, name, _ := k8s.ParseNameNS(s.PublishService)
-		svc, err := s.Client.Core().Services(ns).Get(name, meta_v1.GetOptions{})
+		svc, err := s.Client.CoreV1().Services(ns).Get(name, meta_v1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -228,7 +228,7 @@ func (s *statusSync) runningAddresess() ([]string, error) {
 	}

 	// get information about all the pods running the ingress controller
-	pods, err := s.Client.Core().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
+	pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
 		LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
 	})
 	if err != nil {
@@ -246,7 +246,7 @@ func (s *statusSync) runningAddresess() ([]string, error) {
 }

 func (s *statusSync) isRunningMultiplePods() bool {
-	pods, err := s.Client.Core().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
+	pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(meta_v1.ListOptions{
 		LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
 	})
 	if err != nil {
@@ -32,7 +32,7 @@ func IsValidService(kubeClient clientset.Interface, name string) (*api.Service,
 	if err != nil {
 		return nil, err
 	}
-	return kubeClient.Core().Services(ns).Get(name, meta_v1.GetOptions{})
+	return kubeClient.CoreV1().Services(ns).Get(name, meta_v1.GetOptions{})
 }

 // IsValidConfigMap check if exists a configmap with the specified name
@@ -44,7 +44,7 @@ func IsValidConfigMap(kubeClient clientset.Interface, fullName string) (*api.Con
 		return nil, err
 	}

-	configMap, err := kubeClient.Core().ConfigMaps(ns).Get(name, meta_v1.GetOptions{})
+	configMap, err := kubeClient.CoreV1().ConfigMaps(ns).Get(name, meta_v1.GetOptions{})

 	if err != nil {
 		return nil, fmt.Errorf("configmap not found: %v", err)
@@ -56,7 +56,7 @@ func IsValidConfigMap(kubeClient clientset.Interface, fullName string) (*api.Con

 // IsValidNamespace chck if exists a namespace with the specified name
 func IsValidNamespace(kubeClient clientset.Interface, name string) (*api.Namespace, error) {
-	return kubeClient.Core().Namespaces().Get(name, meta_v1.GetOptions{})
+	return kubeClient.CoreV1().Namespaces().Get(name, meta_v1.GetOptions{})
 }

 // IsValidSecret checks if exists a secret with the specified name
@@ -65,7 +65,7 @@ func IsValidSecret(kubeClient clientset.Interface, name string) (*api.Secret, er
 	if err != nil {
 		return nil, err
 	}
-	return kubeClient.Core().Secrets(ns).Get(name, meta_v1.GetOptions{})
+	return kubeClient.CoreV1().Secrets(ns).Get(name, meta_v1.GetOptions{})
 }

 // ParseNameNS parses a string searching a namespace and name
@@ -81,7 +81,7 @@ func ParseNameNS(input string) (string, string, error) {
 // GetNodeIP returns the IP address of a node in the cluster
 func GetNodeIP(kubeClient clientset.Interface, name string) string {
 	var externalIP string
-	node, err := kubeClient.Core().Nodes().Get(name, meta_v1.GetOptions{})
+	node, err := kubeClient.CoreV1().Nodes().Get(name, meta_v1.GetOptions{})
 	if err != nil {
 		return externalIP
 	}
@@ -121,7 +121,7 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) {
 		return nil, fmt.Errorf("unable to get POD information (missing POD_NAME or POD_NAMESPACE environment variable")
 	}

-	pod, _ := kubeClient.Core().Pods(podNs).Get(podName, meta_v1.GetOptions{})
+	pod, _ := kubeClient.CoreV1().Pods(podNs).Get(podName, meta_v1.GetOptions{})
 	if pod == nil {
 		return nil, fmt.Errorf("unable to get POD information")
 	}