feat: use LeaseLock for leader election (#8921)

The ConfigMap-based election lock has been removed; the controller now uses
the Lease API (coordination.k8s.io) for leader election.

Before this change, `MultiLock` was used so that existing ingress-nginx users
could migrate smoothly from the ConfigMap lock to `LeaseLock`.

Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com>

Author: Jintao Zhang <zhangjintao9020@gmail.com>
Date:   2022-08-23 06:38:16 +08:00 (committed by GitHub)
Parent: d13f32d67c
Commit: 3aee7416e5

3 changed files with 8 additions and 32 deletions
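
For context, the sketch below shows what Lease-based leader election with client-go looks like end to end. It is a minimal, self-contained example in the spirit of this change, not the controller's actual code: the lease name, namespace, and identity are placeholders, and it uses leaderelection.RunOrDie with inline callbacks instead of the controller's NewLeaderElector wiring.

// Minimal, self-contained sketch of Lease-based leader election with
// client-go, in the spirit of this change. NOT the controller's code:
// the namespace, lease name, and identity below are placeholders.
package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	// Assumes the process runs in a pod; use clientcmd for out-of-cluster use.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "example-election-id", // placeholder, e.g. the controller's election ID
			Namespace: "default",             // placeholder namespace
		},
		Client: client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: "example-pod-name", // placeholder; usually the pod name
		},
	}

	ttl := 30 * time.Second
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: ttl,
		RenewDeadline: ttl / 2,
		RetryPeriod:   ttl / 4,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				klog.Info("acquired the lease; this replica is now the leader")
			},
			OnStoppedLeading: func() {
				klog.Info("lost the lease; another replica may take over")
			},
		},
	})
}

While it holds the Lease, the elector keeps renewing it; if renewal does not succeed within RenewDeadline, OnStoppedLeading fires and the other candidates retry every RetryPeriod until one acquires the lock.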

@@ -58,21 +58,6 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    resourceNames:
-      - {{ .Values.controller.electionID }}
-    verbs:
-      - get
-      - update
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    verbs:
-      - create
   - apiGroups:
       - coordination.k8s.io
     resources:

@@ -99,25 +99,16 @@ func setupLeaderElection(config *leaderElectionConfig) {
 		EventRecorder: recorder,
 	}
 
-	// TODO: If we upgrade client-go to v0.24 then we can only use LeaseLock.
-	// MultiLock is used for lock's migration
-	lock := resourcelock.MultiLock{
-		Primary: &resourcelock.ConfigMapLock{
-			ConfigMapMeta: objectMeta,
-			Client:        config.Client.CoreV1(),
-			LockConfig:    resourceLockConfig,
-		},
-		Secondary: &resourcelock.LeaseLock{
-			LeaseMeta:  objectMeta,
-			Client:     config.Client.CoordinationV1(),
-			LockConfig: resourceLockConfig,
-		},
+	lock := &resourcelock.LeaseLock{
+		LeaseMeta:  objectMeta,
+		Client:     config.Client.CoordinationV1(),
+		LockConfig: resourceLockConfig,
 	}
 
 	ttl := 30 * time.Second
 
 	elector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
-		Lock:          &lock,
+		Lock:          lock,
 		LeaseDuration: ttl,
 		RenewDeadline: ttl / 2,
 		RetryPeriod:   ttl / 4,

@@ -92,10 +92,10 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 			assert.Nil(ginkgo.GinkgoT(), err, "unexpected error cleaning Ingress status")
 			framework.Sleep(10 * time.Second)
 
-			err = f.KubeClientSet.CoreV1().
-				ConfigMaps(f.Namespace).
+			err = f.KubeClientSet.CoordinationV1().
+				Leases(f.Namespace).
 				Delete(context.TODO(), "ingress-controller-leader", metav1.DeleteOptions{})
-			assert.Nil(ginkgo.GinkgoT(), err, "unexpected error deleting leader election configmap")
+			assert.Nil(ginkgo.GinkgoT(), err, "unexpected error deleting leader election lease")
 
 			_, cmd, err = f.KubectlProxy(port)
 			assert.Nil(ginkgo.GinkgoT(), err, "unexpected error starting kubectl proxy")
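
One practical difference for tooling that used to read the leader out of the ConfigMap: ConfigMapLock stored the election record in the control-plane.alpha.kubernetes.io/leader annotation, while a Lease records the holder directly in spec.holderIdentity. Below is a hedged sketch (not part of the e2e suite) of reading it from outside the cluster; the kubeconfig path and namespace are assumptions, and the lease name mirrors the one deleted in the test above.

// Hedged sketch, not part of the e2e suite: with a Lease lock the current
// holder can be read from spec.holderIdentity. The kubeconfig path,
// namespace, and lease name below are assumptions/placeholders.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// "ingress-nginx" is a placeholder namespace; "ingress-controller-leader"
	// mirrors the election ID used in the e2e test.
	lease, err := client.CoordinationV1().
		Leases("ingress-nginx").
		Get(context.TODO(), "ingress-controller-leader", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if lease.Spec.HolderIdentity != nil {
		fmt.Println("current leader:", *lease.Spec.HolderIdentity)
	}
}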