feat: using LeaseLock for election (#8921)
We removed the use of a ConfigMap as the election lock and now use the Lease API to complete leader election. Before this, we used `MultiLock` to facilitate a smooth migration of existing ingress-nginx users from ConfigMap to LeaseLock.

Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com>
parent d13f32d67c
commit 3aee7416e5
3 changed files with 8 additions and 32 deletions
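For context, here is a minimal, self-contained sketch of Lease-based leader election with client-go's `leaderelection` package, the pattern this commit moves the controller to. The namespace, identity, and config loading are illustrative assumptions, not values taken from this commit; the lease name mirrors the default `--election-id`.

```go
package main

import (
    "context"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/klog/v2"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        klog.Fatal(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Assumed values: in the controller the lock name comes from --election-id
    // and the identity from the pod name.
    lock := &resourcelock.LeaseLock{
        LeaseMeta: metav1.ObjectMeta{
            Name:      "ingress-controller-leader",
            Namespace: "ingress-nginx",
        },
        Client:     client.CoordinationV1(),
        LockConfig: resourcelock.ResourceLockConfig{Identity: "replica-identity"},
    }

    ttl := 30 * time.Second
    leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: ttl,
        RenewDeadline: ttl / 2,
        RetryPeriod:   ttl / 4,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) { klog.Info("started leading") },
            OnStoppedLeading: func() { klog.Info("stopped leading") },
            OnNewLeader:      func(id string) { klog.Infof("current leader: %s", id) },
        },
    })
}
```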
|
```diff
@@ -58,21 +58,6 @@ rules:
       - get
       - list
       - watch
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    resourceNames:
-      - {{ .Values.controller.electionID }}
-    verbs:
-      - get
-      - update
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    verbs:
-      - create
   - apiGroups:
       - coordination.k8s.io
     resources:
```
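With the ConfigMap election rules removed, leader election relies only on the `coordination.k8s.io` permissions that follow in this role. For operators who want to confirm the upgraded role still grants what the election needs, a hedged sketch using a `SelfSubjectAccessReview`; the namespace and lease name are assumptions.

```go
package main

import (
    "context"
    "fmt"

    authorizationv1 "k8s.io/api/authorization/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Ask the API server whether the current service account may get/update
    // the election Lease. Namespace and lease name are illustrative assumptions.
    for _, verb := range []string{"get", "update"} {
        review := &authorizationv1.SelfSubjectAccessReview{
            Spec: authorizationv1.SelfSubjectAccessReviewSpec{
                ResourceAttributes: &authorizationv1.ResourceAttributes{
                    Group:     "coordination.k8s.io",
                    Resource:  "leases",
                    Namespace: "ingress-nginx",
                    Name:      "ingress-controller-leader",
                    Verb:      verb,
                },
            },
        }
        resp, err := client.AuthorizationV1().
            SelfSubjectAccessReviews().
            Create(context.TODO(), review, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("can %s leases: %v\n", verb, resp.Status.Allowed)
    }
}
```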
```diff
@@ -99,25 +99,16 @@ func setupLeaderElection(config *leaderElectionConfig) {
 		EventRecorder: recorder,
 	}
 
-	// TODO: If we upgrade client-go to v0.24 then we can only use LeaseLock.
-	// MultiLock is used for lock's migration
-	lock := resourcelock.MultiLock{
-		Primary: &resourcelock.ConfigMapLock{
-			ConfigMapMeta: objectMeta,
-			Client:        config.Client.CoreV1(),
-			LockConfig:    resourceLockConfig,
-		},
-		Secondary: &resourcelock.LeaseLock{
-			LeaseMeta:  objectMeta,
-			Client:     config.Client.CoordinationV1(),
-			LockConfig: resourceLockConfig,
-		},
+	lock := &resourcelock.LeaseLock{
+		LeaseMeta:  objectMeta,
+		Client:     config.Client.CoordinationV1(),
+		LockConfig: resourceLockConfig,
 	}
 
 	ttl := 30 * time.Second
 
 	elector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
-		Lock: &lock,
+		Lock: lock,
 		LeaseDuration: ttl,
 		RenewDeadline: ttl / 2,
 		RetryPeriod:   ttl / 4,
```
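The election state now lives in a plain Lease object, so the current leader can be read straight off the API. A hedged sketch of doing that from outside the cluster; the kubeconfig loading and namespace are assumptions, and the lease name matches the e2e test below.

```go
package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Load the local kubeconfig, as kubectl would; an assumption for this sketch.
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Lease name matches the e2e test below; the namespace is an assumption.
    lease, err := client.CoordinationV1().
        Leases("ingress-nginx").
        Get(context.TODO(), "ingress-controller-leader", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    if lease.Spec.HolderIdentity != nil {
        fmt.Println("current leader:", *lease.Spec.HolderIdentity)
    }
    fmt.Println("last renewed:", lease.Spec.RenewTime)
}
```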
```diff
@@ -92,10 +92,10 @@ var _ = framework.IngressNginxDescribe("[Status] status update", func() {
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error cleaning Ingress status")
 		framework.Sleep(10 * time.Second)
 
-		err = f.KubeClientSet.CoreV1().
-			ConfigMaps(f.Namespace).
+		err = f.KubeClientSet.CoordinationV1().
+			Leases(f.Namespace).
 			Delete(context.TODO(), "ingress-controller-leader", metav1.DeleteOptions{})
-		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error deleting leader election configmap")
+		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error deleting leader election lease")
 
 		_, cmd, err = f.KubectlProxy(port)
 		assert.Nil(ginkgo.GinkgoT(), err, "unexpected error starting kubectl proxy")
```
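The test deletes the Lease to force a re-election. For completeness, a hedged sketch of waiting until some replica recreates the Lease and publishes a holder again; the poll interval, timeout, and namespace are assumptions, not part of this commit.

```go
package main

import (
    "context"
    "fmt"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// waitForNewLeader polls until the election Lease exists again and carries a
// non-empty holder identity. Interval, timeout, and namespace are assumptions.
func waitForNewLeader(client kubernetes.Interface, namespace string) error {
    return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
        lease, err := client.CoordinationV1().
            Leases(namespace).
            Get(context.TODO(), "ingress-controller-leader", metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return false, nil // not recreated yet, keep polling
        }
        if err != nil {
            return false, err
        }
        return lease.Spec.HolderIdentity != nil && *lease.Spec.HolderIdentity != "", nil
    })
}

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)
    if err := waitForNewLeader(client, "ingress-nginx"); err != nil {
        panic(err)
    }
    fmt.Println("a replica re-acquired the election lease")
}
```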