ingress-nginx-helm/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua


-- An affinity mode which makes sure connections are rebalanced when a deployment is scaled.
-- The advantage of this mode is that the load on the pods will be redistributed.
-- The drawback of this mode is that, when scaling up a deployment, roughly (n-c)/n users
-- will lose their session, where c is the current number of pods and n is the new number of
-- pods.
--
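-- For example: scaling from c = 3 pods to n = 4 pods remaps roughly
-- (4 - 3) / 4 = 25% of sessions, so about a quarter of users lose their
-- sticky session on that scale-up.
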
local balancer_sticky = require("balancer.sticky")
local math_random = require("math").random
local resty_chash = require("resty.chash")
local util_get_nodes = require("util").get_nodes

local ngx = ngx
local string = string
local setmetatable = setmetatable

local _M = balancer_sticky:new()

-- Consider the situation of N upstreams, one of which is failing.
-- Then the probability of still picking a failing upstream after M iterations
-- is close to (1/N)**M. For the worst case (2 upstreams; 20 iterations) it
-- would be ~10**(-6), which is much better than ~10**(-3) for 10 iterations.
local MAX_UPSTREAM_CHECKS_COUNT = 20
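-- (Worked check of the estimate above: with N = 2 and M = 20,
-- (1/2)**20 = 1/1048576, which is about 9.5 * 10**(-7).)
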
function _M.new(self, backend)
  local nodes = util_get_nodes(backend.endpoints)

  local o = {
    name = "sticky_balanced",
    instance = resty_chash:new(nodes)
  }

  setmetatable(o, self)
  self.__index = self
  balancer_sticky.sync(o, backend)

  return o
end

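-- Why consistent hashing: resty.chash remaps only a minority of keys when the
-- node set changes, so the session loss on a scale-up stays close to the
-- (n-c)/n estimate in the header comment, whereas a plain modulo hash would
-- remap almost every key and drop almost every session.
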
function _M.pick_new_upstream(self, failed_upstreams)
  for i = 1, MAX_UPSTREAM_CHECKS_COUNT do
    -- Derive a fresh candidate key on every retry: the attempt counter i is
    -- added to the timestamp so consecutive iterations hash differently.
    local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math_random(999999))

    local new_upstream = self.instance:find(key)

    if not failed_upstreams[new_upstream] then
      return new_upstream, key
    end
  end

  return nil, nil
end

return _M
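
-- A minimal usage sketch, kept as a comment so the module body above stays
-- unchanged. The backend table here is hypothetical: the addresses and the
-- sessionAffinityConfig fields are made up for illustration, assuming a
-- backend shaped like the ones ingress-nginx hands to its balancers.
--
--   local sticky_balanced = require("balancer.sticky_balanced")
--
--   local backend = {
--     name = "default-demo-80",
--     endpoints = {
--       { address = "10.0.0.1", port = "8080" },
--       { address = "10.0.0.2", port = "8080" },
--     },
--     sessionAffinityConfig = {
--       cookieSessionAffinity = { name = "route" },
--     },
--   }
--
--   local balancer = sticky_balanced:new(backend)
--
--   -- Nothing has failed yet, so pass an empty set of failed upstreams;
--   -- the return values are an "address:port" string and the hash key used.
--   local endpoint, key = balancer:pick_new_upstream({})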