From 91705911858e9f81b3d6f17d68d55abe945e012b Mon Sep 17 00:00:00 2001 From: Alexander Maret-Huskinson Date: Fri, 30 Aug 2019 11:40:29 +0200 Subject: [PATCH 1/5] Added new affinity mode for maximum session stickyness. Fixes kubernetes/ingress-nginx#4475 --- docs/examples/affinity/cookie/README.md | 1 + .../nginx-configuration/annotations.md | 3 + .../ingress/annotations/annotations_test.go | 11 +- .../annotations/sessionaffinity/main.go | 10 ++ .../annotations/sessionaffinity/main_test.go | 5 + internal/ingress/controller/controller.go | 4 + internal/ingress/types.go | 1 + internal/ingress/types_equals.go | 3 + rootfs/etc/nginx/lua/affinity/balanced.lua | 57 ++++++ rootfs/etc/nginx/lua/affinity/persistent.lua | 53 ++++++ rootfs/etc/nginx/lua/balancer/sticky.lua | 109 ++++++++---- .../nginx/lua/test/balancer/sticky_test.lua | 46 +++-- .../etc/nginx/lua/test/util/nodemap_test.lua | 167 ++++++++++++++++++ rootfs/etc/nginx/lua/util/nodemap.lua | 120 +++++++++++++ test/manifests/configuration-a.json | 3 + test/manifests/configuration-b.json | 3 + 16 files changed, 541 insertions(+), 55 deletions(-) create mode 100644 rootfs/etc/nginx/lua/affinity/balanced.lua create mode 100644 rootfs/etc/nginx/lua/affinity/persistent.lua create mode 100644 rootfs/etc/nginx/lua/test/util/nodemap_test.lua create mode 100644 rootfs/etc/nginx/lua/util/nodemap.lua diff --git a/docs/examples/affinity/cookie/README.md b/docs/examples/affinity/cookie/README.md index 5ee8855d0..ee6684bde 100644 --- a/docs/examples/affinity/cookie/README.md +++ b/docs/examples/affinity/cookie/README.md @@ -9,6 +9,7 @@ Session affinity can be configured using the following annotations: |Name|Description|Value| | --- | --- | --- | |nginx.ingress.kubernetes.io/affinity|Type of the affinity, set this to `cookie` to enable session affinity|string (NGINX only supports `cookie`)| +|nginx.ingress.kubernetes.io/affinity-mode|The affinity mode defines how sticky a session is. 
Use `balanced` to redistribute some sessions when scaling pods or `persistent` for maximum stickyness.|`balanced` (default) or `persistent`| |nginx.ingress.kubernetes.io/session-cookie-name|Name of the cookie that will be created|string (defaults to `INGRESSCOOKIE`)| |nginx.ingress.kubernetes.io/session-cookie-path|Path that will be set on the cookie (required if your [Ingress paths][ingress-paths] use regular expressions)|string (defaults to the currently [matched path][ingress-paths])| |nginx.ingress.kubernetes.io/session-cookie-max-age|Time until the cookie expires, corresponds to the `Max-Age` cookie directive|number of seconds| diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md index 5ea9ebe16..280fc73a9 100755 --- a/docs/user-guide/nginx-configuration/annotations.md +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -17,6 +17,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |---------------------------|------| |[nginx.ingress.kubernetes.io/app-root](#rewrite)|string| |[nginx.ingress.kubernetes.io/affinity](#session-affinity)|cookie| +|[nginx.ingress.kubernetes.io/affinity-mode](#session-affinity)|"balanced" or "persistent"| |[nginx.ingress.kubernetes.io/auth-realm](#authentication)|string| |[nginx.ingress.kubernetes.io/auth-secret](#authentication)|string| |[nginx.ingress.kubernetes.io/auth-type](#authentication)|basic or digest| @@ -151,6 +152,8 @@ If the Application Root is exposed in a different path and needs to be redirecte The annotation `nginx.ingress.kubernetes.io/affinity` enables and sets the affinity type in all Upstreams of an Ingress. This way, a request will always be directed to the same upstream server. The only affinity type available for NGINX is `cookie`. +The annotation `nginx.ingress.kubernetes.io/affinity-mode` defines the stickyness of a session. Setting this to `balanced` (default) will redistribute some sessions if a deployment gets scaled up, therefore rebalancing the load on the servers. Setting this to `persistent` will not rebalance sessions to new servers, therefore providing maximum stickyness. + !!! attention If more than one Ingress is defined for a host and at least one Ingress uses `nginx.ingress.kubernetes.io/affinity: cookie`, then only paths on the Ingress using `nginx.ingress.kubernetes.io/affinity` will use session cookie affinity. All paths defined on other Ingresses for the host will be load balanced through the random selection of a backend server. 
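As an illustrative sketch of how the two annotations described above are combined (the host, Service name and port below are placeholders, not values taken from this patch), an Ingress requesting maximum stickiness would look roughly like this:

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: example-ingress                      # placeholder name
  annotations:
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
    nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE"
spec:
  rules:
  - host: example.com                        # placeholder host
    http:
      paths:
      - path: /
        backend:
          serviceName: example-service       # placeholder Service
          servicePort: 80
```

Leaving `affinity-mode` out, or setting it to `balanced`, keeps the existing behaviour of redistributing some sessions when the deployment is scaled.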
diff --git a/internal/ingress/annotations/annotations_test.go b/internal/ingress/annotations/annotations_test.go index 69d90fd0c..2eac8c234 100644 --- a/internal/ingress/annotations/annotations_test.go +++ b/internal/ingress/annotations/annotations_test.go @@ -199,11 +199,12 @@ func TestAffinitySession(t *testing.T) { fooAnns := []struct { annotations map[string]string affinitytype string + affinitymode string name string }{ - {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieName: "route"}, "cookie", "route"}, - {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieName: "route1"}, "cookie", "route1"}, - {map[string]string{annotationAffinityType: "cookie", annotationAffinityCookieName: ""}, "cookie", "INGRESSCOOKIE"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityMode: "balanced", annotationAffinityCookieName: "route"}, "cookie", "balanced", "route"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityMode: "persistent", annotationAffinityCookieName: "route1"}, "cookie", "persistent", "route1"}, + {map[string]string{annotationAffinityType: "cookie", annotationAffinityMode: "balanced", annotationAffinityCookieName: ""}, "cookie", "balanced", "INGRESSCOOKIE"}, {map[string]string{}, "", ""}, {nil, "", ""}, } @@ -213,6 +214,10 @@ func TestAffinitySession(t *testing.T) { r := ec.Extract(ing).SessionAffinity t.Logf("Testing pass %v %v", foo.affinitytype, foo.name) + if (r.Mode != foo.affinitymode) { + t.Errorf("Returned %v but expected %v for Name", r.Mode, foo.affinitymode) + } + if r.Cookie.Name != foo.name { t.Errorf("Returned %v but expected %v for Name", r.Cookie.Name, foo.name) } diff --git a/internal/ingress/annotations/sessionaffinity/main.go b/internal/ingress/annotations/sessionaffinity/main.go index 417f625b0..d6ce54f5f 100644 --- a/internal/ingress/annotations/sessionaffinity/main.go +++ b/internal/ingress/annotations/sessionaffinity/main.go @@ -28,6 +28,7 @@ import ( const ( annotationAffinityType = "affinity" + annotationAffinityMode = "affinity-mode" // If a cookie with this name exists, // its value is used as an index into the list of available backends. annotationAffinityCookieName = "session-cookie-name" @@ -57,6 +58,8 @@ var ( type Config struct { // The type of affinity that will be used Type string `json:"type"` + // The affinity mode, i.e. 
how sticky a session is + Mode string `json:"mode"` Cookie } @@ -136,6 +139,12 @@ func (a affinity) Parse(ing *networking.Ingress) (interface{}, error) { at = "" } + // Check the afinity mode that will be used + am, err := parser.GetStringAnnotation(annotationAffinityMode, ing) + if err != nil { + am = "" + } + switch at { case "cookie": cookie = a.cookieAffinityParse(ing) @@ -146,6 +155,7 @@ func (a affinity) Parse(ing *networking.Ingress) (interface{}, error) { return &Config{ Type: at, + Mode: am, Cookie: *cookie, }, nil } diff --git a/internal/ingress/annotations/sessionaffinity/main_test.go b/internal/ingress/annotations/sessionaffinity/main_test.go index bdb038e0d..29c514047 100644 --- a/internal/ingress/annotations/sessionaffinity/main_test.go +++ b/internal/ingress/annotations/sessionaffinity/main_test.go @@ -67,6 +67,7 @@ func TestIngressAffinityCookieConfig(t *testing.T) { data := map[string]string{} data[parser.GetAnnotationWithPrefix(annotationAffinityType)] = "cookie" + data[parser.GetAnnotationWithPrefix(annotationAffinityMode)] = "balanced" data[parser.GetAnnotationWithPrefix(annotationAffinityCookieName)] = "INGRESSCOOKIE" data[parser.GetAnnotationWithPrefix(annotationAffinityCookieExpires)] = "4500" data[parser.GetAnnotationWithPrefix(annotationAffinityCookieMaxAge)] = "3000" @@ -84,6 +85,10 @@ func TestIngressAffinityCookieConfig(t *testing.T) { t.Errorf("expected cookie as affinity but returned %v", nginxAffinity.Type) } + if nginxAffinity.Mode != "balanced" { + t.Errorf("expected balanced as affinity mode but returned %v", nginxAffinity.Mode) + } + if nginxAffinity.Cookie.Name != "INGRESSCOOKIE" { t.Errorf("expected INGRESSCOOKIE as session-cookie-name but returned %v", nginxAffinity.Cookie.Name) } diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index b33bf1594..d7a4f032d 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -568,6 +568,10 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in ups.SessionAffinity.AffinityType = anns.SessionAffinity.Type } + if ups.SessionAffinity.AffinityMode == "" { + ups.SessionAffinity.AffinityMode = anns.SessionAffinity.Mode + } + if anns.SessionAffinity.Type == "cookie" { cookiePath := anns.SessionAffinity.Cookie.Path if anns.Rewrite.UseRegex && cookiePath == "" { diff --git a/internal/ingress/types.go b/internal/ingress/types.go index c87beb49d..9dd58a4f7 100644 --- a/internal/ingress/types.go +++ b/internal/ingress/types.go @@ -141,6 +141,7 @@ func (s Backend) HashInclude(field string, v interface{}) (bool, error) { // +k8s:deepcopy-gen=true type SessionAffinityConfig struct { AffinityType string `json:"name"` + AffinityMode string `json:"mode"` CookieSessionAffinity CookieSessionAffinity `json:"cookieSessionAffinity"` } diff --git a/internal/ingress/types_equals.go b/internal/ingress/types_equals.go index 758590309..1b81f69fa 100644 --- a/internal/ingress/types_equals.go +++ b/internal/ingress/types_equals.go @@ -152,6 +152,9 @@ func (sac1 *SessionAffinityConfig) Equal(sac2 *SessionAffinityConfig) bool { if sac1.AffinityType != sac2.AffinityType { return false } + if sac1.AffinityMode != sac2.AffinityMode { + return false + } if !(&sac1.CookieSessionAffinity).Equal(&sac2.CookieSessionAffinity) { return false } diff --git a/rootfs/etc/nginx/lua/affinity/balanced.lua b/rootfs/etc/nginx/lua/affinity/balanced.lua new file mode 100644 index 000000000..9c74048bf --- /dev/null +++ 
b/rootfs/etc/nginx/lua/affinity/balanced.lua @@ -0,0 +1,57 @@ +-- An affinity mode which makes sure connections are rebalanced when a deployment is scaled. +-- The advantage of this mode is that the load on the pods will be redistributed. +-- The drawback of this mode is that, when scaling up a deployment, roughly (n-c)/n users +-- will lose their session, where c is the current number of pods and n is the new number of +-- pods. +-- +-- This class extends/implements the abstract class balancer.sticky. +-- +local math = require("math") +local resty_chash = require("resty.chash") +local util = require("util") + +local _M = {} + +-- Consider the situation of N upstreams one of which is failing. +-- Then the probability to obtain failing upstream after M iterations would be close to (1/N)**M. +-- For the worst case (2 upstreams; 20 iterations) it would be ~10**(-6) +-- which is much better then ~10**(-3) for 10 iterations. +local MAX_UPSTREAM_CHECKS_COUNT = 20 + +local function get_routing_key(self) + return self:get_cookie(), nil +end + +local function set_routing_key(self, key) + self:set_cookie(key) +end + +local function pick_new_upstream(self, failed_upstreams) + for i = 1, MAX_UPSTREAM_CHECKS_COUNT do + local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math.random(999999)) + + local new_upstream = self.instance:find(key) + + if not failed_upstreams[new_upstream] then + return new_upstream, key + end + end + + return nil, nil +end + +function _M.new(self, sticky_balancer, backend) + local o = sticky_balancer or {} + + local nodes = util.get_nodes(backend.endpoints) + + -- override sticky.balancer methods + o.instance = resty_chash:new(nodes) + o.get_routing_key = get_routing_key + o.set_routing_key = set_routing_key + o.pick_new_upstream = pick_new_upstream + + return sticky_balancer +end + +return _M diff --git a/rootfs/etc/nginx/lua/affinity/persistent.lua b/rootfs/etc/nginx/lua/affinity/persistent.lua new file mode 100644 index 000000000..aa3e252d3 --- /dev/null +++ b/rootfs/etc/nginx/lua/affinity/persistent.lua @@ -0,0 +1,53 @@ +-- An affinity mode which makes sure a session is always routed to the same endpoint. +-- The advantage of this mode is that a user will never lose his session. +-- The drawback of this mode is that when scaling up a deployment, sessions will not +-- be rebalanced. +-- +local util = require("util") +local util_nodemap = require("util.nodemap") + +local _M = {} + +local function get_routing_key(self) + local cookie_value = self:get_cookie() + + if cookie_value then + -- format .. 
+ local routing_key = string.match(cookie_value, '[^\\.]+$') + + if routing_key == nil then + local err = string.format("Failed to extract routing key from cookie '%s'!", cookie_value) + return nil, err + end + + return routing_key, nil + end + + return nil, nil +end + +local function set_routing_key(self, key) + local value = string.format("%s.%s.%s", ngx.now(), ngx.worker.pid(), key) + self:set_cookie(value); +end + +local function pick_new_upstream(self, failed_upstreams) + return self.instance:random_except(failed_upstreams) +end + +function _M.new(self, sticky_balancer, backend) + local o = sticky_balancer or {} + + local nodes = util.get_nodes(backend.endpoints) + local hash_salt = backend["name"] + + -- override sticky.balancer methods + o.instance = util_nodemap:new(nodes, hash_salt) + o.get_routing_key = get_routing_key + o.set_routing_key = set_routing_key + o.pick_new_upstream = pick_new_upstream + + return sticky_balancer +end + +return _M diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index 366ee6d32..3ceee9597 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -1,8 +1,8 @@ +local affinity_balanced = require("affinity.balanced") +local affinity_persistent = require("affinity.persistent") local balancer_resty = require("balancer.resty") -local resty_chash = require("resty.chash") local util = require("util") local ck = require("resty.cookie") -local math = require("math") local ngx_balancer = require("ngx.balancer") local split = require("util.split") @@ -10,34 +10,60 @@ local string_format = string.format local ngx_log = ngx.log local INFO = ngx.INFO -local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" }) +local _M = balancer_resty:new({ name = "sticky" }) local DEFAULT_COOKIE_NAME = "route" --- Consider the situation of N upstreams one of which is failing. --- Then the probability to obtain failing upstream after M iterations would be close to (1/N)**M. --- For the worst case (2 upstreams; 20 iterations) it would be ~10**(-6) --- which is much better then ~10**(-3) for 10 iterations. -local MAX_UPSTREAM_CHECKS_COUNT = 20 function _M.cookie_name(self) return self.cookie_session_affinity.name or DEFAULT_COOKIE_NAME end -function _M.new(self, backend) - local nodes = util.get_nodes(backend.endpoints) +local function init_affinity_mode(self, backend) + local mode = backend["sessionAffinityConfig"]["mode"] or 'balanced' + -- set default mode to 'balanced' for backwards compatibility + if mode == nil or mode == '' then + mode = 'balanced' + end + + self.affinity_mode = mode + + if mode == 'persistent' then + return affinity_persistent:new(self, backend) + end + + -- default is 'balanced' for backwards compatibility + if mode ~= 'balanced' then + ngx.log(ngx.WARN, string.format("Invalid affinity mode '%s'! 
Using 'balanced' as a default.", mode)) + end + + return affinity_balanced:new(self, backend) +end + +function _M.new(self, backend) local o = { - instance = self.factory:new(nodes), + instance = nil, + affinity_mode = nil, traffic_shaping_policy = backend.trafficShapingPolicy, alternative_backends = backend.alternativeBackends, cookie_session_affinity = backend["sessionAffinityConfig"]["cookieSessionAffinity"] } setmetatable(o, self) self.__index = self - return o + + return init_affinity_mode(o, backend) end -local function set_cookie(self, value) +function _M.get_cookie(self) + local cookie, err = ck:new() + if not cookie then + ngx.log(ngx.ERR, err) + end + + return cookie:get(self:cookie_name()) +end + +function _M.set_cookie(self, value) local cookie, err = ck:new() if not cookie then ngx.log(ngx.ERR, err) @@ -86,19 +112,30 @@ local function get_failed_upstreams() return indexed_upstream_addrs end -local function pick_new_upstream(self) - local failed_upstreams = get_failed_upstreams() +--- get_routing_key gets the current routing key from the cookie +-- @treturn string, string The routing key and an error message if an error occured. +function _M.get_routing_key(self) + -- interface method to get the routing key from the cookie + -- has to be overridden by an affinity mode + ngx.log(ngx.ERR, "[BUG] Failed to get routing key as no implementation has been provided!") + return nil, nil +end - for i = 1, MAX_UPSTREAM_CHECKS_COUNT do - local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math.random(999999)) - - local new_upstream = self.instance:find(key) - - if not failed_upstreams[new_upstream] then - return new_upstream, key - end - end +--- set_routing_key sets the current routing key on the cookie +-- @tparam string key The routing key to set on the cookie. +function _M.set_routing_key(self, key) + -- interface method to set the routing key on the cookie + -- has to be overridden by an affinity mode + ngx.log(ngx.ERR, "[BUG] Failed to set routing key as no implementation has been provided!") +end +--- pick_new_upstream picks a new upstream while ignoring the given failed upstreams. +-- @tparam {[string]=boolean} A table of upstreams to ignore where the key is the endpoint and the value a boolean. +-- @treturn string, string The endpoint and its key. +function _M.pick_new_upstream(self, failed_upstreams) + -- interface method to get a new upstream + -- has to be overridden by an affinity mode + ngx.log(ngx.ERR, "[BUG] Failed to pick new upstream as no implementation has been provided!") return nil, nil end @@ -128,15 +165,9 @@ local function should_set_cookie(self) end function _M.balance(self) - local cookie, err = ck:new() - if not cookie then - ngx.log(ngx.ERR, "error while initializing cookie: " .. 
tostring(err)) - return - end - local upstream_from_cookie - local key = cookie:get(self:cookie_name()) + local key = self:get_routing_key() if key then upstream_from_cookie = self.instance:find(key) end @@ -151,24 +182,34 @@ function _M.balance(self) local new_upstream - new_upstream, key = pick_new_upstream(self) + new_upstream, key = self:pick_new_upstream(get_failed_upstreams()) if not new_upstream then ngx.log(ngx.WARN, string.format("failed to get new upstream; using upstream %s", new_upstream)) elseif should_set_cookie(self) then - set_cookie(self, key) + self:set_routing_key(key) end return new_upstream end function _M.sync(self, backend) + local changed = false + + -- check and reinit affinity mode before syncing the balancer which will reinit the nodes + if self.affinity_mode ~= backend.sessionAffinityConfig.mode then + changed = true + init_affinity_mode(self, backend) + end + + -- reload balancer nodes balancer_resty.sync(self, backend) -- Reload the balancer if any of the annotations have changed. - local changed = not util.deep_compare( + changed = changed or not util.deep_compare( self.cookie_session_affinity, backend.sessionAffinityConfig.cookieSessionAffinity ) + if not changed then return end diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua index 0c8b3297d..7a5f07519 100644 --- a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -15,11 +15,16 @@ local function reset_ngx() end function get_mocked_cookie_new() + local o = { value = nil } + local mock = { + get = function(self, n) return self.value end, + set = function(self, c) self.value = c.value ; return true, nil end + } + setmetatable(o, mock) + mock.__index = mock + return function(self) - return { - get = function(self, n) return nil, "error" end, - set = function(self, n) return true, "" end - } + return o; end end @@ -229,7 +234,7 @@ describe("Sticky", function() end) end) - local function get_several_test_backends(change_on_failure) + local function get_several_test_backends(option) return { name = "access-router-production-web-80", endpoints = { @@ -238,7 +243,13 @@ describe("Sticky", function() }, sessionAffinityConfig = { name = "cookie", - cookieSessionAffinity = { name = "test_name", hash = "sha1", change_on_failure = change_on_failure } + mode = option["mode"], + cookieSessionAffinity = { + name = "test_name", + hash = "sha1", + change_on_failure = option["change_on_failure"], + locations = { ['test.com'] = {'/'} } + } }, } end @@ -257,21 +268,20 @@ describe("Sticky", function() context("when request to upstream fails", function() it("changes upstream when change_on_failure option is true", function() - -- create sticky cookie - cookie.new = function(self) - local return_obj = { - set = function(v) return false, nil end, - get = function(k) return "" end, - } - return return_obj, false - end - - local options = {false, true} + local options = { + {["change_on_failure"] = false, ["mode"] = nil}, + {["change_on_failure"] = false, ["mode"] = 'balanced'}, + {["change_on_failure"] = false, ["mode"] = 'persistent'}, + {["change_on_failure"] = true, ["mode"] = nil}, + {["change_on_failure"] = true, ["mode"] = 'balanced'}, + {["change_on_failure"] = true, ["mode"] = 'persistent'} + } for _, option in ipairs(options) do local sticky_balancer_instance = sticky:new(get_several_test_backends(option)) local old_upstream = sticky_balancer_instance:balance() + assert.is.Not.Nil(old_upstream) for 
_ = 1, 100 do -- make sure upstream doesn't change on subsequent calls of balance() assert.equal(old_upstream, sticky_balancer_instance:balance()) @@ -281,11 +291,11 @@ describe("Sticky", function() sticky_balancer_instance.get_last_failure = function() return "failed" end - _G.ngx.var = { upstream_addr = old_upstream } + _G.ngx.var.upstream_addr = old_upstream for _ = 1, 100 do local new_upstream = sticky_balancer_instance:balance() - if option == false then + if option["change_on_failure"] == false then -- upstream should be the same inspite of error, if change_on_failure option is false assert.equal(new_upstream, old_upstream) else diff --git a/rootfs/etc/nginx/lua/test/util/nodemap_test.lua b/rootfs/etc/nginx/lua/test/util/nodemap_test.lua new file mode 100644 index 000000000..f012bb7ee --- /dev/null +++ b/rootfs/etc/nginx/lua/test/util/nodemap_test.lua @@ -0,0 +1,167 @@ +local util = require("util") +local nodemap = require("util.nodemap") + +local function get_test_backend_single() + return { + name = "access-router-production-web-80", + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } + } + } +end + +local function get_test_backend_multi() + return { + name = "access-router-production-web-80", + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.7.41", port = "8080", maxFails = 0, failTimeout = 0 } + } + } +end + +local function get_test_nodes_ignore(endpoint) + local ignore = {} + ignore[endpoint] = true + return ignore +end + +describe("Node Map", function() + + local test_backend_single = get_test_backend_single() + local test_backend_multi = get_test_backend_multi() + local test_salt = test_backend_single.name + local test_nodes_single = util.get_nodes(test_backend_single.endpoints) + local test_nodes_multi = util.get_nodes(test_backend_multi.endpoints) + local test_endpoint1 = test_backend_multi.endpoints[1].address .. ":" .. test_backend_multi.endpoints[1].port + local test_endpoint2 = test_backend_multi.endpoints[2].address .. ":" .. test_backend_multi.endpoints[2].port + local test_nodes_ignore = get_test_nodes_ignore(test_endpoint1) + + describe("new()", function() + context("when no salt has been provided", function() + it("random() returns an unsalted key", function() + local nodemap_instance = nodemap:new(test_nodes_single, nil) + local expected_endpoint = test_endpoint1 + local expected_hash_key = ngx.md5(expected_endpoint) + local actual_endpoint + local actual_hash_key + + actual_endpoint, actual_hash_key = nodemap_instance:random() + + assert.equal(actual_endpoint, expected_endpoint) + assert.equal(expected_hash_key, actual_hash_key) + end) + end) + + context("when a salt has been provided", function() + it("random() returns a salted key", function() + local nodemap_instance = nodemap:new(test_nodes_single, test_salt) + local expected_endpoint = test_endpoint1 + local expected_hash_key = ngx.md5(test_salt .. 
expected_endpoint) + local actual_endpoint + local actual_hash_key + + actual_endpoint, actual_hash_key = nodemap_instance:random() + + assert.equal(actual_endpoint, expected_endpoint) + assert.equal(expected_hash_key, actual_hash_key) + end) + end) + + context("when no nodes have been provided", function() + it("random() returns nil", function() + local nodemap_instance = nodemap:new({}, test_salt) + local actual_endpoint + local actual_hash_key + + actual_endpoint, actual_hash_key = nodemap_instance:random() + + assert.equal(actual_endpoint, nil) + assert.equal(expected_hash_key, nil) + end) + end) + end) + + describe("find()", function() + before_each(function() + package.loaded["util.nodemap"] = nil + nodemap = require("util.nodemap") + end) + + context("when a hash key is valid", function() + it("find() returns the correct endpoint", function() + local nodemap_instance = nodemap:new(test_nodes_single, test_salt) + local test_hash_key + local expected_endpoint + local actual_endpoint + + expected_endpoint, test_hash_key = nodemap_instance:random() + assert.not_equal(expected_endpoint, nil) + assert.not_equal(test_hash_key, nil) + + actual_endpoint = nodemap_instance:find(test_hash_key) + assert.equal(actual_endpoint, expected_endpoint) + end) + end) + + context("when a hash key is invalid", function() + it("find() returns nil", function() + local nodemap_instance = nodemap:new(test_nodes_single, test_salt) + local test_hash_key = "invalid or nonexistent hash key" + local actual_endpoint + + actual_endpoint = nodemap_instance:find(test_hash_key) + + assert.equal(actual_endpoint, nil) + end) + end) + end) + + + describe("random_except()", function() + before_each(function() + package.loaded["util.nodemap"] = nil + nodemap = require("util.nodemap") + end) + + context("when nothing has been excluded", function() + it("random_except() returns the correct endpoint", function() + local nodemap_instance = nodemap:new(test_nodes_single, test_salt) + local expected_endpoint = test_endpoint1 + local test_hash_key + local actual_endpoint + + actual_endpoint, test_hash_key = nodemap_instance:random_except({}) + assert.equal(expected_endpoint, actual_endpoint) + assert.not_equal(test_hash_key, nil) + end) + end) + + context("when everything has been excluded", function() + it("random_except() returns nil", function() + local nodemap_instance = nodemap:new(test_nodes_single, test_salt) + local actual_hash_key + local actual_endpoint + + actual_endpoint, actual_hash_key = nodemap_instance:random_except(test_nodes_ignore) + + assert.equal(actual_endpoint, nil) + assert.equal(actual_hash_key, nil) + end) + end) + + context("when an endpoint has been excluded", function() + it("random_except() does not return it", function() + local nodemap_instance = nodemap:new(test_nodes_multi, test_salt) + local expected_endpoint = test_endpoint2 + local actual_endpoint + local test_hash_key + + actual_endpoint, test_hash_key = nodemap_instance:random_except(test_nodes_ignore) + + assert.equal(actual_endpoint, expected_endpoint) + assert.not_equal(test_hash_key, nil) + end) + end) + end) +end) diff --git a/rootfs/etc/nginx/lua/util/nodemap.lua b/rootfs/etc/nginx/lua/util/nodemap.lua new file mode 100644 index 000000000..b4b4c9738 --- /dev/null +++ b/rootfs/etc/nginx/lua/util/nodemap.lua @@ -0,0 +1,120 @@ +local math = require("math") +local util = require("util") + +local _M = {} + +--- create_map generates the node hash table +-- @tparam {[string]=number} nodes A table with the node as a key and its weight as a 
value. +-- @tparam string salt A salt that will be used to generate salted hash keys. +local function create_map(nodes, salt) + local hash_map = {} + + for endpoint, _ in pairs(nodes) do + -- obfuscate the endpoint with a shared key to prevent brute force + -- and rainbow table attacks which could reveal internal endpoints + local key = salt .. endpoint + local hash_key = ngx.md5(key) + hash_map[hash_key] = endpoint + end + + return hash_map +end + +--- get_random_node picks a random node from the given map. +-- @tparam {[string], ...} map A key to node hash table. +-- @treturn string,string The node and its key +local function get_random_node(map) + local size = util.tablelength(map) + + if size < 1 then + return nil, nil + end + + local index = math.random(1, size) + local count = 1 + + for key, endpoint in pairs(map) do + if count == index then + return endpoint, key + end + + count = count + 1 + end + + ngx.log(ngx.ERR, string.format("Failed to find node %d of %d! This is a bug, please report!", index, size)) + + return nil, nil +end + +--- new constructs a new instance of the node map +-- +-- The map uses MD5 to create hash keys for a given node. For security reasons it supports +-- salted hash keys, to prevent attackers from using rainbow tables or brute forcing +-- the node endpoints, which would reveal cluster internal network information. +-- +-- To make sure hash keys are reproducible on different ingress controller instances the salt +-- needs to be shared and therefore is not simply generated randomly. +-- +-- @tparam {[string]=number} endpoints A table with the node endpoint as a key and its weight as a value. +-- @tparam[opt] string hash_salt A optional hash salt that will be used to obfuscate the hash key. +function _M.new(self, endpoints, hash_salt) + + if hash_salt == nil then + hash_salt = '' + end + + -- the endpoints have to be saved as 'nodes' to keep compatibility to balancer.resty + local o = { + salt = hash_salt, + nodes = endpoints, + map = create_map(endpoints, hash_salt) + } + + setmetatable(o, self) + self.__index = self + return o +end + +--- reinit reinitializes the node map reusing the original salt +-- @tparam {[string]=number} nodes A table with the node as a key and its weight as a value. +function _M.reinit(self, nodes) + self.nodes = nodes + self.map = create_map(nodes, self.salt) +end + +--- find looks up a node by hash key. +-- @tparam string key The hash key. +-- @treturn string The node. +function _M.find(self, key) + return self.map[key] +end + +--- random picks a random node from the hashmap. +-- @treturn string,string A random node and its key or both nil. +function _M.random(self) + return get_random_node(self.map) +end + +--- random_except picks a random node from the hashmap, ignoring the nodes in the given table +-- @tparam {string, } ignore_nodes A table of nodes to ignore, the node needs to be the key, +-- the value needs to be set to true +-- @treturn string,string A random node and its key or both nil. 
+function _M.random_except(self, ignore_nodes) + local valid_nodes = {} + + -- avoid generating the map if no ignores where provided + if ignore_nodes == nil or util.tablelength(ignore_nodes) == 0 then + return get_random_node(self.map) + end + + -- generate valid endpoints + for key, endpoint in pairs(self.map) do + if not ignore_nodes[endpoint] then + valid_nodes[key] = endpoint + end + end + + return get_random_node(valid_nodes) +end + +return _M diff --git a/test/manifests/configuration-a.json b/test/manifests/configuration-a.json index 673b12696..fcd000ed1 100644 --- a/test/manifests/configuration-a.json +++ b/test/manifests/configuration-a.json @@ -54,6 +54,7 @@ }], "sessionAffinityConfig": { "name": "", + "mode": "", "cookieSessionAffinity": { "name": "" } @@ -126,6 +127,7 @@ }], "sessionAffinityConfig": { "name": "", + "mode": "", "cookieSessionAffinity": { "name": "" } @@ -191,6 +193,7 @@ }], "sessionAffinityConfig": { "name": "", + "mode": "", "cookieSessionAffinity": { "name": "" } diff --git a/test/manifests/configuration-b.json b/test/manifests/configuration-b.json index 21a1cfc18..40a8d27a4 100644 --- a/test/manifests/configuration-b.json +++ b/test/manifests/configuration-b.json @@ -54,6 +54,7 @@ }], "sessionAffinityConfig": { "name": "", + "mode": "", "cookieSessionAffinity": { "name": "" } @@ -126,6 +127,7 @@ }], "sessionAffinityConfig": { "name": "", + "mode": "", "cookieSessionAffinity": { "name": "" } @@ -191,6 +193,7 @@ }], "sessionAffinityConfig": { "name": "", + "mode": "", "cookieSessionAffinity": { "name": "" } From 881e352d68d27a0b820009fa565d4a9b5f6fcf28 Mon Sep 17 00:00:00 2001 From: Alexander Maret-Huskinson Date: Fri, 30 Aug 2019 18:07:24 +0200 Subject: [PATCH 2/5] Converted sticky session balancers into separate classes. 
--- rootfs/etc/nginx/lua/balancer.lua | 12 +- rootfs/etc/nginx/lua/balancer/sticky.lua | 82 ++------- .../sticky_balanced.lua} | 42 ++--- .../sticky_persistent.lua} | 41 +++-- .../nginx/lua/test/balancer/sticky_test.lua | 168 +++++++++++------- rootfs/etc/nginx/lua/test/balancer_test.lua | 6 +- 6 files changed, 174 insertions(+), 177 deletions(-) rename rootfs/etc/nginx/lua/{affinity/balanced.lua => balancer/sticky_balanced.lua} (70%) rename rootfs/etc/nginx/lua/{affinity/persistent.lua => balancer/sticky_persistent.lua} (68%) diff --git a/rootfs/etc/nginx/lua/balancer.lua b/rootfs/etc/nginx/lua/balancer.lua index ec8eb194b..b23ee4e15 100644 --- a/rootfs/etc/nginx/lua/balancer.lua +++ b/rootfs/etc/nginx/lua/balancer.lua @@ -6,7 +6,8 @@ local configuration = require("configuration") local round_robin = require("balancer.round_robin") local chash = require("balancer.chash") local chashsubset = require("balancer.chashsubset") -local sticky = require("balancer.sticky") +local sticky_balanced = require("balancer.sticky_balanced") +local sticky_persistent = require("balancer.sticky_persistent") local ewma = require("balancer.ewma") -- measured in seconds @@ -19,7 +20,8 @@ local IMPLEMENTATIONS = { round_robin = round_robin, chash = chash, chashsubset = chashsubset, - sticky = sticky, + sticky_balanced = sticky_balanced, + sticky_persistent = sticky_persistent, ewma = ewma, } @@ -30,7 +32,11 @@ local function get_implementation(backend) local name = backend["load-balance"] or DEFAULT_LB_ALG if backend["sessionAffinityConfig"] and backend["sessionAffinityConfig"]["name"] == "cookie" then - name = "sticky" + if backend["sessionAffinityConfig"]["mode"] == 'persistent' then + name = "sticky_persistent" + else + name = "sticky_balanced" + end elseif backend["upstreamHashByConfig"] and backend["upstreamHashByConfig"]["upstream-hash-by"] then if backend["upstreamHashByConfig"]["upstream-hash-by-subset"] then name = "chashsubset" diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index 3ceee9597..b088ff860 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -1,5 +1,3 @@ -local affinity_balanced = require("affinity.balanced") -local affinity_persistent = require("affinity.persistent") local balancer_resty = require("balancer.resty") local util = require("util") local ck = require("resty.cookie") @@ -10,48 +8,24 @@ local string_format = string.format local ngx_log = ngx.log local INFO = ngx.INFO -local _M = balancer_resty:new({ name = "sticky" }) +local _M = balancer_resty:new() local DEFAULT_COOKIE_NAME = "route" - function _M.cookie_name(self) return self.cookie_session_affinity.name or DEFAULT_COOKIE_NAME end -local function init_affinity_mode(self, backend) - local mode = backend["sessionAffinityConfig"]["mode"] or 'balanced' - - -- set default mode to 'balanced' for backwards compatibility - if mode == nil or mode == '' then - mode = 'balanced' - end - - self.affinity_mode = mode - - if mode == 'persistent' then - return affinity_persistent:new(self, backend) - end - - -- default is 'balanced' for backwards compatibility - if mode ~= 'balanced' then - ngx.log(ngx.WARN, string.format("Invalid affinity mode '%s'! 
Using 'balanced' as a default.", mode)) - end - - return affinity_balanced:new(self, backend) -end - -function _M.new(self, backend) +function _M.new(self) local o = { - instance = nil, - affinity_mode = nil, - traffic_shaping_policy = backend.trafficShapingPolicy, - alternative_backends = backend.alternativeBackends, - cookie_session_affinity = backend["sessionAffinityConfig"]["cookieSessionAffinity"] + alternative_backends = nil, + cookie_session_affinity = nil, + traffic_shaping_policy = nil } + setmetatable(o, self) self.__index = self - - return init_affinity_mode(o, backend) + + return o end function _M.get_cookie(self) @@ -112,34 +86,8 @@ local function get_failed_upstreams() return indexed_upstream_addrs end ---- get_routing_key gets the current routing key from the cookie --- @treturn string, string The routing key and an error message if an error occured. -function _M.get_routing_key(self) - -- interface method to get the routing key from the cookie - -- has to be overridden by an affinity mode - ngx.log(ngx.ERR, "[BUG] Failed to get routing key as no implementation has been provided!") - return nil, nil -end - ---- set_routing_key sets the current routing key on the cookie --- @tparam string key The routing key to set on the cookie. -function _M.set_routing_key(self, key) - -- interface method to set the routing key on the cookie - -- has to be overridden by an affinity mode - ngx.log(ngx.ERR, "[BUG] Failed to set routing key as no implementation has been provided!") -end - ---- pick_new_upstream picks a new upstream while ignoring the given failed upstreams. --- @tparam {[string]=boolean} A table of upstreams to ignore where the key is the endpoint and the value a boolean. --- @treturn string, string The endpoint and its key. -function _M.pick_new_upstream(self, failed_upstreams) - -- interface method to get a new upstream - -- has to be overridden by an affinity mode - ngx.log(ngx.ERR, "[BUG] Failed to pick new upstream as no implementation has been provided!") - return nil, nil -end - local function should_set_cookie(self) + if self.cookie_session_affinity.locations and ngx.var.host then local locs = self.cookie_session_affinity.locations[ngx.var.host] if locs == nil then @@ -193,19 +141,11 @@ function _M.balance(self) end function _M.sync(self, backend) - local changed = false - - -- check and reinit affinity mode before syncing the balancer which will reinit the nodes - if self.affinity_mode ~= backend.sessionAffinityConfig.mode then - changed = true - init_affinity_mode(self, backend) - end - -- reload balancer nodes balancer_resty.sync(self, backend) -- Reload the balancer if any of the annotations have changed. 
- changed = changed or not util.deep_compare( + local changed = not util.deep_compare( self.cookie_session_affinity, backend.sessionAffinityConfig.cookieSessionAffinity ) @@ -216,6 +156,8 @@ function _M.sync(self, backend) ngx_log(INFO, string_format("[%s] nodes have changed for backend %s", self.name, backend.name)) + self.traffic_shaping_policy = backend.trafficShapingPolicy + self.alternative_backends = backend.alternativeBackends self.cookie_session_affinity = backend.sessionAffinityConfig.cookieSessionAffinity end diff --git a/rootfs/etc/nginx/lua/affinity/balanced.lua b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua similarity index 70% rename from rootfs/etc/nginx/lua/affinity/balanced.lua rename to rootfs/etc/nginx/lua/balancer/sticky_balanced.lua index 9c74048bf..52d097388 100644 --- a/rootfs/etc/nginx/lua/affinity/balanced.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua @@ -4,13 +4,12 @@ -- will lose their session, where c is the current number of pods and n is the new number of -- pods. -- --- This class extends/implements the abstract class balancer.sticky. --- +local balancer_sticky = require("balancer.sticky") local math = require("math") local resty_chash = require("resty.chash") local util = require("util") -local _M = {} +local _M = balancer_sticky:new() -- Consider the situation of N upstreams one of which is failing. -- Then the probability to obtain failing upstream after M iterations would be close to (1/N)**M. @@ -18,18 +17,33 @@ local _M = {} -- which is much better then ~10**(-3) for 10 iterations. local MAX_UPSTREAM_CHECKS_COUNT = 20 -local function get_routing_key(self) +function _M.new(self, backend) + local nodes = util.get_nodes(backend.endpoints) + + local o = { + name = "sticky_balanced", + instance = resty_chash:new(nodes) + } + + setmetatable(o, self) + self.__index = self + + balancer_sticky.sync(o, backend) + + return o +end + +function _M.get_routing_key(self) return self:get_cookie(), nil end -local function set_routing_key(self, key) +function _M.set_routing_key(self, key) self:set_cookie(key) end -local function pick_new_upstream(self, failed_upstreams) +function _M.pick_new_upstream(self, failed_upstreams) for i = 1, MAX_UPSTREAM_CHECKS_COUNT do local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math.random(999999)) - local new_upstream = self.instance:find(key) if not failed_upstreams[new_upstream] then @@ -40,18 +54,4 @@ local function pick_new_upstream(self, failed_upstreams) return nil, nil end -function _M.new(self, sticky_balancer, backend) - local o = sticky_balancer or {} - - local nodes = util.get_nodes(backend.endpoints) - - -- override sticky.balancer methods - o.instance = resty_chash:new(nodes) - o.get_routing_key = get_routing_key - o.set_routing_key = set_routing_key - o.pick_new_upstream = pick_new_upstream - - return sticky_balancer -end - return _M diff --git a/rootfs/etc/nginx/lua/affinity/persistent.lua b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua similarity index 68% rename from rootfs/etc/nginx/lua/affinity/persistent.lua rename to rootfs/etc/nginx/lua/balancer/sticky_persistent.lua index aa3e252d3..0bd6dc8d0 100644 --- a/rootfs/etc/nginx/lua/affinity/persistent.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua @@ -3,12 +3,30 @@ -- The drawback of this mode is that when scaling up a deployment, sessions will not -- be rebalanced. 
-- +local balancer_sticky = require("balancer.sticky") local util = require("util") local util_nodemap = require("util.nodemap") -local _M = {} +local _M = balancer_sticky:new() -local function get_routing_key(self) +function _M.new(self, backend) + local nodes = util.get_nodes(backend.endpoints) + local hash_salt = backend["name"] + + local o = { + name = "sticky_persistent", + instance = util_nodemap:new(nodes, hash_salt) + } + + setmetatable(o, self) + self.__index = self + + balancer_sticky.sync(o, backend) + + return o +end + +function _M.get_routing_key(self) local cookie_value = self:get_cookie() if cookie_value then @@ -26,28 +44,13 @@ local function get_routing_key(self) return nil, nil end -local function set_routing_key(self, key) +function _M.set_routing_key(self, key) local value = string.format("%s.%s.%s", ngx.now(), ngx.worker.pid(), key) self:set_cookie(value); end -local function pick_new_upstream(self, failed_upstreams) +function _M.pick_new_upstream(self, failed_upstreams) return self.instance:random_except(failed_upstreams) end -function _M.new(self, sticky_balancer, backend) - local o = sticky_balancer or {} - - local nodes = util.get_nodes(backend.endpoints) - local hash_salt = backend["name"] - - -- override sticky.balancer methods - o.instance = util_nodemap:new(nodes, hash_salt) - o.get_routing_key = get_routing_key - o.set_routing_key = set_routing_key - o.pick_new_upstream = pick_new_upstream - - return sticky_balancer -end - return _M diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua index 7a5f07519..e9503dc5f 100644 --- a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -1,4 +1,5 @@ -local sticky = require("balancer.sticky") +local sticky_balanced = require("balancer.sticky_balanced") +local sticky_persistent = require("balancer.sticky_persistent") local cookie = require("resty.cookie") local util = require("util") @@ -57,21 +58,27 @@ describe("Sticky", function() describe("new(backend)", function() context("when backend specifies cookie name", function() - it("returns an instance containing the corresponding cookie name", function() + local function test(sticky) local sticky_balancer_instance = sticky:new(test_backend) local test_backend_cookie_name = test_backend.sessionAffinityConfig.cookieSessionAffinity.name assert.equal(sticky_balancer_instance:cookie_name(), test_backend_cookie_name) - end) + end + + it("returns an instance containing the corresponding cookie name", function() test(sticky_balanced) end) + it("returns an instance containing the corresponding cookie name", function() test(sticky_persistent) end) end) context("when backend does not specify cookie name", function() - it("returns an instance with 'route' as cookie name", function() + local function test(sticky) local temp_backend = util.deepcopy(test_backend) temp_backend.sessionAffinityConfig.cookieSessionAffinity.name = nil local sticky_balancer_instance = sticky:new(temp_backend) local default_cookie_name = "route" assert.equal(sticky_balancer_instance:cookie_name(), default_cookie_name) - end) + end + + it("returns an instance with 'route' as cookie name", function() test(sticky_balanced) end) + it("returns an instance with 'route' as cookie name", function() test(sticky_persistent) end) end) end) @@ -79,8 +86,10 @@ describe("Sticky", function() local mocked_cookie_new = cookie.new before_each(function() - package.loaded["balancer.sticky"] = nil - sticky = 
require("balancer.sticky") + package.loaded["balancer.sticky_balanced"] = nil + package.loaded["balancer.sticky_persistent"] = nil + sticky_balanced = require("balancer.sticky_balanced") + sticky_persistent = require("balancer.sticky_persistent") end) after_each(function() @@ -88,13 +97,17 @@ describe("Sticky", function() end) context("when client doesn't have a cookie set and location is in cookie_locations", function() - it("picks an endpoint for the client", function() + + local function test_pick_endpoint(sticky) local sticky_balancer_instance = sticky:new(test_backend) local peer = sticky_balancer_instance:balance() - assert.equal(peer, test_backend_endpoint) - end) + assert.equal(test_backend_endpoint, peer) + end - it("sets a cookie on the client", function() + it("picks an endpoint for the client", function() test_pick_endpoint(sticky_balanced) end) + it("picks an endpoint for the client", function() test_pick_endpoint(sticky_persistent) end) + + local function test_set_cookie(sticky) local s = {} cookie.new = function(self) local cookie_instance = { @@ -117,9 +130,12 @@ describe("Sticky", function() local sticky_balancer_instance = sticky:new(b) assert.has_no.errors(function() sticky_balancer_instance:balance() end) assert.spy(s).was_called() - end) + end - it("sets a secure cookie on the client when being in ssl mode", function() + it("sets a cookie on the client", function() test_set_cookie(sticky_balanced) end) + it("sets a cookie on the client", function() test_set_cookie(sticky_persistent) end) + + local function test_set_ssl_cookie(sticky) ngx.var.https = "on" local s = {} cookie.new = function(self) @@ -143,10 +159,18 @@ describe("Sticky", function() local sticky_balancer_instance = sticky:new(b) assert.has_no.errors(function() sticky_balancer_instance:balance() end) assert.spy(s).was_called() + end + + it("sets a secure cookie on the client when being in ssl mode", function() + test_set_ssl_cookie(sticky_balanced) + end) + it("sets a secure cookie on the client when being in ssl mode", function() + test_set_ssl_cookie(sticky_persistent) end) end) - context("when client doesn't have a cookie set and cookie_locations contains a matching wildcard location", function() + context("when client doesn't have a cookie set and cookie_locations contains a matching wildcard location", + function() before_each(function () ngx.var.host = "dev.test.com" end) @@ -154,7 +178,7 @@ describe("Sticky", function() ngx.var.host = "test.com" end) - it("sets a cookie on the client", function() + local function test(sticky) local s = {} cookie.new = function(self) local cookie_instance = { @@ -178,17 +202,24 @@ describe("Sticky", function() local sticky_balancer_instance = sticky:new(b) assert.has_no.errors(function() sticky_balancer_instance:balance() end) assert.spy(s).was_called() - end) + end + + it("sets a cookie on the client", function() test(sticky_balanced) end) + it("sets a cookie on the client", function() test(sticky_persistent) end) end) context("when client doesn't have a cookie set and location not in cookie_locations", function() - it("picks an endpoint for the client", function() + + local function test_pick_endpoint(sticky) local sticky_balancer_instance = sticky:new(test_backend) local peer = sticky_balancer_instance:balance() assert.equal(peer, test_backend_endpoint) - end) + end - it("does not set a cookie on the client", function() + it("picks an endpoint for the client", function() test_pick_endpoint(sticky_balanced) end) + it("picks an endpoint for the client", function() 
test_pick_endpoint(sticky_persistent) end) + + local function test_no_cookie(sticky) local s = {} cookie.new = function(self) local cookie_instance = { @@ -207,11 +238,15 @@ describe("Sticky", function() local sticky_balancer_instance = sticky:new(get_test_backend()) assert.has_no.errors(function() sticky_balancer_instance:balance() end) assert.spy(s).was_not_called() - end) + end + + it("does not set a cookie on the client", function() test_no_cookie(sticky_balanced) end) + it("does not set a cookie on the client", function() test_no_cookie(sticky_persistent) end) end) context("when client has a cookie set", function() - it("does not set a cookie", function() + + local function test_no_cookie(sticky) local s = {} cookie.new = function(self) local return_obj = { @@ -224,17 +259,23 @@ describe("Sticky", function() local sticky_balancer_instance = sticky:new(test_backend) assert.has_no.errors(function() sticky_balancer_instance:balance() end) assert.spy(s).was_not_called() - end) + end - it("returns the correct endpoint for the client", function() + it("does not set a cookie", test_no_cookie(sticky_balanced)) + it("does not set a cookie", test_no_cookie(sticky_persistent)) + + local function test_correct_endpoint(sticky) local sticky_balancer_instance = sticky:new(test_backend) local peer = sticky_balancer_instance:balance() assert.equal(peer, test_backend_endpoint) - end) + end + + it("returns the correct endpoint for the client", function() test_correct_endpoint(sticky_balanced) end) + it("returns the correct endpoint for the client", function() test_correct_endpoint(sticky_persistent) end) end) end) - local function get_several_test_backends(option) + local function get_several_test_backends(change_on_failure) return { name = "access-router-production-web-80", endpoints = { @@ -243,11 +284,10 @@ describe("Sticky", function() }, sessionAffinityConfig = { name = "cookie", - mode = option["mode"], cookieSessionAffinity = { name = "test_name", hash = "sha1", - change_on_failure = option["change_on_failure"], + change_on_failure = change_on_failure, locations = { ['test.com'] = {'/'} } } }, @@ -258,52 +298,58 @@ describe("Sticky", function() local mocked_cookie_new = cookie.new before_each(function() - package.loaded["balancer.sticky"] = nil - sticky = require("balancer.sticky") + package.loaded["balancer.sticky_balanced"] = nil + package.loaded["balancer.sticky_persistent"] = nil + sticky_balanced = require("balancer.sticky_balanced") + sticky_persistent = require("balancer.sticky_persistent") + mock_ngx({ var = { location_path = "/", host = "test.com" } }) end) after_each(function() - cookie.new = mocked_cookie_new + reset_ngx() end) context("when request to upstream fails", function() - it("changes upstream when change_on_failure option is true", function() - local options = { - {["change_on_failure"] = false, ["mode"] = nil}, - {["change_on_failure"] = false, ["mode"] = 'balanced'}, - {["change_on_failure"] = false, ["mode"] = 'persistent'}, - {["change_on_failure"] = true, ["mode"] = nil}, - {["change_on_failure"] = true, ["mode"] = 'balanced'}, - {["change_on_failure"] = true, ["mode"] = 'persistent'} - } - for _, option in ipairs(options) do - local sticky_balancer_instance = sticky:new(get_several_test_backends(option)) + local function test(sticky, change_on_failure) + local sticky_balancer_instance = sticky:new(get_several_test_backends(change_on_failure)) - local old_upstream = sticky_balancer_instance:balance() - assert.is.Not.Nil(old_upstream) - for _ = 1, 100 do - -- make sure 
upstream doesn't change on subsequent calls of balance() - assert.equal(old_upstream, sticky_balancer_instance:balance()) - end + local old_upstream = sticky_balancer_instance:balance() + assert.is.Not.Nil(old_upstream) + for _ = 1, 100 do + -- make sure upstream doesn't change on subsequent calls of balance() + assert.equal(old_upstream, sticky_balancer_instance:balance()) + end - -- simulate request failure - sticky_balancer_instance.get_last_failure = function() - return "failed" - end - _G.ngx.var.upstream_addr = old_upstream + -- simulate request failure + sticky_balancer_instance.get_last_failure = function() + return "failed" + end + _G.ngx.var.upstream_addr = old_upstream - for _ = 1, 100 do - local new_upstream = sticky_balancer_instance:balance() - if option["change_on_failure"] == false then - -- upstream should be the same inspite of error, if change_on_failure option is false - assert.equal(new_upstream, old_upstream) - else - -- upstream should change after error, if change_on_failure option is true - assert.not_equal(new_upstream, old_upstream) - end + for _ = 1, 100 do + local new_upstream = sticky_balancer_instance:balance() + if change_on_failure == false then + -- upstream should be the same inspite of error, if change_on_failure option is false + assert.equal(new_upstream, old_upstream) + else + -- upstream should change after error, if change_on_failure option is true + assert.not_equal(new_upstream, old_upstream) end end + end + + it("changes upstream when change_on_failure option is true", function() + test(sticky_balanced, 'balanced', true) + end) + it("changes upstream when change_on_failure option is true", function() + test(sticky_balanced, 'balanced', false) + end) + it("changes upstream when change_on_failure option is true", function() + test(sticky_persistent, 'balanced', true) + end) + it("changes upstream when change_on_failure option is true", function() + test(sticky_persistent, 'balanced', false) end) end) end) diff --git a/rootfs/etc/nginx/lua/test/balancer_test.lua b/rootfs/etc/nginx/lua/test/balancer_test.lua index 2851f5853..a3a004896 100644 --- a/rootfs/etc/nginx/lua/test/balancer_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer_test.lua @@ -23,9 +23,9 @@ local function reset_expected_implementations() ["access-router-production-web-80"] = package.loaded["balancer.round_robin"], ["my-dummy-app-1"] = package.loaded["balancer.round_robin"], ["my-dummy-app-2"] = package.loaded["balancer.chash"], - ["my-dummy-app-3"] = package.loaded["balancer.sticky"], + ["my-dummy-app-3"] = package.loaded["balancer.sticky_persistent"], ["my-dummy-app-4"] = package.loaded["balancer.ewma"], - ["my-dummy-app-5"] = package.loaded["balancer.sticky"], + ["my-dummy-app-5"] = package.loaded["balancer.sticky_balanced"] } end @@ -55,7 +55,7 @@ local function reset_backends() }, { name = "my-dummy-app-3", ["load-balance"] = "ewma", - sessionAffinityConfig = { name = "cookie", cookieSessionAffinity = { name = "route" } } + sessionAffinityConfig = { name = "cookie", mode = 'persistent', cookieSessionAffinity = { name = "route" } } }, { name = "my-dummy-app-4", ["load-balance"] = "ewma", }, { From 880b3dc5f17aaeb49d0e867245bf5ca15262d915 Mon Sep 17 00:00:00 2001 From: Alexander Maret-Huskinson Date: Fri, 30 Aug 2019 19:08:03 +0200 Subject: [PATCH 3/5] Fixed test findings. 
--- internal/ingress/annotations/annotations_test.go | 7 ++++--- rootfs/etc/nginx/lua/balancer/sticky_balanced.lua | 6 +++--- rootfs/etc/nginx/lua/balancer/sticky_persistent.lua | 2 +- rootfs/etc/nginx/lua/test/balancer/sticky_test.lua | 12 ++++++------ 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/internal/ingress/annotations/annotations_test.go b/internal/ingress/annotations/annotations_test.go index 2eac8c234..a496cd9ba 100644 --- a/internal/ingress/annotations/annotations_test.go +++ b/internal/ingress/annotations/annotations_test.go @@ -33,6 +33,7 @@ var ( annotationSecureVerifyCACert = parser.GetAnnotationWithPrefix("secure-verify-ca-secret") annotationPassthrough = parser.GetAnnotationWithPrefix("ssl-passthrough") annotationAffinityType = parser.GetAnnotationWithPrefix("affinity") + annotationAffinityMode = parser.GetAnnotationWithPrefix("affinity-mode") annotationCorsEnabled = parser.GetAnnotationWithPrefix("enable-cors") annotationCorsAllowMethods = parser.GetAnnotationWithPrefix("cors-allow-methods") annotationCorsAllowHeaders = parser.GetAnnotationWithPrefix("cors-allow-headers") @@ -205,8 +206,8 @@ func TestAffinitySession(t *testing.T) { {map[string]string{annotationAffinityType: "cookie", annotationAffinityMode: "balanced", annotationAffinityCookieName: "route"}, "cookie", "balanced", "route"}, {map[string]string{annotationAffinityType: "cookie", annotationAffinityMode: "persistent", annotationAffinityCookieName: "route1"}, "cookie", "persistent", "route1"}, {map[string]string{annotationAffinityType: "cookie", annotationAffinityMode: "balanced", annotationAffinityCookieName: ""}, "cookie", "balanced", "INGRESSCOOKIE"}, - {map[string]string{}, "", ""}, - {nil, "", ""}, + {map[string]string{}, "", "", ""}, + {nil, "", "", ""}, } for _, foo := range fooAnns { @@ -214,7 +215,7 @@ func TestAffinitySession(t *testing.T) { r := ec.Extract(ing).SessionAffinity t.Logf("Testing pass %v %v", foo.affinitytype, foo.name) - if (r.Mode != foo.affinitymode) { + if r.Mode != foo.affinitymode { t.Errorf("Returned %v but expected %v for Name", r.Mode, foo.affinitymode) } diff --git a/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua index 52d097388..e2132dc08 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua @@ -1,7 +1,7 @@ -- An affinity mode which makes sure connections are rebalanced when a deployment is scaled. -- The advantage of this mode is that the load on the pods will be redistributed. --- The drawback of this mode is that, when scaling up a deployment, roughly (n-c)/n users --- will lose their session, where c is the current number of pods and n is the new number of +-- The drawback of this mode is that, when scaling up a deployment, roughly (n-c)/n users +-- will lose their session, where c is the current number of pods and n is the new number of -- pods. 
-- local balancer_sticky = require("balancer.sticky") @@ -27,7 +27,7 @@ function _M.new(self, backend) setmetatable(o, self) self.__index = self - + balancer_sticky.sync(o, backend) return o diff --git a/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua index 0bd6dc8d0..993eb8c8c 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua @@ -20,7 +20,7 @@ function _M.new(self, backend) setmetatable(o, self) self.__index = self - + balancer_sticky.sync(o, backend) return o diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua index e9503dc5f..53f1c1ea7 100644 --- a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -261,8 +261,8 @@ describe("Sticky", function() assert.spy(s).was_not_called() end - it("does not set a cookie", test_no_cookie(sticky_balanced)) - it("does not set a cookie", test_no_cookie(sticky_persistent)) + it("does not set a cookie", function() test_no_cookie(sticky_balanced) end) + it("does not set a cookie", function() test_no_cookie(sticky_persistent) end) local function test_correct_endpoint(sticky) local sticky_balancer_instance = sticky:new(test_backend) @@ -340,16 +340,16 @@ describe("Sticky", function() end it("changes upstream when change_on_failure option is true", function() - test(sticky_balanced, 'balanced', true) + test(sticky_balanced, true) end) it("changes upstream when change_on_failure option is true", function() - test(sticky_balanced, 'balanced', false) + test(sticky_balanced, false) end) it("changes upstream when change_on_failure option is true", function() - test(sticky_persistent, 'balanced', true) + test(sticky_persistent, true) end) it("changes upstream when change_on_failure option is true", function() - test(sticky_persistent, 'balanced', false) + test(sticky_persistent, false) end) end) end) From f1839ddb4287e715a95cc3b0c04ab899d2c94bba Mon Sep 17 00:00:00 2001 From: Alexander Maret-Huskinson Date: Tue, 24 Sep 2019 10:46:02 +0200 Subject: [PATCH 4/5] Fixed review findings. 
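The review changes below: the mode-specific get_routing_key/set_routing_key indirection is removed so both modes read and write the cookie value directly, the early return on an unchanged cookieSessionAffinity config is dropped from sync(), and module requires are narrowed to the functions actually used (math.random, util.get_nodes, util.tablelength). For orientation, a rough usage sketch of the util.nodemap helper that the persistent mode delegates to; the endpoint addresses are invented for the example, the method names come from util/nodemap.lua:

local util_nodemap = require("util.nodemap")

-- endpoints keyed by "address:port" with a weight value, as produced by
-- util.get_nodes(backend.endpoints); the backend name doubles as hash salt
local nodes = { ["10.0.0.1:8080"] = 1, ["10.0.0.2:8080"] = 1 }
local map = util_nodemap:new(nodes, "access-router-production-web-80")

-- pick_new_upstream() excludes endpoints that already failed for this
-- request and lets the map choose randomly among the rest
local failed = { ["10.0.0.1:8080"] = true }
local endpoint, hash_key = map:random_except(failed)

-- the hash key is what ends up in the session cookie; a later request
-- resolves it back to the same endpoint via map:find(hash_key)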
--- rootfs/etc/nginx/lua/balancer/sticky.lua | 18 ++----------- .../nginx/lua/balancer/sticky_balanced.lua | 16 +++-------- .../nginx/lua/balancer/sticky_persistent.lua | 27 ++----------------- rootfs/etc/nginx/lua/util/nodemap.lua | 11 ++++---- 4 files changed, 13 insertions(+), 59 deletions(-) diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index b088ff860..73498d8a3 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -1,5 +1,4 @@ local balancer_resty = require("balancer.resty") -local util = require("util") local ck = require("resty.cookie") local ngx_balancer = require("ngx.balancer") local split = require("util.split") @@ -87,7 +86,6 @@ local function get_failed_upstreams() end local function should_set_cookie(self) - if self.cookie_session_affinity.locations and ngx.var.host then local locs = self.cookie_session_affinity.locations[ngx.var.host] if locs == nil then @@ -115,7 +113,7 @@ end function _M.balance(self) local upstream_from_cookie - local key = self:get_routing_key() + local key = self:get_cookie() if key then upstream_from_cookie = self.instance:find(key) end @@ -134,7 +132,7 @@ function _M.balance(self) if not new_upstream then ngx.log(ngx.WARN, string.format("failed to get new upstream; using upstream %s", new_upstream)) elseif should_set_cookie(self) then - self:set_routing_key(key) + self:set_cookie(key) end return new_upstream @@ -144,18 +142,6 @@ function _M.sync(self, backend) -- reload balancer nodes balancer_resty.sync(self, backend) - -- Reload the balancer if any of the annotations have changed. - local changed = not util.deep_compare( - self.cookie_session_affinity, - backend.sessionAffinityConfig.cookieSessionAffinity - ) - - if not changed then - return - end - - ngx_log(INFO, string_format("[%s] nodes have changed for backend %s", self.name, backend.name)) - self.traffic_shaping_policy = backend.trafficShapingPolicy self.alternative_backends = backend.alternativeBackends self.cookie_session_affinity = backend.sessionAffinityConfig.cookieSessionAffinity diff --git a/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua index e2132dc08..ce0018158 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_balanced.lua @@ -5,9 +5,9 @@ -- pods. 
-- local balancer_sticky = require("balancer.sticky") -local math = require("math") +local math_random = require("math").random local resty_chash = require("resty.chash") -local util = require("util") +local util_get_nodes = require("util").get_nodes local _M = balancer_sticky:new() @@ -18,7 +18,7 @@ local _M = balancer_sticky:new() local MAX_UPSTREAM_CHECKS_COUNT = 20 function _M.new(self, backend) - local nodes = util.get_nodes(backend.endpoints) + local nodes = util_get_nodes(backend.endpoints) local o = { name = "sticky_balanced", @@ -33,17 +33,9 @@ function _M.new(self, backend) return o end -function _M.get_routing_key(self) - return self:get_cookie(), nil -end - -function _M.set_routing_key(self, key) - self:set_cookie(key) -end - function _M.pick_new_upstream(self, failed_upstreams) for i = 1, MAX_UPSTREAM_CHECKS_COUNT do - local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math.random(999999)) + local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math_random(999999)) local new_upstream = self.instance:find(key) if not failed_upstreams[new_upstream] then diff --git a/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua index 993eb8c8c..a4a6f0da2 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky_persistent.lua @@ -4,13 +4,13 @@ -- be rebalanced. -- local balancer_sticky = require("balancer.sticky") -local util = require("util") +local util_get_nodes = require("util").get_nodes local util_nodemap = require("util.nodemap") local _M = balancer_sticky:new() function _M.new(self, backend) - local nodes = util.get_nodes(backend.endpoints) + local nodes = util_get_nodes(backend.endpoints) local hash_salt = backend["name"] local o = { @@ -26,29 +26,6 @@ function _M.new(self, backend) return o end -function _M.get_routing_key(self) - local cookie_value = self:get_cookie() - - if cookie_value then - -- format .. - local routing_key = string.match(cookie_value, '[^\\.]+$') - - if routing_key == nil then - local err = string.format("Failed to extract routing key from cookie '%s'!", cookie_value) - return nil, err - end - - return routing_key, nil - end - - return nil, nil -end - -function _M.set_routing_key(self, key) - local value = string.format("%s.%s.%s", ngx.now(), ngx.worker.pid(), key) - self:set_cookie(value); -end - function _M.pick_new_upstream(self, failed_upstreams) return self.instance:random_except(failed_upstreams) end diff --git a/rootfs/etc/nginx/lua/util/nodemap.lua b/rootfs/etc/nginx/lua/util/nodemap.lua index b4b4c9738..04d034023 100644 --- a/rootfs/etc/nginx/lua/util/nodemap.lua +++ b/rootfs/etc/nginx/lua/util/nodemap.lua @@ -1,5 +1,5 @@ -local math = require("math") -local util = require("util") +local math_random = require("math").random +local util_tablelength = require("util").tablelength local _M = {} @@ -24,13 +24,13 @@ end -- @tparam {[string], ...} map A key to node hash table. -- @treturn string,string The node and its key local function get_random_node(map) - local size = util.tablelength(map) + local size = util_tablelength(map) if size < 1 then return nil, nil end - local index = math.random(1, size) + local index = math_random(1, size) local count = 1 for key, endpoint in pairs(map) do @@ -58,7 +58,6 @@ end -- @tparam {[string]=number} endpoints A table with the node endpoint as a key and its weight as a value. -- @tparam[opt] string hash_salt A optional hash salt that will be used to obfuscate the hash key. 
 function _M.new(self, endpoints, hash_salt)
-
   if hash_salt == nil then
     hash_salt = ''
   end
@@ -103,7 +102,7 @@ function _M.random_except(self, ignore_nodes)
   local valid_nodes = {}
 
   -- avoid generating the map if no ignores where provided
-  if ignore_nodes == nil or util.tablelength(ignore_nodes) == 0 then
+  if ignore_nodes == nil or util_tablelength(ignore_nodes) == 0 then
     return get_random_node(self.map)
   end
 

From c26ab315b8fb838a2fa8337c4d3de0a274647709 Mon Sep 17 00:00:00 2001
From: Alexander Maret-Huskinson
Date: Tue, 24 Sep 2019 10:56:11 +0200
Subject: [PATCH 5/5] Fixed Lua lint findings.

---
 rootfs/etc/nginx/lua/balancer/sticky.lua | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua
index 73498d8a3..a5570911c 100644
--- a/rootfs/etc/nginx/lua/balancer/sticky.lua
+++ b/rootfs/etc/nginx/lua/balancer/sticky.lua
@@ -3,10 +3,6 @@ local ck = require("resty.cookie")
 local ngx_balancer = require("ngx.balancer")
 local split = require("util.split")
 
-local string_format = string.format
-local ngx_log = ngx.log
-local INFO = ngx.INFO
-
 local _M = balancer_resty:new()
 
 local DEFAULT_COOKIE_NAME = "route"
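Taken together, the series keeps cookie handling in the shared balancer.sticky base and lets each mode decide only how a new endpoint is chosen when the cookie cannot be honoured. A condensed, side-by-side sketch of that hook, not the modules verbatim (constants inlined, error handling omitted; self.instance is a resty.chash for the balanced mode and a util.nodemap for the persistent mode):

local math_random = require("math").random
local MAX_UPSTREAM_CHECKS_COUNT = 20

-- balanced mode: consistent hash over a throwaway random key. Adding pods
-- changes the hash ring, so roughly (n-c)/n sessions move to a new pod
-- (c pods before, n after); scaling 3 -> 5 pods remaps about 40% of them.
local function pick_new_upstream_balanced(self, failed_upstreams)
  for i = 1, MAX_UPSTREAM_CHECKS_COUNT do
    local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math_random(999999))
    local new_upstream = self.instance:find(key)
    if not failed_upstreams[new_upstream] then
      return new_upstream, key
    end
  end
  return nil, nil
end

-- persistent mode: a random node from the map, skipping failed endpoints.
-- Existing cookies keep resolving to the same node after a scale-up, so no
-- sessions are redistributed.
local function pick_new_upstream_persistent(self, failed_upstreams)
  return self.instance:random_except(failed_upstreams)
end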