diff --git a/docs/examples/affinity/cookie/README.md b/docs/examples/affinity/cookie/README.md index 7abad84ed..5ee8855d0 100644 --- a/docs/examples/affinity/cookie/README.md +++ b/docs/examples/affinity/cookie/README.md @@ -13,6 +13,7 @@ Session affinity can be configured using the following annotations: |nginx.ingress.kubernetes.io/session-cookie-path|Path that will be set on the cookie (required if your [Ingress paths][ingress-paths] use regular expressions)|string (defaults to the currently [matched path][ingress-paths])| |nginx.ingress.kubernetes.io/session-cookie-max-age|Time until the cookie expires, corresponds to the `Max-Age` cookie directive|number of seconds| |nginx.ingress.kubernetes.io/session-cookie-expires|Legacy version of the previous annotation for compatibility with older browsers, generates an `Expires` cookie directive by adding the seconds to the current date|number of seconds| +|nginx.ingress.kubernetes.io/session-cookie-change-on-failure|When set to `false` nginx ingress will send request to upstream pointed by sticky cookie even if previous attempt failed. 
When set to `true` and previous attempt failed, sticky cookie will be changed to point to another upstream.|`true` or `false` (defaults to `false`)| You can create the [example Ingress](ingress.yaml) to test this: diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md index 830ab4f77..0a359b496 100755 --- a/docs/user-guide/nginx-configuration/annotations.md +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -73,6 +73,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/service-upstream](#service-upstream)|"true" or "false"| |[nginx.ingress.kubernetes.io/session-cookie-name](#cookie-affinity)|string| |[nginx.ingress.kubernetes.io/session-cookie-path](#cookie-affinity)|string| +|[nginx.ingress.kubernetes.io/session-cookie-change-on-failure](#cookie-affinity)|"true" or "false"| |[nginx.ingress.kubernetes.io/ssl-redirect](#server-side-https-enforcement-through-redirect)|"true" or "false"| |[nginx.ingress.kubernetes.io/ssl-passthrough](#ssl-passthrough)|"true" or "false"| |[nginx.ingress.kubernetes.io/upstream-hash-by](#custom-nginx-upstream-hashing)|string| diff --git a/internal/ingress/annotations/sessionaffinity/main.go b/internal/ingress/annotations/sessionaffinity/main.go index 44b449347..3f5db661f 100644 --- a/internal/ingress/annotations/sessionaffinity/main.go +++ b/internal/ingress/annotations/sessionaffinity/main.go @@ -44,10 +44,14 @@ const ( // This is used to control the cookie path when use-regex is set to true annotationAffinityCookiePath = "session-cookie-path" + + // This is used to control the cookie change after request failure + annotationAffinityCookieChangeOnFailure = "session-cookie-change-on-failure" ) var ( - affinityCookieExpiresRegex = regexp.MustCompile(`(^0|-?[1-9]\d*$)`) + affinityCookieExpiresRegex = regexp.MustCompile(`(^0|-?[1-9]\d*$)`) + affinityCookieChangeOnFailureRegex = 
regexp.MustCompile(`^(true|false)$`) ) // Config describes the per ingress session affinity config @@ -67,6 +71,8 @@ type Cookie struct { MaxAge string `json:"maxage"` // The path that a cookie will be set on Path string `json:"path"` + // Flag that allows cookie regeneration on request failure + ChangeOnFailure string `json:"changeonfailure"` } // cookieAffinityParse gets the annotation values related to Cookie Affinity @@ -99,6 +105,11 @@ func (a affinity) cookieAffinityParse(ing *extensions.Ingress) *Cookie { klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieMaxAge) } + cookie.ChangeOnFailure, err = parser.GetStringAnnotation(annotationAffinityCookieChangeOnFailure, ing) + if err != nil || !affinityCookieChangeOnFailureRegex.MatchString(cookie.ChangeOnFailure) { + klog.V(3).Infof("Invalid or no annotation value found in Ingress %v: %v. Ignoring it", ing.Name, annotationAffinityCookieChangeOnFailure) + } + return cookie } diff --git a/internal/ingress/annotations/sessionaffinity/main_test.go b/internal/ingress/annotations/sessionaffinity/main_test.go index faa20005d..cf7bbcb31 100644 --- a/internal/ingress/annotations/sessionaffinity/main_test.go +++ b/internal/ingress/annotations/sessionaffinity/main_test.go @@ -71,6 +71,7 @@ func TestIngressAffinityCookieConfig(t *testing.T) { data[parser.GetAnnotationWithPrefix(annotationAffinityCookieExpires)] = "4500" data[parser.GetAnnotationWithPrefix(annotationAffinityCookieMaxAge)] = "3000" data[parser.GetAnnotationWithPrefix(annotationAffinityCookiePath)] = "/foo" + data[parser.GetAnnotationWithPrefix(annotationAffinityCookieChangeOnFailure)] = "true" ing.SetAnnotations(data) affin, _ := NewParser(&resolver.Mock{}).Parse(ing) @@ -98,4 +99,8 @@ func TestIngressAffinityCookieConfig(t *testing.T) { if nginxAffinity.Cookie.Path != "/foo" { t.Errorf("expected /foo as session-cookie-path but returned %v", nginxAffinity.Cookie.Path) } + + if 
nginxAffinity.Cookie.ChangeOnFailure != "true" { + t.Errorf("expected change on failure parameter set to true but returned %v", nginxAffinity.Cookie.ChangeOnFailure) + } } diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index 5d2b46810..a275a18ab 100755 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -572,6 +572,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in ups.SessionAffinity.CookieSessionAffinity.Expires = anns.SessionAffinity.Cookie.Expires ups.SessionAffinity.CookieSessionAffinity.MaxAge = anns.SessionAffinity.Cookie.MaxAge ups.SessionAffinity.CookieSessionAffinity.Path = cookiePath + ups.SessionAffinity.CookieSessionAffinity.ChangeOnFailure = anns.SessionAffinity.Cookie.ChangeOnFailure locs := ups.SessionAffinity.CookieSessionAffinity.Locations if _, ok := locs[host]; !ok { diff --git a/internal/ingress/types.go b/internal/ingress/types.go index 54afa9a18..ec4128479 100755 --- a/internal/ingress/types.go +++ b/internal/ingress/types.go @@ -144,11 +144,12 @@ type SessionAffinityConfig struct { // CookieSessionAffinity defines the structure used in Affinity configured by Cookies. // +k8s:deepcopy-gen=true type CookieSessionAffinity struct { - Name string `json:"name"` - Expires string `json:"expires,omitempty"` - MaxAge string `json:"maxage,omitempty"` - Locations map[string][]string `json:"locations,omitempty"` - Path string `json:"path,omitempty"` + Name string `json:"name"` + Expires string `json:"expires,omitempty"` + MaxAge string `json:"maxage,omitempty"` + Locations map[string][]string `json:"locations,omitempty"` + Path string `json:"path,omitempty"` + ChangeOnFailure string `json:"changeonfailure"` } // UpstreamHashByConfig described setting from the upstream-hash-by* annotations. 
diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index 3db2ba3d5..798112eaf 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -3,6 +3,8 @@ local resty_chash = require("resty.chash") local util = require("util") local ck = require("resty.cookie") local math = require("math") +local ngx_balancer = require("ngx.balancer") +local split = require("util.split") local string_format = string.format local ngx_log = ngx.log @@ -11,6 +13,12 @@ local INFO = ngx.INFO local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" }) local DEFAULT_COOKIE_NAME = "route" +-- Consider the situation of N upstreams one of which is failing. +-- Then the probability to obtain failing upstream after M iterations would be close to (1/N)**M. +-- For the worst case (2 upstreams; 20 iterations) it would be ~10**(-6) +-- which is much better than ~10**(-3) for 10 iterations. +local MAX_UPSTREAM_CHECKS_COUNT = 20 + function _M.cookie_name(self) return self.cookie_session_affinity.name or DEFAULT_COOKIE_NAME end @@ -63,6 +71,10 @@ local function set_cookie(self, value) end end +function _M.get_last_failure() + return ngx_balancer.get_last_failure() +end + function _M.balance(self) local cookie, err = ck:new() if not cookie then @@ -70,24 +82,75 @@ function _M.balance(self) return end - local key = cookie:get(self:cookie_name()) - if not key then - key = string.format("%s.%s.%s", ngx.now(), ngx.worker.pid(), math.random(999999)) + -- upstream_from_cookie: upstream which is pointed by sticky cookie + local upstream_from_cookie = nil - if self.cookie_session_affinity.locations then - local locs = self.cookie_session_affinity.locations[ngx.var.host] - if locs ~= nil then - for _, path in pairs(locs) do - if ngx.var.location_path == path then - set_cookie(self, key) - break - end - end - end + local key = cookie:get(self:cookie_name()) + if key then + upstream_from_cookie = 
self.instance:find(key) + end + + -- get state of the previous attempt + local state_name = self.get_last_failure() + + if upstream_from_cookie ~= nil then + -- use previous upstream if this is the first attempt or previous attempt succeeded + -- or ingress is configured to ignore previous request result + if state_name == nil or self.cookie_session_affinity.changeonfailure == "false" then + return upstream_from_cookie end end - return self.instance:find(key) + -- failed_upstream: upstream which failed during previous attempt + local failed_upstream = nil + + -- If previous attempt failed recent upstream can be obtained from ngx.var.upstream_addr. + -- Do nothing if ingress is configured to ignore previous request result. + if state_name ~= nil and self.cookie_session_affinity.changeonfailure == "true" then + local upstream_addr = ngx.var.upstream_addr + failed_upstream = split.get_last_value(upstream_addr) + + if failed_upstream == nil then + ngx.log(ngx.ERR, string.format("failed to get failed_upstream from upstream_addr (%s)", upstream_addr)) + end + end + + -- new_upstream: upstream which is pointed by new key + local new_upstream = nil + + -- generate new upstream key if sticky cookie not set or previous attempt failed + for _ = 1, MAX_UPSTREAM_CHECKS_COUNT do + key = string.format("%s.%s.%s", ngx.now(), ngx.worker.pid(), math.random(999999)) + + new_upstream = self.instance:find(key) + + if failed_upstream ~= new_upstream then + -- set cookie only when we get NOT THE SAME upstream + if upstream_from_cookie ~= new_upstream then + if self.cookie_session_affinity.locations then + local locs = self.cookie_session_affinity.locations[ngx.var.host] + if locs ~= nil then + for _, path in pairs(locs) do + if ngx.var.location_path == path then + set_cookie(self, key) + break + end + end + end + end + + end + + -- new upstream was obtained; return it to the balancer + do return new_upstream end + end + + -- generated key points to the failed upstream; try another key + 
end + + ngx.log(ngx.WARN, string.format("failed to get new upstream; using upstream %s", new_upstream)) + + return new_upstream end function _M.sync(self, backend) diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua index 207f21973..3e18a8274 100644 --- a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -164,7 +164,7 @@ describe("Sticky", function() s = spy.on(cookie_instance, "set") return cookie_instance, false end - local sticky_balancer_instance = sticky:new(get_test_backend()) + local sticky_balancer_instance = sticky:new(get_test_backend()) assert.has_no.errors(function() sticky_balancer_instance:balance() end) assert.spy(s).was_not_called() end) @@ -193,4 +193,73 @@ describe("Sticky", function() end) end) end) + + local function get_several_test_backends(changeOnFailure) + return { + name = "access-router-production-web-80", + endpoints = { + { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }, + { address = "10.184.7.41", port = "8080", maxFails = 0, failTimeout = 0 }, + }, + sessionAffinityConfig = { + name = "cookie", + cookieSessionAffinity = { name = "test_name", hash = "sha1", changeonfailure = changeOnFailure } + }, + } + end + + describe("balance() after error", function() + local mocked_cookie_new = cookie.new + + before_each(function() + package.loaded["balancer.sticky"] = nil + sticky = require("balancer.sticky") + end) + + after_each(function() + cookie.new = mocked_cookie_new + end) + + context("when request to upstream fails", function() + it("changes upstream when changeOnFailure option is true", function() + -- create sticky cookie + cookie.new = function(self) + local return_obj = { + set = function(v) return false, nil end, + get = function(k) return "" end, + } + return return_obj, false + end + + local options = {'false', 'true'} + + for _, option in ipairs(options) do + local 
sticky_balancer_instance = sticky:new(get_several_test_backends(option)) + + local old_upstream = sticky_balancer_instance:balance() + for _ = 1, 100 do + -- make sure upstream doesn't change on subsequent calls of balance() + assert.equal(old_upstream, sticky_balancer_instance:balance()) + end + + -- simulate request failure + sticky_balancer_instance.get_last_failure = function() + return "failed" + end + _G.ngx.var = { upstream_addr = old_upstream } + + for _ = 1, 100 do + local new_upstream = sticky_balancer_instance:balance() + if option == 'false' then + -- upstream should be the same in spite of error, if changeOnFailure option is false + assert.equal(new_upstream, old_upstream) + else + -- upstream should change after error, if changeOnFailure option is true + assert.not_equal(new_upstream, old_upstream) + end + end + end + end) + end) + end) end) diff --git a/rootfs/etc/nginx/lua/util/split.lua b/rootfs/etc/nginx/lua/util/split.lua index 620e083a5..5a1999786 100644 --- a/rootfs/etc/nginx/lua/util/split.lua +++ b/rootfs/etc/nginx/lua/util/split.lua @@ -16,6 +16,12 @@ function _M.get_first_value(var) return t[1] end +function _M.get_last_value(var) + local t = _M.split_upstream_var(var) or {} + if #t == 0 then return nil end + return t[#t] +end + -- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example -- CAVEAT: nginx is giving out : instead of , so the docs are wrong -- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr