fix: issue where canary overwrites default backend

Signed-off-by: Spazzy <brendankamp757@gmail.com>
This commit is contained in:
Spazzy 2023-06-12 09:22:03 +02:00
parent dc999d81da
commit 64229d2c08
No known key found for this signature in database
GPG key ID: 82244B5B6E60989E
6 changed files with 253 additions and 29 deletions

View file

@ -72,6 +72,8 @@ local function get_implementation(backend)
return implementation return implementation
end end
-- used to get the IP address of the upstream to set as the
-- backends endpoint to route to
local function resolve_external_names(original_backend) local function resolve_external_names(original_backend)
local backend = util.deepcopy(original_backend) local backend = util.deepcopy(original_backend)
local endpoints = {} local endpoints = {}
@ -181,6 +183,7 @@ local function sync_backends()
backends_last_synced_at = raw_backends_last_synced_at backends_last_synced_at = raw_backends_last_synced_at
end end
-- logic used to pick up if request should be routed to an alternative backend
local function route_to_alternative_balancer(balancer) local function route_to_alternative_balancer(balancer)
if balancer.is_affinitized(balancer) then if balancer.is_affinitized(balancer) then
-- If request is already affinitized to a primary balancer, keep the primary balancer. -- If request is already affinitized to a primary balancer, keep the primary balancer.
@ -218,7 +221,6 @@ local function route_to_alternative_balancer(balancer)
"of backend: ", tostring(backend_name)) "of backend: ", tostring(backend_name))
return false return false
end end
local target_header = util.replace_special_char(traffic_shaping_policy.header, local target_header = util.replace_special_char(traffic_shaping_policy.header,
"-", "_") "-", "_")
local header = ngx.var["http_" .. target_header] local header = ngx.var["http_" .. target_header]
@ -278,14 +280,15 @@ local function get_balancer()
local backend_name = ngx.var.proxy_upstream_name local backend_name = ngx.var.proxy_upstream_name
local balancer = balancers[backend_name] local balancer = balancers[backend_name]
if not balancer then if not balancer then
return nil return nil
end end
if route_to_alternative_balancer(balancer) then --we should not overwrite balancer when it is the default backend
if route_to_alternative_balancer(balancer) and not balancer.is_default_backend then
local alternative_backend_name = balancer.alternative_backends[1] local alternative_backend_name = balancer.alternative_backends[1]
ngx.var.proxy_alternative_upstream_name = alternative_backend_name ngx.var.proxy_alternative_upstream_name = alternative_backend_name
balancer = balancers[alternative_backend_name] balancer = balancers[alternative_backend_name]
end end
@ -318,6 +321,7 @@ end
function _M.rewrite() function _M.rewrite()
local balancer = get_balancer() local balancer = get_balancer()
if not balancer then if not balancer then
ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
return ngx.exit(ngx.status) return ngx.exit(ngx.status)
@ -344,6 +348,7 @@ function _M.balance()
ngx_balancer.set_more_tries(1) ngx_balancer.set_more_tries(1)
local ok, err = ngx_balancer.set_current_peer(peer) local ok, err = ngx_balancer.set_current_peer(peer)
if not ok then if not ok then
ngx.log(ngx.ERR, "error while setting current upstream peer ", peer, ngx.log(ngx.ERR, "error while setting current upstream peer ", peer,
": ", err) ": ", err)
@ -363,6 +368,16 @@ function _M.log()
balancer:after_balance() balancer:after_balance()
end end
--this is used to check if we are routing to the
--default backend for specific error codes so that we do not overwrite it with
--alternative routes
--https://github.com/kubernetes/ingress-nginx/issues/9944
function _M.is_default_backend()
if ngx.ctx.balancer then
ngx.ctx.balancer.is_default_backend = true
end
end
setmetatable(_M, {__index = { setmetatable(_M, {__index = {
get_implementation = get_implementation, get_implementation = get_implementation,
sync_backend = sync_backend, sync_backend = sync_backend,

View file

@ -931,6 +931,10 @@ stream {
rewrite (.*) / break; rewrite (.*) / break;
proxy_pass http://upstream_balancer; proxy_pass http://upstream_balancer;
access_by_lua_block {
balancer.is_default_backend()
}
log_by_lua_block { log_by_lua_block {
{{ if $enableMetrics }} {{ if $enableMetrics }}
monitor.call() monitor.call()

View file

@ -0,0 +1 @@
registry.k8s.io/ingress-nginx/nginx-errors:v20230505@sha256:3600dcd1bbd0d05959bb01af4b272714e94d22d24a64e91838e7183c80e53f7f

View file

@ -86,6 +86,54 @@ var _ = framework.DescribeAnnotation("canary-*", func() {
NotContains(canaryService) NotContains(canaryService)
}) })
// issue: https://github.com/kubernetes/ingress-nginx/issues/9944
// canary routing should not overwrite custom errors
ginkgo.It("should respond with a 401 status from the custom errors backend when canary responds with a 401",
func() {
host := "foo"
annotations := map[string]string{
"nginx.ingress.kubernetes.io/custom-http-errors": "401",
"nginx.ingress.kubernetes.io/default-backend": framework.DefaultBackendService,
}
f.NewDefaultBackendDeployment()
f.EnsureIngress(framework.NewSingleIngress(
host,
"/",
host,
f.Namespace,
framework.HTTPBunService,
80,
annotations,
))
f.WaitForNginxServer(host, func(server string) bool {
return strings.Contains(server, "server_name foo")
})
f.EnsureIngress(framework.NewSingleIngress(
canaryService,
"/",
host,
f.Namespace,
canaryService,
80,
map[string]string{
"nginx.ingress.kubernetes.io/canary": "true",
"nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader",
},
))
f.HTTPTestClient().
GET("/status/401").
WithHeader("Host", host).
Expect().
Status(http.StatusUnauthorized).
Body().
Contains("401")
})
ginkgo.It("should return 404 status for requests to the canary if no matching ingress is found", func() { ginkgo.It("should return 404 status for requests to the canary if no matching ingress is found", func() {
host := fooHost host := fooHost

View file

@ -31,20 +31,30 @@ import (
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
) )
// EchoService name of the deployment for the echo app const (
const EchoService = "echo" // EchoService name of the deployment for the echo app
EchoService = "echo"
// SlowEchoService name of the deployment for the echo app // SlowEchoService name of the deployment for the echo app
const SlowEchoService = "slow-echo" SlowEchoService = "slow-echo"
// HTTPBunService name of the deployment for the httpbun app // HTTPBunService name of the deployment for the httpbun app
const HTTPBunService = "httpbun" HTTPBunService = "httpbun"
// NipService name of external service using nip.io // NipService name of external service using nip.io
const NIPService = "external-nip" NIPService = "external-nip"
// HTTPBunImage is the default image that is used to deploy HTTPBun with the framework // DefaultBackendService name of default backend deployment
var HTTPBunImage = os.Getenv("HTTPBUN_IMAGE") DefaultBackendService = "default-backend"
)
var (
// HTTPBunImage is the default image that is used to deploy HTTPBun with the framework
HTTPBunImage = os.Getenv("HTTPBUN_IMAGE")
// DefaultBackendImage is the default image that is used to deploy custom
// default backend with the framework
DefaultBackendImage = os.Getenv("DEFAULT_BACKEND_IMAGE")
)
// EchoImage is the default image to be used by the echo service // EchoImage is the default image to be used by the echo service
const EchoImage = "registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:4938d1d91a2b7d19454460a8c1b010b89f6ff92d2987fd889ac3e8fc3b70d91a" //#nosec G101 const EchoImage = "registry.k8s.io/ingress-nginx/e2e-test-echo@sha256:4938d1d91a2b7d19454460a8c1b010b89f6ff92d2987fd889ac3e8fc3b70d91a" //#nosec G101
@ -56,8 +66,16 @@ type deploymentOptions struct {
name string name string
namespace string namespace string
image string image string
port int32
replicas int replicas int
command []string
args []string
env []corev1.EnvVar
volumeMounts []corev1.VolumeMount
volumes []corev1.Volume
svcAnnotations map[string]string svcAnnotations map[string]string
setProbe bool
probe *corev1.HTTPGetAction
} }
// WithDeploymentNamespace allows configuring the deployment's namespace // WithDeploymentNamespace allows configuring the deployment's namespace
@ -103,6 +121,13 @@ func WithImage(i string) func(*deploymentOptions) {
} }
} }
// WithProbeHandler used to set probe on deployment
func WithProbeHandler(p *corev1.HTTPGetAction) func(*deploymentOptions) {
return func(o *deploymentOptions) {
o.probe = p
}
}
// NewEchoDeployment creates a new single replica deployment of the echo server image in a particular namespace // NewEchoDeployment creates a new single replica deployment of the echo server image in a particular namespace
func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) { func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) {
options := &deploymentOptions{ options := &deploymentOptions{
@ -158,6 +183,108 @@ func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) {
assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready") assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
} }
// NewDefaultBackendDeployment creates a new single replica deployment of the
// custom errors backend in a particular namespace
func (f *Framework) NewDefaultBackendDeployment(opts ...func(*deploymentOptions)) {
options := &deploymentOptions{
namespace: f.Namespace,
name: DefaultBackendService,
replicas: 1,
image: DefaultBackendImage,
}
for _, o := range opts {
o(options)
}
f.EnsureConfigMap(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: options.name,
Namespace: options.namespace,
},
Data: map[string]string{
"404": `<!DOCTYPE html><html><body>404</body></html>`,
"401": `<!DOCTYPE html><html><body>401</body></html>`,
"503": `<!DOCTYPE html><html><body>503</body></html>`,
},
})
f.EnsureDeployment(newDeployment(
options.name,
options.namespace,
options.image,
8080,
int32(options.replicas),
nil, nil, nil,
[]corev1.VolumeMount{
{
Name: options.name,
MountPath: "/www",
},
},
[]corev1.Volume{
{
Name: options.name,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: options.name,
},
Items: []corev1.KeyToPath{
{
Key: "404",
Path: "404.html",
},
{
Key: "401",
Path: "401.html",
},
{
Key: "503",
Path: "503.html",
},
},
},
},
},
},
false,
WithProbeHandler(&corev1.HTTPGetAction{
Port: intstr.FromString("http"),
Path: "/healthz",
}),
))
f.EnsureService(&corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: options.name,
Namespace: options.namespace,
Annotations: options.svcAnnotations,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,
TargetPort: intstr.FromInt(8080),
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
"app": options.name,
},
},
})
err := WaitForEndpoints(
f.KubeClientSet,
DefaultTimeout,
options.name,
options.namespace,
options.replicas,
)
assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
}
// BuildNipHost used to generate a nip host for DNS resolving // BuildNipHost used to generate a nip host for DNS resolving
func BuildNIPHost(ip string) string { func BuildNIPHost(ip string) string {
return fmt.Sprintf("%s.nip.io", ip) return fmt.Sprintf("%s.nip.io", ip)
@ -261,7 +388,7 @@ func (f *Framework) NewHttpbunDeployment(opts ...func(*deploymentOptions)) strin
e, err := f.KubeClientSet. e, err := f.KubeClientSet.
CoreV1(). CoreV1().
Endpoints(f.Namespace). Endpoints(f.Namespace).
Get(context.TODO(), HTTPBunService, metav1.GetOptions{}) Get(context.TODO(), options.name, metav1.GetOptions{})
assert.Nil(ginkgo.GinkgoT(), err, "failed to get httpbun endpoint") assert.Nil(ginkgo.GinkgoT(), err, "failed to get httpbun endpoint")
return e.Subsets[0].Addresses[0].IP return e.Subsets[0].Addresses[0].IP
@ -486,9 +613,38 @@ func (f *Framework) NewGRPCBinDeployment() {
assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready") assert.Nil(ginkgo.GinkgoT(), err, "waiting for endpoints to become ready")
} }
func newDeployment(name, namespace, image string, port int32, replicas int32, command []string, args []string, env []corev1.EnvVar, func newDeployment(
volumeMounts []corev1.VolumeMount, volumes []corev1.Volume, setProbe bool, name, namespace, image string,
port int32, replicas int32,
command, args []string,
env []corev1.EnvVar,
volumeMounts []corev1.VolumeMount,
volumes []corev1.Volume,
setProbe bool,
opts ...func(*deploymentOptions),
) *appsv1.Deployment { ) *appsv1.Deployment {
// TODO: we should move to using options to configure deployments to have less
// logic here
o := &deploymentOptions{
name: name,
namespace: namespace,
image: image,
port: port,
command: command,
args: args,
env: env,
volumeMounts: volumeMounts,
volumes: volumes,
setProbe: setProbe,
probe: &corev1.HTTPGetAction{
Port: intstr.FromString("http"),
Path: "/",
},
}
for _, opt := range opts {
opt(o)
}
probe := &corev1.Probe{ probe := &corev1.Probe{
InitialDelaySeconds: 2, InitialDelaySeconds: 2,
PeriodSeconds: 1, PeriodSeconds: 1,
@ -496,48 +652,46 @@ func newDeployment(name, namespace, image string, port int32, replicas int32, co
TimeoutSeconds: 2, TimeoutSeconds: 2,
FailureThreshold: 6, FailureThreshold: 6,
ProbeHandler: corev1.ProbeHandler{ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{ HTTPGet: o.probe,
Port: intstr.FromString("http"),
Path: "/",
},
}, },
} }
d := &appsv1.Deployment{ d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: o.name,
Namespace: namespace, Namespace: o.namespace,
}, },
Spec: appsv1.DeploymentSpec{ Spec: appsv1.DeploymentSpec{
Replicas: NewInt32(replicas), Replicas: NewInt32(replicas),
Selector: &metav1.LabelSelector{ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{ MatchLabels: map[string]string{
"app": name, "app": o.name,
}, },
}, },
Template: corev1.PodTemplateSpec{ Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"app": name, "app": o.name,
}, },
}, },
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: NewInt64(0), TerminationGracePeriodSeconds: NewInt64(0),
Containers: []corev1.Container{ Containers: []corev1.Container{
{ {
Name: name, Name: o.name,
Image: image, Image: image,
Env: []corev1.EnvVar{}, Env: o.env,
Args: o.args,
Ports: []corev1.ContainerPort{ Ports: []corev1.ContainerPort{
{ {
Name: "http", Name: "http",
ContainerPort: port, ContainerPort: o.port,
}, },
}, },
VolumeMounts: volumeMounts, VolumeMounts: o.volumeMounts,
}, },
}, },
Volumes: volumes, Volumes: o.volumes,
}, },
}, },
}, },

View file

@ -52,6 +52,7 @@ fi
BASEDIR=$(dirname "$0") BASEDIR=$(dirname "$0")
NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE) NGINX_BASE_IMAGE=$(cat $BASEDIR/../../NGINX_BASE)
HTTPBUN_IMAGE=$(cat $BASEDIR/HTTPBUN_IMAGE) HTTPBUN_IMAGE=$(cat $BASEDIR/HTTPBUN_IMAGE)
DEFAULT_BACKEND_IMAGE=$(cat $BASEDIR/DEFAULT_BACKEND_IMAGE)
echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}" echo -e "${BGREEN}Granting permissions to ingress-nginx e2e service account...${NC}"
kubectl create serviceaccount ingress-nginx-e2e || true kubectl create serviceaccount ingress-nginx-e2e || true
@ -83,6 +84,7 @@ kubectl run --rm \
--env="E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS}" \ --env="E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS}" \
--env="NGINX_BASE_IMAGE=${NGINX_BASE_IMAGE}" \ --env="NGINX_BASE_IMAGE=${NGINX_BASE_IMAGE}" \
--env="HTTPBUN_IMAGE=${HTTPBUN_IMAGE}" \ --env="HTTPBUN_IMAGE=${HTTPBUN_IMAGE}" \
--env="DEFAULT_BACKEND_IMAGE=${DEFAULT_BACKEND_IMAGE}" \
--overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \ --overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \
e2e --image=nginx-ingress-controller:e2e e2e --image=nginx-ingress-controller:e2e