troubleshooting build

Signed-off-by: James Strong <strong.james.e@gmail.com>
James Strong 2022-09-28 16:26:30 -04:00 committed by James Strong
parent 39e151710d
commit 1d5bc6463b
77 changed files with 9648 additions and 95 deletions


@@ -190,6 +190,10 @@ check_dead_links: ## Check if the documentation contains dead links.
 dev-env: ## Starts a local Kubernetes cluster using kind, building and deploying the ingress controller.
 @build/dev-env.sh
+.PHONY: dev-apko
+dev-apko: ## Starts a local Kubernetes cluster using kind, building and deploying the ingress controller.
+@build/dev-apko.sh
 .PHONY: dev-env-stop
 dev-env-stop: ## Deletes local Kubernetes cluster created by kind.
 @kind delete cluster --name ingress-nginx-dev

build/dev-apko.sh Executable file

@@ -0,0 +1,130 @@
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ -n "$DEBUG" ]; then
set -x
fi
set -o errexit
set -o nounset
set -o pipefail
DIR=$(cd $(dirname "${BASH_SOURCE}") && pwd -P)
export TAG=${TAG:-"1.0.0-dev"}
export REGISTRY=${REGISTRY:-registry.k8s.io}
export DIGEST=${DIGEST:-}
export DEV_IMAGE=${DEV_IMAGE:-${REGISTRY}/controller:${TAG}}
if ! command -v kind &> /dev/null; then
echo "kind is not installed"
echo "Use a package manager (i.e 'brew install kind') or visit the official site https://kind.sigs.k8s.io"
exit 1
fi
if ! command -v kubectl &> /dev/null; then
echo "Please install kubectl 1.24.0 or higher"
exit 1
fi
if ! command -v helm &> /dev/null; then
echo "Please install helm"
exit 1
fi
HELM_VERSION=$(helm version 2>&1 | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+') || true
if [[ ${HELM_VERSION} < "v3.9.0" ]]; then
echo "Please upgrade helm to v3.9.0 or higher"
exit 1
fi
KUBE_CLIENT_VERSION=$(kubectl version --client --short 2>/dev/null | grep Client | awk '{print $3}' | cut -d. -f2) || true
if [[ ${KUBE_CLIENT_VERSION} -lt 24 ]]; then
echo "Please update kubectl to 1.24.2 or higher"
exit 1
fi
if [ "${SKIP_IMAGE_CREATION:-false}" = "false" ]; then
echo "[dev-env] building image"
make build image
docker tag "${REGISTRY}/controller:${TAG}" "${DEV_IMAGE}"
fi
export K8S_VERSION=${K8S_VERSION:-v1.24.2@sha256:1f0cee2282f43150b52dc7933183ed96abdcfc8d293f30ec07082495874876f1}
KIND_CLUSTER_NAME="ingress-nginx-dev"
if ! kind get clusters -q | grep -q ${KIND_CLUSTER_NAME}; then
echo "[dev-env] creating Kubernetes cluster with kind"
cat <<EOF | kind create cluster --name ${KIND_CLUSTER_NAME} --image "kindest/node:${K8S_VERSION}" --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            node-labels: "ingress-ready=true"
            authorization-mode: "AlwaysAllow"
    extraPortMappings:
      - containerPort: 80
        hostPort: 80
        protocol: TCP
      - containerPort: 443
        hostPort: 443
        protocol: TCP
EOF
else
echo "[dev-env] using existing Kubernetes kind cluster"
fi
echo "[dev-env] copying docker images to cluster..."
kind load docker-image --name="${KIND_CLUSTER_NAME}" "${DEV_IMAGE}"
echo "[dev-env] deploying NGINX Ingress controller..."
kubectl create namespace ingress-nginx &> /dev/null || true
cat << EOF | helm template ingress-nginx ${DIR}/../charts/ingress-nginx --namespace=ingress-nginx --values - | kubectl apply -n ingress-nginx -f -
controller:
  image:
    registry: "${REGISTRY}"
    image: "${DEV_IMAGE}"
    tag: "${TAG}"
    digest: "${DIGEST}"
  config:
    worker-processes: "1"
  podLabels:
    deploy-date: "$(date +%s)"
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  hostPort:
    enabled: true
  terminationGracePeriodSeconds: 0
  service:
    type: NodePort
EOF
cat <<EOF
Kubernetes cluster ready and ingress-nginx listening in localhost using ports 80 and 443
To delete the dev cluster execute: 'kind delete cluster --name ingress-nginx-dev'
EOF


@@ -29,12 +29,12 @@ ARCH := $(shell uname -m)
 MELANGE_DIR ?= melange
 APKO_DIR ?= apko
 MELANGE ?= docker run --rm --privileged -v "${PWD}":/work distroless.dev/melange:latest
-MELANGE ?= docker run -d --rm --privileged -v "${PWD}":/work distroless.dev/melange:latest
+MELANGE_DETACHED ?= docker run -d --rm --privileged -v "${PWD}":/work distroless.dev/melange:latest
 APKO ?= docker run --rm -v "${PWD}":/work distroless.dev/apko:latest
 KEY ?= melange.rsa
 REPO ?= packages
 TEMPLATE ?= melange/nginx-templates.json
-MELANGE_OPTS ?= --empty-workspace -k ${KEY}.pub --signing-key ${KEY} --arch ${ARCHS}
+MELANGE_OPTS ?= -k ${KEY}.pub --signing-key ${KEY} --arch ${ARCHS}
 KEY ?= melange.rsa
 REPO ?= $(shell pwd)/packages
 ARCHS?="amd64,arm64,arm/v6,arm/v7,s390x"
@@ -46,19 +46,24 @@ endef
 keygen: ## Generate Key pair for use with signing apks
 docker run --rm --privileged -v "${PWD}":/work distroless.dev/melange:latest keygen
+.PHONY: melange
 melange: ## Build melange $FILE
 ${MELANGE} build ${MELANGE_DIR}/${FILE}.yaml ${MELANGE_OPTS} --template '$(shell cat ${TEMPLATE})'
+nginx-melange:
+${MELANGE} build ${MELANGE_DIR}/${FILE}.yaml --source-dir ${MELANGE_DIR}/${FILE} ${MELANGE_OPTS} --template '$(shell cat ${TEMPLATE})'
 apko-build: ## Build an apko pipeline with $KEY and $FILE
 ${APKO} build -k ${KEY}.pub --debug ${APKO_DIR}/${FILE}.yaml $(IMAGE):$(TAG) $(IMAGE)-$(TAG).tar
-apko-push: ## Push apko built conatiner $IMAGE:$TAG to $REGISTRY
+apko-push: ## Push apko built container $IMAGE:$TAG to $REGISTRY
 ${APKO} publish -k ${KEY}.pub --debug ${APKO_DIR}/${FILE}.yaml $(IMAGE):$(TAG)
 load: ## Load apko built image into docker
 docker load < $(IMAGE)-$(TAG).tar
 docker tag $(IMAGE):$(TAG) $(REGISTRY)/$(IMAGE):$(TAG)
 docker push $(REGISTRY)/$(IMAGE):$(TAG)
+rm $(IMAGE)-$(TAG).tar
 build-all: clean-packages all-packages nginx-package ingress-packages ## Fresh build of all melange pipelines and apko files, default is all $ARCHS


@@ -1,5 +1,3 @@
-include: apko/nginx.yaml
 contents:
 repositories:
 - https://dl-cdn.alpinelinux.org/alpine/edge/main
@@ -7,19 +5,43 @@ contents:
 - '@local /work/packages'
 packages:
 - alpine-baselayout-data
+- alpine-base
+- apk-tools
+- busybox
 - dbg@local
 - waitshutdown@local
 - ingress-nginx@local
 - dumb-init
+- bash
+- geoip-dev
+- nginx@local
+- opentracing@local
+- msgpack-cpp@local
+- datadog-cpp@local
+- yaml-cpp@local
+- zipkin-cpp@local
+- modsecurity@local
+- luaresty-balancer@local
+- luacjson@local
+- luajit@local
+- lua-resty-cache@local
+- lua-resty-cookie@local
+- lua-resty-dns@local
+- lua-resty-core@local
+- lua-resty-global-throttle@local
+- lua-resty-ipmatcher@local
+- lua-resty-lock@local
+- lua-resty-redis@local
+- lua-resty-string@local
+- lua-resty-upload@local
 accounts:
 groups:
 - groupname: www-data
-gid: 10000
+gid: 101
 users:
 - username: www-data
-uid: 10000
+uid: 101
-run-as: 10000
+run-as: 101
 environments:
 PATH: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/luajit/bin"
@@ -50,17 +72,29 @@ paths:
 source: /usr/local/nginx/sbin/nginx
 permissions: 0o755
 recursive: true
+- path: /var/lib/nginx/proxy
+type: directory
+permissions: 0o755
+uid: 101
+gid: 101
+recursive: true
+- path: /etc/ingress-controller/auth
+type: directory
+permissions: 0o755
+uid: 101
+gid: 101
+recursive: true
 - path: /usr/local/
 type: directory
 permissions: 0o755
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 recursive: true
 - path: /usr/include/lua5.1
 type: directory
 permissions: 0o755
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 recursive: true
 - path: /usr/local/bin/lua
 type: hardlink
@@ -73,108 +107,108 @@ paths:
 - path: /var/lib/
 type: directory
 permissions: 0o755
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 recursive: true
 - path: /var/log/nginx/
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /var/lib/nginx/
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /var/lib/nginx/body
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /usr/local/nginx
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /run/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /run/nginx.pid
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: empty-file
 permissions: 0o755
 recursive: true
 - path: /etc/nginx/modules
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /etc/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /usr/local/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /opt/modsecurity/var/log
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /opt/modsecurity/var/upload
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /opt/modsecurity/var/audit
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /var/log
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /etc/ingress-controller
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /tmp/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /var/log/nginx/
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /var/log/audit
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true


@@ -28,11 +28,11 @@ contents:
 accounts:
 groups:
 - groupname: www-data
-gid: 10000
+gid: 101
 users:
 - username: www-data
-uid: 10000
+uid: 101
-run-as: 10000
+run-as: 101
 environments:
 PATH: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/luajit/bin"
@@ -44,14 +44,14 @@ paths:
 - path: /usr/local/
 type: directory
 permissions: 0o755
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 recursive: true
 - path: /usr/include/lua5.1
 type: directory
 permissions: 0o755
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 recursive: true
 - path: /usr/local/bin/lua
 type: hardlink
@@ -64,108 +64,108 @@ paths:
 - path: /var/lib/
 type: directory
 permissions: 0o755
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 recursive: true
 - path: /var/log/nginx/
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /var/lib/nginx/
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /var/lib/nginx/body
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /usr/local/nginx
 type: directory
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 permissions: 0o755
 recursive: true
 - path: /run/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /run/nginx.pid
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: empty-file
 permissions: 0o755
 recursive: true
 - path: /etc/nginx/modules
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /etc/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /usr/local/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /opt/modsecurity/var/log
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /opt/modsecurity/var/upload
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /opt/modsecurity/var/audit
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /var/log
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /etc/ingress-controller
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /tmp/nginx
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /var/log/nginx/
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true
 - path: /var/log/audit
-uid: 10000
+uid: 101
-gid: 10000
+gid: 101
 type: directory
 permissions: 0o755
 recursive: true


@@ -81,6 +81,7 @@ environment:
 - libmaxminddb-dev
 - bc
 - unzip
+- tree
 - dos2unix
 - libcrypto1.1
 - libcrypto3
@@ -96,10 +97,10 @@ environment:
 accounts:
 groups:
 - groupname: www-data
-gid: 10000
+gid: 101
 users:
 - username: www-data
-uid: 10000
+uid: 101
 pipeline:
 - uses: fetch
@@ -212,6 +213,7 @@ pipeline:
 set -o nounset
 set -o pipefail
+ls -lah
 export BUILD_PATH="${PWD}"
 echo "BUILD_PATH $BUILD_PATH"
 echo "Arch: $(uname -m)"
@@ -227,6 +229,10 @@ pipeline:
 ln -s $LUA_INCLUDE_DIR /usr/include/lua5.1
 ARCH=$(uname -m)
+tree etc/nginx/
+mkdir -p ${{targets.destdir}}/etc/nginx/
+cp -R etc/nginx/ ${{targets.destdir}}/etc/nginx/
+tree ${{targets.destdir}}/etc/nginx/
 # Get Brotli source and deps
 echo "::::::::::::::::::::::::::::::::::::::"
@@ -300,7 +306,17 @@ pipeline:
 echo "::::::::::::::::::::::::::::::::::::::"
 echo ":::: nginx-{{ .NGINX_VERSION }} ::::"
 echo "::::::::::::::::::::::::::::::::::::::"
 cd "$BUILD_PATH/nginx-{{ .NGINX_VERSION }}"
+# apply nginx patches
+for PATCH in `ls patches`;do
+echo "Patch: $PATCH"
+if [[ "$PATCH" == *.txt ]]; then
+patch -p0 < patches/$PATCH
+else
+patch -p1 < patches/$PATCH
+fi
+done
 WITH_FLAGS="--with-debug \
 --with-compat \


@@ -0,0 +1,4 @@
# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
labels:
- area/lua


@@ -0,0 +1,374 @@
local ngx_balancer = require("ngx.balancer")
local cjson = require("cjson.safe")
local util = require("util")
local dns_lookup = require("util.dns").lookup
local configuration = require("configuration")
local round_robin = require("balancer.round_robin")
local chash = require("balancer.chash")
local chashsubset = require("balancer.chashsubset")
local sticky_balanced = require("balancer.sticky_balanced")
local sticky_persistent = require("balancer.sticky_persistent")
local ewma = require("balancer.ewma")
local string = string
local ipairs = ipairs
local table = table
local getmetatable = getmetatable
local tostring = tostring
local pairs = pairs
local math = math
local ngx = ngx
-- measured in seconds
-- for an Nginx worker to pick up the new list of upstream peers
-- it will take <the delay until controller POSTed the backend object to the
-- Nginx endpoint> + BACKENDS_SYNC_INTERVAL
local BACKENDS_SYNC_INTERVAL = 1
local DEFAULT_LB_ALG = "round_robin"
local IMPLEMENTATIONS = {
round_robin = round_robin,
chash = chash,
chashsubset = chashsubset,
sticky_balanced = sticky_balanced,
sticky_persistent = sticky_persistent,
ewma = ewma,
}
local PROHIBITED_LOCALHOST_PORT = configuration.prohibited_localhost_port or '10246'
local PROHIBITED_PEER_PATTERN = "^127.*:" .. PROHIBITED_LOCALHOST_PORT .. "$"
local _M = {}
local balancers = {}
local backends_with_external_name = {}
local backends_last_synced_at = 0
local function get_implementation(backend)
local name = backend["load-balance"] or DEFAULT_LB_ALG
if backend["sessionAffinityConfig"] and
backend["sessionAffinityConfig"]["name"] == "cookie" then
if backend["sessionAffinityConfig"]["mode"] == "persistent" then
name = "sticky_persistent"
else
name = "sticky_balanced"
end
elseif backend["upstreamHashByConfig"] and
backend["upstreamHashByConfig"]["upstream-hash-by"] then
if backend["upstreamHashByConfig"]["upstream-hash-by-subset"] then
name = "chashsubset"
else
name = "chash"
end
end
local implementation = IMPLEMENTATIONS[name]
if not implementation then
ngx.log(ngx.WARN, backend["load-balance"], " is not supported, ",
"falling back to ", DEFAULT_LB_ALG)
implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG]
end
return implementation
end
local function resolve_external_names(original_backend)
local backend = util.deepcopy(original_backend)
local endpoints = {}
for _, endpoint in ipairs(backend.endpoints) do
local ips = dns_lookup(endpoint.address)
for _, ip in ipairs(ips) do
table.insert(endpoints, { address = ip, port = endpoint.port })
end
end
backend.endpoints = endpoints
return backend
end
local function format_ipv6_endpoints(endpoints)
local formatted_endpoints = {}
for _, endpoint in ipairs(endpoints) do
local formatted_endpoint = endpoint
if not endpoint.address:match("^%d+.%d+.%d+.%d+$") then
formatted_endpoint.address = string.format("[%s]", endpoint.address)
end
table.insert(formatted_endpoints, formatted_endpoint)
end
return formatted_endpoints
end
local function is_backend_with_external_name(backend)
local serv_type = backend.service and backend.service.spec
and backend.service.spec["type"]
return serv_type == "ExternalName"
end
local function sync_backend(backend)
if not backend.endpoints or #backend.endpoints == 0 then
balancers[backend.name] = nil
return
end
if is_backend_with_external_name(backend) then
backend = resolve_external_names(backend)
end
backend.endpoints = format_ipv6_endpoints(backend.endpoints)
local implementation = get_implementation(backend)
local balancer = balancers[backend.name]
if not balancer then
balancers[backend.name] = implementation:new(backend)
return
end
-- every implementation is the metatable of its instances (see .new(...) functions)
-- here we check if `balancer` is the instance of `implementation`
-- if it is not then we deduce LB algorithm has changed for the backend
if getmetatable(balancer) ~= implementation then
ngx.log(ngx.INFO,
string.format("LB algorithm changed from %s to %s, resetting the instance",
balancer.name, implementation.name))
balancers[backend.name] = implementation:new(backend)
return
end
balancer:sync(backend)
end
local function sync_backends_with_external_name()
for _, backend_with_external_name in pairs(backends_with_external_name) do
sync_backend(backend_with_external_name)
end
end
local function sync_backends()
local raw_backends_last_synced_at = configuration.get_raw_backends_last_synced_at()
if raw_backends_last_synced_at <= backends_last_synced_at then
return
end
local backends_data = configuration.get_backends_data()
if not backends_data then
balancers = {}
return
end
local new_backends, err = cjson.decode(backends_data)
if not new_backends then
ngx.log(ngx.ERR, "could not parse backends data: ", err)
return
end
local balancers_to_keep = {}
for _, new_backend in ipairs(new_backends) do
if is_backend_with_external_name(new_backend) then
local backend_with_external_name = util.deepcopy(new_backend)
backends_with_external_name[backend_with_external_name.name] = backend_with_external_name
else
sync_backend(new_backend)
end
balancers_to_keep[new_backend.name] = true
end
for backend_name, _ in pairs(balancers) do
if not balancers_to_keep[backend_name] then
balancers[backend_name] = nil
backends_with_external_name[backend_name] = nil
end
end
backends_last_synced_at = raw_backends_last_synced_at
end
local function route_to_alternative_balancer(balancer)
if balancer.is_affinitized(balancer) then
-- If request is already affinitized to a primary balancer, keep the primary balancer.
return false
end
if not balancer.alternative_backends then
return false
end
-- TODO: support traffic shaping for n > 1 alternative backends
local backend_name = balancer.alternative_backends[1]
if not backend_name then
ngx.log(ngx.ERR, "empty alternative backend")
return false
end
local alternative_balancer = balancers[backend_name]
if not alternative_balancer then
ngx.log(ngx.ERR, "no alternative balancer for backend: ",
tostring(backend_name))
return false
end
if alternative_balancer.is_affinitized(alternative_balancer) then
-- If request is affinitized to an alternative balancer, instruct caller to
-- switch to alternative.
return true
end
-- Use traffic shaping policy, if request didn't have affinity set.
local traffic_shaping_policy = alternative_balancer.traffic_shaping_policy
if not traffic_shaping_policy then
ngx.log(ngx.ERR, "traffic shaping policy is not set for balancer ",
"of backend: ", tostring(backend_name))
return false
end
local target_header = util.replace_special_char(traffic_shaping_policy.header,
"-", "_")
local header = ngx.var["http_" .. target_header]
if header then
if traffic_shaping_policy.headerValue
and #traffic_shaping_policy.headerValue > 0 then
if traffic_shaping_policy.headerValue == header then
return true
end
elseif traffic_shaping_policy.headerPattern
and #traffic_shaping_policy.headerPattern > 0 then
local m, err = ngx.re.match(header, traffic_shaping_policy.headerPattern)
if m then
return true
elseif err then
ngx.log(ngx.ERR, "error when matching canary-by-header-pattern: '",
traffic_shaping_policy.headerPattern, "', error: ", err)
return false
end
elseif header == "always" then
return true
elseif header == "never" then
return false
end
end
local target_cookie = traffic_shaping_policy.cookie
local cookie = ngx.var["cookie_" .. target_cookie]
if cookie then
if cookie == "always" then
return true
elseif cookie == "never" then
return false
end
end
local weightTotal = 100
if traffic_shaping_policy.weightTotal ~= nil and traffic_shaping_policy.weightTotal > 100 then
weightTotal = traffic_shaping_policy.weightTotal
end
if math.random(weightTotal) <= traffic_shaping_policy.weight then
return true
end
return false
end
local function get_balancer_by_upstream_name(upstream_name)
return balancers[upstream_name]
end
local function get_balancer()
if ngx.ctx.balancer then
return ngx.ctx.balancer
end
local backend_name = ngx.var.proxy_upstream_name
local balancer = balancers[backend_name]
if not balancer then
return nil
end
if route_to_alternative_balancer(balancer) then
local alternative_backend_name = balancer.alternative_backends[1]
ngx.var.proxy_alternative_upstream_name = alternative_backend_name
balancer = balancers[alternative_backend_name]
end
ngx.ctx.balancer = balancer
return balancer
end
function _M.init_worker()
-- when worker starts, sync non ExternalName backends without delay
sync_backends()
-- we call sync_backends_with_external_name in timer because for endpoints that require
-- DNS resolution it needs to use socket which is not available in
-- init_worker phase
local ok, err = ngx.timer.at(0, sync_backends_with_external_name)
if not ok then
ngx.log(ngx.ERR, "failed to create timer: ", err)
end
ok, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends)
if not ok then
ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends: ", err)
end
ok, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends_with_external_name)
if not ok then
ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends_with_external_name: ",
err)
end
end
function _M.rewrite()
local balancer = get_balancer()
if not balancer then
ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
return ngx.exit(ngx.status)
end
end
function _M.balance()
local balancer = get_balancer()
if not balancer then
return
end
local peer = balancer:balance()
if not peer then
ngx.log(ngx.WARN, "no peer was returned, balancer: " .. balancer.name)
return
end
if peer:match(PROHIBITED_PEER_PATTERN) then
ngx.log(ngx.ERR, "attempted to proxy to self, balancer: ", balancer.name, ", peer: ", peer)
return
end
ngx_balancer.set_more_tries(1)
local ok, err = ngx_balancer.set_current_peer(peer)
if not ok then
ngx.log(ngx.ERR, "error while setting current upstream peer ", peer,
": ", err)
end
end
function _M.log()
local balancer = get_balancer()
if not balancer then
return
end
if not balancer.after_balance then
return
end
balancer:after_balance()
end
setmetatable(_M, {__index = {
get_implementation = get_implementation,
sync_backend = sync_backend,
route_to_alternative_balancer = route_to_alternative_balancer,
get_balancer = get_balancer,
get_balancer_by_upstream_name = get_balancer_by_upstream_name,
}})
return _M
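
The algorithm selection in get_implementation above is driven entirely by a few fields of the backend object. The following standalone sketch restates that decision table in plain Lua so it can be run outside OpenResty; the backend tables are hypothetical examples, not real controller output.

-- Minimal sketch of the load-balancer selection rules from get_implementation.
-- The backend tables below are made-up examples for illustration only.
local function pick_algorithm(backend)
  local name = backend["load-balance"] or "round_robin"
  local affinity = backend.sessionAffinityConfig
  local hash_cfg = backend.upstreamHashByConfig
  if affinity and affinity.name == "cookie" then
    name = (affinity.mode == "persistent") and "sticky_persistent" or "sticky_balanced"
  elseif hash_cfg and hash_cfg["upstream-hash-by"] then
    name = hash_cfg["upstream-hash-by-subset"] and "chashsubset" or "chash"
  end
  return name
end

-- Cookie affinity in persistent mode takes precedence over load-balance.
print(pick_algorithm({
  ["load-balance"] = "ewma",
  sessionAffinityConfig = { name = "cookie", mode = "persistent" },
}))  --> sticky_persistent

-- A hash-by configuration with subsets enabled selects chashsubset.
print(pick_algorithm({
  upstreamHashByConfig = {
    ["upstream-hash-by"] = "$request_uri",
    ["upstream-hash-by-subset"] = true,
  },
}))  --> chashsubset

In the module itself an unknown algorithm name additionally falls back to round_robin with a warning.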


@@ -0,0 +1,34 @@
local balancer_resty = require("balancer.resty")
local resty_chash = require("resty.chash")
local util = require("util")
local ngx_log = ngx.log
local ngx_ERR = ngx.ERR
local setmetatable = setmetatable
local _M = balancer_resty:new({ factory = resty_chash, name = "chash" })
function _M.new(self, backend)
local nodes = util.get_nodes(backend.endpoints)
local complex_val, err =
util.parse_complex_value(backend["upstreamHashByConfig"]["upstream-hash-by"])
if err ~= nil then
ngx_log(ngx_ERR, "could not parse the value of the upstream-hash-by: ", err)
end
local o = {
instance = self.factory:new(nodes),
hash_by = complex_val,
traffic_shaping_policy = backend.trafficShapingPolicy,
alternative_backends = backend.alternativeBackends,
}
setmetatable(o, self)
self.__index = self
return o
end
function _M.balance(self)
local key = util.generate_var_value(self.hash_by)
return self.instance:find(key)
end
return _M


@@ -0,0 +1,101 @@
-- Consistent hashing to a subset of nodes. Instead of returning the same node
-- always, we return the same subset always.
local resty_chash = require("resty.chash")
local util = require("util")
local ngx_log = ngx.log
local ngx_ERR = ngx.ERR
local setmetatable = setmetatable
local tostring = tostring
local math = math
local table = table
local pairs = pairs
local _M = { name = "chashsubset" }
local function build_subset_map(backend)
local endpoints = {}
local subset_map = {}
local subsets = {}
local subset_size = backend["upstreamHashByConfig"]["upstream-hash-by-subset-size"]
for _, endpoint in pairs(backend.endpoints) do
table.insert(endpoints, endpoint)
end
local set_count = math.ceil(#endpoints/subset_size)
local node_count = set_count * subset_size
-- if we don't have enough endpoints, we reuse endpoints in the last set to
-- keep the same number on all of them.
local j = 1
for _ = #endpoints+1, node_count do
table.insert(endpoints, endpoints[j])
j = j+1
end
local k = 1
for i = 1, set_count do
local subset = {}
local subset_id = "set" .. tostring(i)
for _ = 1, subset_size do
table.insert(subset, endpoints[k])
k = k+1
end
subsets[subset_id] = subset
subset_map[subset_id] = 1
end
return subset_map, subsets
end
function _M.new(self, backend)
local subset_map, subsets = build_subset_map(backend)
local complex_val, err =
util.parse_complex_value(backend["upstreamHashByConfig"]["upstream-hash-by"])
if err ~= nil then
ngx_log(ngx_ERR, "could not parse the value of the upstream-hash-by: ", err)
end
local o = {
instance = resty_chash:new(subset_map),
hash_by = complex_val,
subsets = subsets,
current_endpoints = backend.endpoints,
traffic_shaping_policy = backend.trafficShapingPolicy,
alternative_backends = backend.alternativeBackends,
}
setmetatable(o, self)
self.__index = self
return o
end
function _M.is_affinitized()
return false
end
function _M.balance(self)
local key = util.generate_var_value(self.hash_by)
local subset_id = self.instance:find(key)
local endpoints = self.subsets[subset_id]
local endpoint = endpoints[math.random(#endpoints)]
return endpoint.address .. ":" .. endpoint.port
end
function _M.sync(self, backend)
local subset_map
local changed = not util.deep_compare(self.current_endpoints, backend.endpoints)
if not changed then
return
end
self.current_endpoints = backend.endpoints
subset_map, self.subsets = build_subset_map(backend)
self.instance:reinit(subset_map)
return
end
return _M
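
The endpoint-padding arithmetic in build_subset_map above is easier to follow with concrete numbers. The sketch below reruns that step in plain Lua for a hypothetical backend with five endpoints and an upstream-hash-by-subset-size of 3: ceil(5/3) = 2 subsets of 3 slots each, so the first endpoint is reused once to fill the last subset.

-- Worked example of the subset padding performed by build_subset_map.
-- The endpoint addresses are hypothetical placeholders.
local endpoints = { "10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4", "10.0.0.5" }
local subset_size = 3

local set_count = math.ceil(#endpoints / subset_size)   -- 2
local node_count = set_count * subset_size              -- 6

-- Reuse endpoints from the start of the list to pad the final subset.
local j = 1
for _ = #endpoints + 1, node_count do
  endpoints[#endpoints + 1] = endpoints[j]
  j = j + 1
end

local k = 1
for i = 1, set_count do
  local members = {}
  for _ = 1, subset_size do
    members[#members + 1] = endpoints[k]
    k = k + 1
  end
  print("set" .. i .. ": " .. table.concat(members, ", "))
end
--> set1: 10.0.0.1, 10.0.0.2, 10.0.0.3
--> set2: 10.0.0.4, 10.0.0.5, 10.0.0.1

The real module then feeds the subset ids ("set1", "set2", ...) into resty.chash and picks a random member of the chosen subset at balance time.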


@@ -0,0 +1,276 @@
-- Original Authors: Shiv Nagarajan & Scott Francis
-- Accessed: March 12, 2018
-- Inspiration drawn from:
-- https://github.com/twitter/finagle/blob/1bc837c4feafc0096e43c0e98516a8e1c50c4421
-- /finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala
local resty_lock = require("resty.lock")
local util = require("util")
local split = require("util.split")
local ngx = ngx
local math = math
local pairs = pairs
local ipairs = ipairs
local tostring = tostring
local string = string
local tonumber = tonumber
local setmetatable = setmetatable
local string_format = string.format
local table_insert = table.insert
local ngx_log = ngx.log
local INFO = ngx.INFO
local DECAY_TIME = 10 -- this value is in seconds
local LOCK_KEY = ":ewma_key"
local PICK_SET_SIZE = 2
local ewma_lock, ewma_lock_err = resty_lock:new("balancer_ewma_locks", {timeout = 0, exptime = 0.1})
if not ewma_lock then
error(ewma_lock_err)
end
local _M = { name = "ewma" }
local function lock(upstream)
local _, err = ewma_lock:lock(upstream .. LOCK_KEY)
if err then
if err ~= "timeout" then
ngx.log(ngx.ERR, string.format("EWMA Balancer failed to lock: %s", tostring(err)))
end
end
return err
end
local function unlock()
local ok, err = ewma_lock:unlock()
if not ok then
ngx.log(ngx.ERR, string.format("EWMA Balancer failed to unlock: %s", tostring(err)))
end
return err
end
local function decay_ewma(ewma, last_touched_at, rtt, now)
local td = now - last_touched_at
td = (td > 0) and td or 0
local weight = math.exp(-td/DECAY_TIME)
ewma = ewma * weight + rtt * (1.0 - weight)
return ewma
end
local function store_stats(upstream, ewma, now)
local success, err, forcible = ngx.shared.balancer_ewma_last_touched_at:set(upstream, now)
if not success then
ngx.log(ngx.WARN, "balancer_ewma_last_touched_at:set failed " .. err)
end
if forcible then
ngx.log(ngx.WARN, "balancer_ewma_last_touched_at:set valid items forcibly overwritten")
end
success, err, forcible = ngx.shared.balancer_ewma:set(upstream, ewma)
if not success then
ngx.log(ngx.WARN, "balancer_ewma:set failed " .. err)
end
if forcible then
ngx.log(ngx.WARN, "balancer_ewma:set valid items forcibly overwritten")
end
end
local function get_or_update_ewma(upstream, rtt, update)
local lock_err = nil
if update then
lock_err = lock(upstream)
end
local ewma = ngx.shared.balancer_ewma:get(upstream) or 0
if lock_err ~= nil then
return ewma, lock_err
end
local now = ngx.now()
local last_touched_at = ngx.shared.balancer_ewma_last_touched_at:get(upstream) or 0
ewma = decay_ewma(ewma, last_touched_at, rtt, now)
if not update then
return ewma, nil
end
store_stats(upstream, ewma, now)
unlock()
return ewma, nil
end
local function get_upstream_name(upstream)
return upstream.address .. ":" .. upstream.port
end
local function score(upstream)
-- Original implementation used names
-- Endpoints don't have names, so passing in IP:Port as key instead
local upstream_name = get_upstream_name(upstream)
return get_or_update_ewma(upstream_name, 0, false)
end
-- implementation similar to https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
-- or https://en.wikipedia.org/wiki/Random_permutation
-- loop from 1 .. k
-- pick a random value r from the remaining set of unpicked values (i .. n)
-- swap the value at position i with the value at position r
local function shuffle_peers(peers, k)
for i=1, k do
local rand_index = math.random(i,#peers)
peers[i], peers[rand_index] = peers[rand_index], peers[i]
end
-- peers[1 .. k] will now contain a randomly selected k from #peers
end
local function pick_and_score(peers, k)
shuffle_peers(peers, k)
local lowest_score_index = 1
local lowest_score = score(peers[lowest_score_index])
for i = 2, k do
local new_score = score(peers[i])
if new_score < lowest_score then
lowest_score_index, lowest_score = i, new_score
end
end
return peers[lowest_score_index], lowest_score
end
-- slow_start_ewma is something we use to avoid sending too many requests
-- to the newly introduced endpoints. We currently use average ewma values
-- of existing endpoints.
local function calculate_slow_start_ewma(self)
local total_ewma = 0
local endpoints_count = 0
for _, endpoint in pairs(self.peers) do
local endpoint_string = get_upstream_name(endpoint)
local ewma = ngx.shared.balancer_ewma:get(endpoint_string)
if ewma then
endpoints_count = endpoints_count + 1
total_ewma = total_ewma + ewma
end
end
if endpoints_count == 0 then
ngx.log(ngx.INFO, "no ewma value exists for the endpoints")
return nil
end
return total_ewma / endpoints_count
end
function _M.is_affinitized()
return false
end
function _M.balance(self)
local peers = self.peers
local endpoint, ewma_score = peers[1], -1
if #peers > 1 then
local k = (#peers < PICK_SET_SIZE) and #peers or PICK_SET_SIZE
local tried_endpoints
if not ngx.ctx.balancer_ewma_tried_endpoints then
tried_endpoints = {}
ngx.ctx.balancer_ewma_tried_endpoints = tried_endpoints
else
tried_endpoints = ngx.ctx.balancer_ewma_tried_endpoints
end
local filtered_peers
for _, peer in ipairs(peers) do
if not tried_endpoints[get_upstream_name(peer)] then
if not filtered_peers then
filtered_peers = {}
end
table_insert(filtered_peers, peer)
end
end
if not filtered_peers then
ngx.log(ngx.WARN, "all endpoints have been retried")
filtered_peers = util.deepcopy(peers)
end
if #filtered_peers > 1 then
endpoint, ewma_score = pick_and_score(filtered_peers, k)
else
endpoint, ewma_score = filtered_peers[1], score(filtered_peers[1])
end
tried_endpoints[get_upstream_name(endpoint)] = true
end
ngx.var.balancer_ewma_score = ewma_score
-- TODO(elvinefendi) move this processing to _M.sync
return get_upstream_name(endpoint)
end
function _M.after_balance(_)
local response_time = tonumber(split.get_last_value(ngx.var.upstream_response_time)) or 0
local connect_time = tonumber(split.get_last_value(ngx.var.upstream_connect_time)) or 0
local rtt = connect_time + response_time
local upstream = split.get_last_value(ngx.var.upstream_addr)
if util.is_blank(upstream) then
return
end
get_or_update_ewma(upstream, rtt, true)
end
function _M.sync(self, backend)
self.traffic_shaping_policy = backend.trafficShapingPolicy
self.alternative_backends = backend.alternativeBackends
local normalized_endpoints_added, normalized_endpoints_removed =
util.diff_endpoints(self.peers, backend.endpoints)
if #normalized_endpoints_added == 0 and #normalized_endpoints_removed == 0 then
ngx.log(ngx.INFO, "endpoints did not change for backend " .. tostring(backend.name))
return
end
ngx_log(INFO, string_format("[%s] peers have changed for backend %s", self.name, backend.name))
self.peers = backend.endpoints
for _, endpoint_string in ipairs(normalized_endpoints_removed) do
ngx.shared.balancer_ewma:delete(endpoint_string)
ngx.shared.balancer_ewma_last_touched_at:delete(endpoint_string)
end
local slow_start_ewma = calculate_slow_start_ewma(self)
if slow_start_ewma ~= nil then
local now = ngx.now()
for _, endpoint_string in ipairs(normalized_endpoints_added) do
store_stats(endpoint_string, slow_start_ewma, now)
end
end
end
function _M.new(self, backend)
local o = {
peers = backend.endpoints,
traffic_shaping_policy = backend.trafficShapingPolicy,
alternative_backends = backend.alternativeBackends,
}
setmetatable(o, self)
self.__index = self
return o
end
return _M
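
The update in decay_ewma above is a standard exponentially weighted moving average: the stored score is weighted by e^(-td/DECAY_TIME) and the newly measured RTT by the remainder, so older measurements fade as time passes. A quick standalone check with assumed values (stored score 0.200s, new RTT 0.050s) shows how quickly the old score gives way to the new one.

-- Standalone illustration of the decay used by decay_ewma (DECAY_TIME = 10s).
-- The RTT figures are assumed values, purely for illustration.
local DECAY_TIME = 10

local function decay_ewma(ewma, last_touched_at, rtt, now)
  local td = now - last_touched_at
  td = (td > 0) and td or 0
  local weight = math.exp(-td / DECAY_TIME)
  return ewma * weight + rtt * (1.0 - weight)
end

local old_score, rtt = 0.200, 0.050
for _, td in ipairs({ 0, 5, 10, 30 }) do
  print(string.format("td=%2ds  ewma=%.3f", td, decay_ewma(old_score, 0, rtt, td)))
end
--> td= 0s  ewma=0.200  (no time elapsed: old score kept)
--> td= 5s  ewma=0.141
--> td=10s  ewma=0.105  (weight = e^-1, about 0.37)
--> td=30s  ewma=0.057  (old measurement nearly forgotten)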


@@ -0,0 +1,36 @@
local util = require("util")
local string_format = string.format
local ngx_log = ngx.log
local INFO = ngx.INFO
local setmetatable = setmetatable
local _M = {}
function _M.new(self, o)
o = o or {}
setmetatable(o, self)
self.__index = self
return o
end
function _M.is_affinitized()
return false
end
function _M.sync(self, backend)
self.traffic_shaping_policy = backend.trafficShapingPolicy
self.alternative_backends = backend.alternativeBackends
local nodes = util.get_nodes(backend.endpoints)
local changed = not util.deep_compare(self.instance.nodes, nodes)
if not changed then
return
end
ngx_log(INFO, string_format("[%s] nodes have changed for backend %s", self.name, backend.name))
self.instance:reinit(nodes)
end
return _M


@@ -0,0 +1,25 @@
local balancer_resty = require("balancer.resty")
local resty_roundrobin = require("resty.roundrobin")
local util = require("util")
local setmetatable = setmetatable
local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" })
function _M.new(self, backend)
local nodes = util.get_nodes(backend.endpoints)
local o = {
instance = self.factory:new(nodes),
traffic_shaping_policy = backend.trafficShapingPolicy,
alternative_backends = backend.alternativeBackends,
}
setmetatable(o, self)
self.__index = self
return o
end
function _M.balance(self)
return self.instance:find()
end
return _M


@@ -0,0 +1,207 @@
local balancer_resty = require("balancer.resty")
local ck = require("resty.cookie")
local ngx_balancer = require("ngx.balancer")
local split = require("util.split")
local same_site = require("util.same_site")
local ngx = ngx
local pairs = pairs
local ipairs = ipairs
local string = string
local tonumber = tonumber
local setmetatable = setmetatable
local _M = balancer_resty:new()
local DEFAULT_COOKIE_NAME = "route"
local COOKIE_VALUE_DELIMITER = "|"
function _M.cookie_name(self)
return self.cookie_session_affinity.name or DEFAULT_COOKIE_NAME
end
function _M.new(self)
local o = {
alternative_backends = nil,
cookie_session_affinity = nil,
traffic_shaping_policy = nil,
backend_key = nil
}
setmetatable(o, self)
self.__index = self
return o
end
function _M.get_cookie_parsed(self)
local cookie, err = ck:new()
if not cookie then
ngx.log(ngx.ERR, err)
end
local result = {
upstream_key = nil,
backend_key = nil
}
local raw_value = cookie:get(self:cookie_name())
if not raw_value then
return result
end
local parsed_value, len = split.split_string(raw_value, COOKIE_VALUE_DELIMITER)
if len == 0 then
return result
end
result.upstream_key = parsed_value[1]
if len > 1 then
result.backend_key = parsed_value[2]
end
return result
end
function _M.get_cookie(self)
return self:get_cookie_parsed().upstream_key
end
function _M.set_cookie(self, value)
local cookie, err = ck:new()
if not cookie then
ngx.log(ngx.ERR, err)
end
local cookie_path = self.cookie_session_affinity.path
if not cookie_path then
cookie_path = ngx.var.location_path
end
local cookie_samesite = self.cookie_session_affinity.samesite
if cookie_samesite then
local cookie_conditional_samesite_none = self.cookie_session_affinity.conditional_samesite_none
if cookie_conditional_samesite_none
and cookie_samesite == "None"
and not same_site.same_site_none_compatible(ngx.var.http_user_agent) then
cookie_samesite = nil
end
end
local cookie_secure = self.cookie_session_affinity.secure
if cookie_secure == nil then
cookie_secure = ngx.var.https == "on"
end
local cookie_data = {
key = self:cookie_name(),
value = value .. COOKIE_VALUE_DELIMITER .. self.backend_key,
path = cookie_path,
httponly = true,
samesite = cookie_samesite,
secure = cookie_secure,
}
if self.cookie_session_affinity.expires and self.cookie_session_affinity.expires ~= "" then
cookie_data.expires = ngx.cookie_time(ngx.time() +
tonumber(self.cookie_session_affinity.expires))
end
if self.cookie_session_affinity.maxage and self.cookie_session_affinity.maxage ~= "" then
cookie_data.max_age = tonumber(self.cookie_session_affinity.maxage)
end
local ok
ok, err = cookie:set(cookie_data)
if not ok then
ngx.log(ngx.ERR, err)
end
end
function _M.is_affinitized(self)
return self:get_cookie_parsed().backend_key == self.backend_key
end
function _M.get_last_failure()
return ngx_balancer.get_last_failure()
end
local function get_failed_upstreams()
local indexed_upstream_addrs = {}
local upstream_addrs = split.split_upstream_var(ngx.var.upstream_addr) or {}
for _, addr in ipairs(upstream_addrs) do
indexed_upstream_addrs[addr] = true
end
return indexed_upstream_addrs
end
local function should_set_cookie(self)
local host = ngx.var.host
if ngx.var.server_name == '_' then
host = ngx.var.server_name
end
if self.cookie_session_affinity.locations then
local locs = self.cookie_session_affinity.locations[host]
if locs == nil then
-- Based off of wildcard hostname in ../certificate.lua
local wildcard_host, _, err = ngx.re.sub(host, "^[^\\.]+\\.", "*.", "jo")
if err then
ngx.log(ngx.ERR, "error: ", err);
elseif wildcard_host then
locs = self.cookie_session_affinity.locations[wildcard_host]
end
end
if locs ~= nil then
for _, path in pairs(locs) do
if ngx.var.location_path == path then
return true
end
end
end
end
return false
end
function _M.balance(self)
local upstream_from_cookie
local key = self:get_cookie()
if key then
upstream_from_cookie = self.instance:find(key)
end
local last_failure = self.get_last_failure()
local should_pick_new_upstream = last_failure ~= nil and
self.cookie_session_affinity.change_on_failure or upstream_from_cookie == nil
if not should_pick_new_upstream then
return upstream_from_cookie
end
local new_upstream
new_upstream, key = self:pick_new_upstream(get_failed_upstreams())
if not new_upstream then
ngx.log(ngx.WARN, string.format("failed to get new upstream; using upstream %s", new_upstream))
elseif should_set_cookie(self) then
self:set_cookie(key)
end
return new_upstream
end
function _M.sync(self, backend)
-- reload balancer nodes
balancer_resty.sync(self, backend)
self.traffic_shaping_policy = backend.trafficShapingPolicy
self.alternative_backends = backend.alternativeBackends
self.cookie_session_affinity = backend.sessionAffinityConfig.cookieSessionAffinity
self.backend_key = ngx.md5(ngx.md5(backend.name) .. backend.name)
end
return _M
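
The session cookie written by set_cookie above packs two values into a single string, the upstream key and a backend fingerprint, joined by COOKIE_VALUE_DELIMITER ("|"), and get_cookie_parsed splits them apart again. The sketch below shows that round trip with a made-up cookie value; split_string is a simplified plain-Lua stand-in for the split helper used above, so every name and value here is illustrative only.

-- Illustration of the "upstream_key|backend_key" cookie format used above.
-- split_string is a simplified stand-in for the split helper used above.
local DELIMITER = "|"

local function split_string(value, delimiter)
  local parts = {}
  for part in string.gmatch(value, "([^" .. delimiter .. "]+)") do
    parts[#parts + 1] = part
  end
  return parts, #parts
end

-- Hypothetical cookie value: a chash key plus a backend fingerprint.
local raw_value = "1664400000.1234.567890" .. DELIMITER .. "d41d8cd98f00b204e9800998ecf8427e"

local parsed, len = split_string(raw_value, DELIMITER)
local upstream_key = parsed[1]
local backend_key = (len > 1) and parsed[2] or nil

print(upstream_key)  --> 1664400000.1234.567890
print(backend_key)   --> d41d8cd98f00b204e9800998ecf8427e

is_affinitized then simply compares the parsed backend_key with the current backend's fingerprint to decide whether the request is already pinned.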


@@ -0,0 +1,53 @@
-- An affinity mode which makes sure connections are rebalanced when a deployment is scaled.
-- The advantage of this mode is that the load on the pods will be redistributed.
-- The drawback of this mode is that, when scaling up a deployment, roughly (n-c)/n users
-- will lose their session, where c is the current number of pods and n is the new number of
-- pods.
--
local balancer_sticky = require("balancer.sticky")
local math_random = require("math").random
local resty_chash = require("resty.chash")
local util_get_nodes = require("util").get_nodes
local ngx = ngx
local string = string
local setmetatable = setmetatable
local _M = balancer_sticky:new()
-- Consider the situation of N upstreams one of which is failing.
-- Then the probability to obtain failing upstream after M iterations would be close to (1/N)**M.
-- For the worst case (2 upstreams; 20 iterations) it would be ~10**(-6)
-- which is much better than ~10**(-3) for 10 iterations.
local MAX_UPSTREAM_CHECKS_COUNT = 20
function _M.new(self, backend)
local nodes = util_get_nodes(backend.endpoints)
local o = {
name = "sticky_balanced",
instance = resty_chash:new(nodes)
}
setmetatable(o, self)
self.__index = self
balancer_sticky.sync(o, backend)
return o
end
function _M.pick_new_upstream(self, failed_upstreams)
for i = 1, MAX_UPSTREAM_CHECKS_COUNT do
local key = string.format("%s.%s.%s", ngx.now() + i, ngx.worker.pid(), math_random(999999))
local new_upstream = self.instance:find(key)
if not failed_upstreams[new_upstream] then
return new_upstream, key
end
end
return nil, nil
end
return _M
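
The MAX_UPSTREAM_CHECKS_COUNT comment above argues that 20 draws are enough: with N upstreams and a single failing one, the chance that every independent random pick lands on the failing upstream is roughly (1/N)^M. The short computation below reproduces the two figures quoted in the comment (assumed worst case of N = 2).

-- Probability that all M random picks hit the one failing upstream out of N,
-- as discussed in the MAX_UPSTREAM_CHECKS_COUNT comment above.
local function p_all_picks_fail(n, m)
  return (1 / n) ^ m
end

print(string.format("%.1e", p_all_picks_fail(2, 10)))  --> about 9.8e-04 (~10^-3)
print(string.format("%.1e", p_all_picks_fail(2, 20)))  --> about 9.5e-07 (~10^-6)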


@@ -0,0 +1,34 @@
-- An affinity mode which makes sure a session is always routed to the same endpoint.
-- The advantage of this mode is that a user will never lose his session.
-- The drawback of this mode is that when scaling up a deployment, sessions will not
-- be rebalanced.
--
local balancer_sticky = require("balancer.sticky")
local util_get_nodes = require("util").get_nodes
local util_nodemap = require("util.nodemap")
local setmetatable = setmetatable
local _M = balancer_sticky:new()
function _M.new(self, backend)
local nodes = util_get_nodes(backend.endpoints)
local hash_salt = backend["name"]
local o = {
name = "sticky_persistent",
instance = util_nodemap:new(nodes, hash_salt)
}
setmetatable(o, self)
self.__index = self
balancer_sticky.sync(o, backend)
return o
end
function _M.pick_new_upstream(self, failed_upstreams)
return self.instance:random_except(failed_upstreams)
end
return _M


@@ -0,0 +1,275 @@
local http = require("resty.http")
local ssl = require("ngx.ssl")
local ocsp = require("ngx.ocsp")
local ngx = ngx
local string = string
local tostring = tostring
local re_sub = ngx.re.sub
local unpack = unpack
local dns_lookup = require("util.dns").lookup
local _M = {
is_ocsp_stapling_enabled = false
}
local DEFAULT_CERT_HOSTNAME = "_"
local certificate_data = ngx.shared.certificate_data
local certificate_servers = ngx.shared.certificate_servers
local ocsp_response_cache = ngx.shared.ocsp_response_cache
local function get_der_cert_and_priv_key(pem_cert_key)
local der_cert, der_cert_err = ssl.cert_pem_to_der(pem_cert_key)
if not der_cert then
return nil, nil, "failed to convert certificate chain from PEM to DER: " .. der_cert_err
end
local der_priv_key, dev_priv_key_err = ssl.priv_key_pem_to_der(pem_cert_key)
if not der_priv_key then
return nil, nil, "failed to convert private key from PEM to DER: " .. dev_priv_key_err
end
return der_cert, der_priv_key, nil
end
local function set_der_cert_and_key(der_cert, der_priv_key)
local set_cert_ok, set_cert_err = ssl.set_der_cert(der_cert)
if not set_cert_ok then
return "failed to set DER cert: " .. set_cert_err
end
local set_priv_key_ok, set_priv_key_err = ssl.set_der_priv_key(der_priv_key)
if not set_priv_key_ok then
return "failed to set DER private key: " .. set_priv_key_err
end
end
local function get_pem_cert_uid(raw_hostname)
-- Convert hostname to ASCII lowercase (see RFC 6125 6.4.1) so that requests with uppercase
-- host would lead to the right certificate being chosen (controller serves certificates for
-- lowercase hostnames as specified in Ingress object's spec.rules.host)
local hostname = re_sub(raw_hostname, "\\.$", "", "jo"):gsub("[A-Z]",
function(c) return c:lower() end)
local uid = certificate_servers:get(hostname)
if uid then
return uid
end
local wildcard_hostname, _, err = re_sub(hostname, "^[^\\.]+\\.", "*.", "jo")
if err then
ngx.log(ngx.ERR, "error: ", err)
return uid
end
if wildcard_hostname then
uid = certificate_servers:get(wildcard_hostname)
end
return uid
end
local function is_ocsp_stapling_enabled_for(_)
-- TODO: implement per ingress OCSP stapling control
-- and make use of uid. The idea is to have configureCertificates
-- in controller side to push uid -> is_ocsp_enabled data to Lua land.
return _M.is_ocsp_stapling_enabled
end
local function get_resolved_url(parsed_url)
local scheme, host, port, path = unpack(parsed_url)
local ip = dns_lookup(host)[1]
return string.format("%s://%s:%s%s", scheme, ip, port, path)
end
local function do_ocsp_request(url, ocsp_request)
local httpc = http.new()
httpc:set_timeout(1000, 1000, 2000)
local parsed_url, err = httpc:parse_uri(url)
if not parsed_url then
return nil, err
end
local resolved_url = get_resolved_url(parsed_url)
local http_response
http_response, err = httpc:request_uri(resolved_url, {
method = "POST",
headers = {
["Content-Type"] = "application/ocsp-request",
["Host"] = parsed_url[2],
},
body = ocsp_request,
})
if not http_response then
return nil, err
end
if http_response.status ~= 200 then
return nil, "unexpected OCSP responder status code: " .. tostring(http_response.status)
end
return http_response.body, nil
end
-- TODO: ideally this function should have a lock around to ensure
-- only one instance runs at a time. Otherwise it is theoretically possible
-- that this function gets called from multiple Nginx workers at the same time.
-- While this has no functional implications, it generates extra load on OCSP servers.
local function fetch_and_cache_ocsp_response(uid, der_cert)
local url, err = ocsp.get_ocsp_responder_from_der_chain(der_cert)
if not url and err then
ngx.log(ngx.ERR, "could not extract OCSP responder URL: ", err)
return
end
if not url and not err then
ngx.log(ngx.DEBUG, "no OCSP responder URL returned")
return
end
local request
request, err = ocsp.create_ocsp_request(der_cert)
if not request then
ngx.log(ngx.ERR, "could not create OCSP request: ", err)
return
end
local ocsp_response
ocsp_response, err = do_ocsp_request(url, request)
if err then
ngx.log(ngx.ERR, "could not get OCSP response: ", err)
return
end
if not ocsp_response or #ocsp_response == 0 then
ngx.log(ngx.ERR, "OCSP responder returned an empty response")
return
end
local ok
ok, err = ocsp.validate_ocsp_response(ocsp_response, der_cert)
if not ok then
-- We are doing the same thing as vanilla Nginx here - if response status is not "good"
-- we do not use it - no stapling.
-- We can look into differentiation of validation errors and when status is i.e "revoked"
-- we might want to continue with stapling - it is at the least counterintuitive that
-- one would not staple response when certificate is revoked (I have not managed to find
-- any spec about this). Also one would expect browsers to do all these verifications
-- comprehensively, so why do we bother doing this on the server side? This can be tricky though:
-- imagine the certificate is not revoked but its OCSP responder is having some issues
-- and not generating a valid OCSP response. We would then staple that invalid OCSP response
-- and then browser would fail the connection because of invalid OCSP response - as a result
-- user request fails. But as a server we can validate response here and not staple it
-- to the connection if it is invalid. But if browser/client has must-staple enabled
-- then this will break anyway. So for must-staple there's no difference from users'
-- perspective. When must-staple is not enabled though it is better to not staple
-- invalid response and let the client/browser to fallback to CRL check or retry OCSP
-- on its own.
--
-- Also we should do negative caching here to avoid sending too many request to
-- the OCSP responder. Imagine OCSP responder is having an intermittent issue
-- and we keep sending request. It might make things worse for the responder.
ngx.log(ngx.NOTICE, "OCSP response validation failed: ", err)
return
end
-- Normally this should be (nextUpdate - thisUpdate), but Lua API does not expose
-- those attributes.
local expiry = 3600 * 24 * 3
local success, forcible
success, err, forcible = ocsp_response_cache:set(uid, ocsp_response, expiry)
if not success then
ngx.log(ngx.ERR, "failed to cache OCSP response: ", err)
end
if forcible then
ngx.log(ngx.NOTICE, "removed an existing item when saving OCSP response, ",
"consider increasing shared dictionary size for 'ocsp_response_cache'")
end
end
-- ocsp_staple looks at the cache and staples response from cache if it exists
-- if there is no cached response or the existing response is stale,
-- it enqueues fetch_and_cache_ocsp_response function to refetch the response.
-- This design tradeoffs lack of OCSP response in the first request with better latency.
--
-- Serving stale response ensures that we don't serve another request without OCSP response
-- when the cache entry expires. Instead we serve the single request with stale response
-- and enqueue fetch_and_cache_ocsp_response for refetch.
local function ocsp_staple(uid, der_cert)
local response, _, is_stale = ocsp_response_cache:get_stale(uid)
if not response or is_stale then
ngx.timer.at(0, function() fetch_and_cache_ocsp_response(uid, der_cert) end)
return false, nil
end
local ok, err = ocsp.set_ocsp_status_resp(response)
if not ok then
return false, err
end
return true, nil
end
function _M.configured_for_current_request()
if ngx.ctx.cert_configured_for_current_request == nil then
ngx.ctx.cert_configured_for_current_request = get_pem_cert_uid(ngx.var.host) ~= nil
end
return ngx.ctx.cert_configured_for_current_request
end
function _M.call()
local hostname, hostname_err = ssl.server_name()
if hostname_err then
ngx.log(ngx.ERR, "error while obtaining hostname: " .. hostname_err)
end
if not hostname then
ngx.log(ngx.INFO, "obtained hostname is nil (the client does "
.. "not support SNI?), falling back to default certificate")
hostname = DEFAULT_CERT_HOSTNAME
end
local pem_cert
local pem_cert_uid = get_pem_cert_uid(hostname)
if not pem_cert_uid then
pem_cert_uid = get_pem_cert_uid(DEFAULT_CERT_HOSTNAME)
end
if pem_cert_uid then
pem_cert = certificate_data:get(pem_cert_uid)
end
if not pem_cert then
ngx.log(ngx.ERR, "certificate not found, falling back to fake certificate for hostname: "
.. tostring(hostname))
return
end
local clear_ok, clear_err = ssl.clear_certs()
if not clear_ok then
ngx.log(ngx.ERR, "failed to clear existing (fallback) certificates: " .. clear_err)
return ngx.exit(ngx.ERROR)
end
local der_cert, der_priv_key, der_err = get_der_cert_and_priv_key(pem_cert)
if der_err then
ngx.log(ngx.ERR, der_err)
return ngx.exit(ngx.ERROR)
end
local set_der_err = set_der_cert_and_key(der_cert, der_priv_key)
if set_der_err then
ngx.log(ngx.ERR, set_der_err)
return ngx.exit(ngx.ERROR)
end
if is_ocsp_stapling_enabled_for(pem_cert_uid) then
local _, err = ocsp_staple(pem_cert_uid, der_cert)
if err then
ngx.log(ngx.ERR, "error during OCSP stapling: ", err)
end
end
end
return _M

View file

@@ -0,0 +1,256 @@
local cjson = require("cjson.safe")
local io = io
local ngx = ngx
local tostring = tostring
local string = string
local table = table
local pairs = pairs
-- this is the Lua representation of Configuration struct in internal/ingress/types.go
local configuration_data = ngx.shared.configuration_data
local certificate_data = ngx.shared.certificate_data
local certificate_servers = ngx.shared.certificate_servers
local ocsp_response_cache = ngx.shared.ocsp_response_cache
local EMPTY_UID = "-1"
local _M = {}
function _M.get_backends_data()
return configuration_data:get("backends")
end
function _M.get_general_data()
return configuration_data:get("general")
end
function _M.get_raw_backends_last_synced_at()
local raw_backends_last_synced_at = configuration_data:get("raw_backends_last_synced_at")
if raw_backends_last_synced_at == nil then
raw_backends_last_synced_at = 1
end
return raw_backends_last_synced_at
end
local function fetch_request_body()
ngx.req.read_body()
local body = ngx.req.get_body_data()
if not body then
-- request body might've been written to tmp file if body > client_body_buffer_size
local file_name = ngx.req.get_body_file()
local file = io.open(file_name, "rb")
if not file then
return nil
end
body = file:read("*all")
file:close()
end
return body
end
local function get_pem_cert(hostname)
local uid = certificate_servers:get(hostname)
if not uid then
return nil
end
return certificate_data:get(uid)
end
local function handle_servers()
if ngx.var.request_method ~= "POST" then
ngx.status = ngx.HTTP_BAD_REQUEST
ngx.print("Only POST requests are allowed!")
return
end
local raw_configuration = fetch_request_body()
local configuration, err = cjson.decode(raw_configuration)
if not configuration then
ngx.log(ngx.ERR, "could not parse configuration: ", err)
ngx.status = ngx.HTTP_BAD_REQUEST
return
end
local err_buf = {}
for server, uid in pairs(configuration.servers) do
if uid == EMPTY_UID then
-- notice that we do not delete the certificate corresponding to this server
-- because a certificate can be used by multiple servers/hostnames
certificate_servers:delete(server)
else
local success, set_err, forcible = certificate_servers:set(server, uid)
if not success then
local err_msg = string.format("error setting certificate for %s: %s\n",
server, tostring(set_err))
table.insert(err_buf, err_msg)
end
if forcible then
local msg = string.format("certificate_servers dictionary is full, "
.. "LRU entry has been removed to store %s", server)
ngx.log(ngx.WARN, msg)
end
end
end
for uid, cert in pairs(configuration.certificates) do
-- don't delete the cache here; certificate_data[uid] has not been replaced yet.
-- There is a small chance that an nginx worker would still get the old certificate
-- and then fetch and cache the old OCSP response.
local old_cert = certificate_data:get(uid)
local is_renew = (old_cert ~= nil and old_cert ~= cert)
local success, set_err, forcible = certificate_data:set(uid, cert)
if success then
-- delete the OCSP cache only after certificate_data:set succeeds
if is_renew then
ocsp_response_cache:delete(uid)
end
else
local err_msg = string.format("error setting certificate for %s: %s\n",
uid, tostring(set_err))
table.insert(err_buf, err_msg)
end
if forcible then
local msg = string.format("certificate_data dictionary is full, "
.. "LRU entry has been removed to store %s", uid)
ngx.log(ngx.WARN, msg)
end
end
if #err_buf > 0 then
ngx.log(ngx.ERR, table.concat(err_buf))
ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR
return
end
ngx.status = ngx.HTTP_CREATED
end
local function handle_general()
if ngx.var.request_method == "GET" then
ngx.status = ngx.HTTP_OK
ngx.print(_M.get_general_data())
return
end
if ngx.var.request_method ~= "POST" then
ngx.status = ngx.HTTP_BAD_REQUEST
ngx.print("Only POST and GET requests are allowed!")
return
end
local config = fetch_request_body()
local success, err = configuration_data:safe_set("general", config)
if not success then
ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR
ngx.log(ngx.ERR, "error setting general config: " .. tostring(err))
return
end
ngx.status = ngx.HTTP_CREATED
end
local function handle_certs()
if ngx.var.request_method ~= "GET" then
ngx.status = ngx.HTTP_BAD_REQUEST
ngx.print("Only GET requests are allowed!")
return
end
local query = ngx.req.get_uri_args()
if not query["hostname"] then
ngx.status = ngx.HTTP_BAD_REQUEST
ngx.print("Hostname must be specified.")
return
end
local key = get_pem_cert(query["hostname"])
if key then
ngx.status = ngx.HTTP_OK
ngx.print(key)
return
else
ngx.status = ngx.HTTP_NOT_FOUND
ngx.print("No key associated with this hostname.")
return
end
end
local function handle_backends()
if ngx.var.request_method == "GET" then
ngx.status = ngx.HTTP_OK
ngx.print(_M.get_backends_data())
return
end
local backends = fetch_request_body()
if not backends then
ngx.log(ngx.ERR, "dynamic-configuration: unable to read valid request body")
ngx.status = ngx.HTTP_BAD_REQUEST
return
end
local success, err = configuration_data:set("backends", backends)
if not success then
ngx.log(ngx.ERR, "dynamic-configuration: error updating configuration: " .. tostring(err))
ngx.status = ngx.HTTP_BAD_REQUEST
return
end
ngx.update_time()
local raw_backends_last_synced_at = ngx.time()
success, err = configuration_data:set("raw_backends_last_synced_at", raw_backends_last_synced_at)
if not success then
ngx.log(ngx.ERR, "dynamic-configuration: error updating when backends sync, " ..
"new upstream peers waiting for force syncing: " .. tostring(err))
ngx.status = ngx.HTTP_BAD_REQUEST
return
end
ngx.status = ngx.HTTP_CREATED
end
function _M.call()
if ngx.var.request_method ~= "POST" and ngx.var.request_method ~= "GET" then
ngx.status = ngx.HTTP_BAD_REQUEST
ngx.print("Only POST and GET requests are allowed!")
return
end
if ngx.var.request_uri == "/configuration/servers" then
handle_servers()
return
end
if ngx.var.request_uri == "/configuration/general" then
handle_general()
return
end
if ngx.var.uri == "/configuration/certs" then
handle_certs()
return
end
if ngx.var.request_uri == "/configuration/backends" then
handle_backends()
return
end
ngx.status = ngx.HTTP_NOT_FOUND
ngx.print("Not found!")
end
setmetatable(_M, {__index = { handle_servers = handle_servers }})
return _M

View file

@@ -0,0 +1,131 @@
local resty_global_throttle = require("resty.global_throttle")
local resty_ipmatcher = require("resty.ipmatcher")
local util = require("util")
local ngx = ngx
local ngx_exit = ngx.exit
local ngx_log = ngx.log
local ngx_ERR = ngx.ERR
local ngx_INFO = ngx.INFO
local _M = {}
local DECISION_CACHE = ngx.shared.global_throttle_cache
-- it does not make sense to cache a decision for too short a time,
-- since the benefit of caching would likely be negated
-- the Lua shared dict's time resolution for expiry is 0.001s
local CACHE_THRESHOLD = 0.001
local DEFAULT_RAW_KEY = "remote_addr"
local function should_ignore_request(ignored_cidrs)
if not ignored_cidrs or #ignored_cidrs == 0 then
return false
end
local ignored_cidrs_matcher, err = resty_ipmatcher.new(ignored_cidrs)
if not ignored_cidrs_matcher then
ngx_log(ngx_ERR, "failed to initialize resty-ipmatcher: ", err)
return false
end
local is_ignored
is_ignored, err = ignored_cidrs_matcher:match(ngx.var.remote_addr)
if err then
ngx_log(ngx_ERR, "failed to match ip: '",
ngx.var.remote_addr, "': ", err)
return false
end
return is_ignored
end
local function is_enabled(config, location_config)
if config.memcached.host == "" or config.memcached.port == 0 then
return false
end
if location_config.limit == 0 or
location_config.window_size == 0 then
return false
end
if should_ignore_request(location_config.ignored_cidrs) then
return false
end
return true
end
local function get_namespaced_key_value(namespace, key_value)
return namespace .. key_value
end
function _M.throttle(config, location_config)
if not is_enabled(config, location_config) then
return
end
local key_value = util.generate_var_value(location_config.key)
if not key_value or key_value == "" then
key_value = ngx.var[DEFAULT_RAW_KEY]
end
local namespaced_key_value =
get_namespaced_key_value(location_config.namespace, key_value)
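-- check the local decision cache first; "c" marks a rejection served from this cache,
-- while "y" below marks a rejection freshly computed via resty_global_throttle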
local is_limit_exceeding = DECISION_CACHE:get(namespaced_key_value)
if is_limit_exceeding then
ngx.var.global_rate_limit_exceeding = "c"
return ngx_exit(config.status_code)
end
local my_throttle, err = resty_global_throttle.new(
location_config.namespace,
location_config.limit,
location_config.window_size,
{
provider = "memcached",
host = config.memcached.host,
port = config.memcached.port,
connect_timeout = config.memcached.connect_timeout,
max_idle_timeout = config.memcached.max_idle_timeout,
pool_size = config.memcached.pool_size,
}
)
if err then
ngx.log(ngx.ERR, "faled to initialize resty_global_throttle: ", err)
-- fail open
return
end
local desired_delay, estimated_final_count
estimated_final_count, desired_delay, err = my_throttle:process(key_value)
if err then
ngx.log(ngx.ERR, "error while processing key: ", err)
-- fail open
return
end
if desired_delay then
if desired_delay > CACHE_THRESHOLD then
local ok
ok, err =
DECISION_CACHE:safe_add(namespaced_key_value, true, desired_delay)
if not ok then
if err ~= "exists" then
ngx_log(ngx_ERR, "failed to cache decision: ", err)
end
end
end
ngx.var.global_rate_limit_exceeding = "y"
ngx_log(ngx_INFO, "limit is exceeding for ",
location_config.namespace, "/", key_value,
" with estimated_final_count: ", estimated_final_count)
return ngx_exit(config.status_code)
end
end
return _M

View file

@@ -0,0 +1,183 @@
local ngx_re_split = require("ngx.re").split
local certificate_configured_for_current_request =
require("certificate").configured_for_current_request
local global_throttle = require("global_throttle")
local ngx = ngx
local io = io
local math = math
local string = string
local original_randomseed = math.randomseed
local string_format = string.format
local ngx_redirect = ngx.redirect
local _M = {}
local seeds = {}
-- general Nginx configuration passed by controller to be used in this module
local config
local function get_seed_from_urandom()
local seed
local frandom, err = io.open("/dev/urandom", "rb")
if not frandom then
ngx.log(ngx.WARN, 'failed to open /dev/urandom: ', err)
return nil
end
local str = frandom:read(4)
frandom:close()
if not str then
ngx.log(ngx.WARN, 'failed to read data from /dev/urandom')
return nil
end
seed = 0
for i = 1, 4 do
seed = 256 * seed + str:byte(i)
end
return seed
end
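-- override math.randomseed so that the PRNG is seeded at most once per worker process;
-- later calls (e.g. from third-party modules) are ignored with a warning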
math.randomseed = function(seed)
local pid = ngx.worker.pid()
if seeds[pid] then
ngx.log(ngx.WARN, string.format("ignoring math.randomseed(%d) since PRNG "
.. "is already seeded for worker %d", seed, pid))
return
end
original_randomseed(seed)
seeds[pid] = seed
end
local function randomseed()
local seed = get_seed_from_urandom()
if not seed then
ngx.log(ngx.WARN, 'failed to get seed from urandom')
seed = ngx.now() * 1000 + ngx.worker.pid()
end
math.randomseed(seed)
end
local function redirect_to_https(location_config)
if location_config.force_no_ssl_redirect then
return false
end
if location_config.force_ssl_redirect and ngx.var.pass_access_scheme == "http" then
return true
end
if ngx.var.pass_access_scheme ~= "http" then
return false
end
return location_config.ssl_redirect and certificate_configured_for_current_request()
end
local function redirect_host()
local host_port, err = ngx_re_split(ngx.var.best_http_host, ":")
if err then
ngx.log(ngx.ERR, "could not parse variable: ", err)
return ngx.var.best_http_host;
end
return host_port[1];
end
local function parse_x_forwarded_host()
local hosts, err = ngx_re_split(ngx.var.http_x_forwarded_host, ",")
if err then
ngx.log(ngx.ERR, string_format("could not parse variable: %s", err))
return ""
end
return hosts[1]
end
function _M.init_worker()
randomseed()
end
function _M.set_config(new_config)
config = new_config
end
-- rewrite gets called in every location context.
-- This is where we do variable assignments to be used in subsequent
-- phases or redirection
function _M.rewrite(location_config)
ngx.var.pass_access_scheme = ngx.var.scheme
ngx.var.best_http_host = ngx.var.http_host or ngx.var.host
if config.use_forwarded_headers then
-- trust that the http_x_forwarded_proto header correctly indicates ssl offloading
if ngx.var.http_x_forwarded_proto then
ngx.var.pass_access_scheme = ngx.var.http_x_forwarded_proto
end
if ngx.var.http_x_forwarded_port then
ngx.var.pass_server_port = ngx.var.http_x_forwarded_port
end
-- Obtain best http host
if ngx.var.http_x_forwarded_host then
ngx.var.best_http_host = parse_x_forwarded_host()
end
end
if config.use_proxy_protocol then
if ngx.var.proxy_protocol_server_port == "443" then
ngx.var.pass_access_scheme = "https"
end
end
ngx.var.pass_port = ngx.var.pass_server_port
if config.is_ssl_passthrough_enabled then
if ngx.var.pass_server_port == config.listen_ports.ssl_proxy then
ngx.var.pass_port = 443
end
elseif ngx.var.pass_server_port == config.listen_ports.https then
ngx.var.pass_port = 443
end
if redirect_to_https(location_config) then
local request_uri = ngx.var.request_uri
-- strip the trailing slash from the redirect target unless preserving it is enabled by annotation
if location_config.preserve_trailing_slash == false then
if string.byte(request_uri, -1, -1) == string.byte('/') then
request_uri = string.sub(request_uri, 1, -2)
end
end
local uri = string_format("https://%s%s", redirect_host(), request_uri)
if location_config.use_port_in_redirects then
uri = string_format("https://%s:%s%s", redirect_host(),
config.listen_ports.https, request_uri)
end
return ngx_redirect(uri, config.http_redirect_code)
end
global_throttle.throttle(config.global_throttle, location_config.global_throttle)
end
function _M.header()
if config.hsts and ngx.var.scheme == "https" and certificate_configured_for_current_request() then
local value = "max-age=" .. config.hsts_max_age
if config.hsts_include_subdomains then
value = value .. "; includeSubDomains"
end
if config.hsts_preload then
value = value .. "; preload"
end
ngx.header["Strict-Transport-Security"] = value
end
end
return _M

View file

@@ -0,0 +1,123 @@
local ngx = ngx
local tonumber = tonumber
local assert = assert
local string = string
local tostring = tostring
local socket = ngx.socket.tcp
local cjson = require("cjson.safe")
local new_tab = require "table.new"
local clear_tab = require "table.clear"
local table = table
local pairs = pairs
-- if an Nginx worker processes more than (MAX_BATCH_SIZE/FLUSH_INTERVAL) RPS
-- then it will start dropping metrics
local MAX_BATCH_SIZE = 10000
local FLUSH_INTERVAL = 1 -- second
local metrics_batch = new_tab(MAX_BATCH_SIZE, 0)
local metrics_count = 0
-- holds the raw JSON-encoded metrics corresponding to metrics_batch
local metrics_raw_batch = new_tab(MAX_BATCH_SIZE, 0)
local _M = {}
local function send(payload)
local s = assert(socket())
assert(s:connect("unix:/tmp/nginx/prometheus-nginx.socket"))
assert(s:send(payload))
assert(s:close())
end
local function metrics()
return {
host = ngx.var.host or "-",
namespace = ngx.var.namespace or "-",
ingress = ngx.var.ingress_name or "-",
service = ngx.var.service_name or "-",
canary = ngx.var.proxy_alternative_upstream_name or "-",
path = ngx.var.location_path or "-",
method = ngx.var.request_method or "-",
status = ngx.var.status or "-",
requestLength = tonumber(ngx.var.request_length) or -1,
requestTime = tonumber(ngx.var.request_time) or -1,
responseLength = tonumber(ngx.var.bytes_sent) or -1,
upstreamLatency = tonumber(ngx.var.upstream_connect_time) or -1,
upstreamHeaderTime = tonumber(ngx.var.upstream_header_time) or -1,
upstreamResponseTime = tonumber(ngx.var.upstream_response_time) or -1,
upstreamResponseLength = tonumber(ngx.var.upstream_response_length) or -1,
--upstreamStatus = ngx.var.upstream_status or "-",
}
end
local function flush(premature)
if premature then
return
end
if metrics_count == 0 then
return
end
metrics_count = 0
clear_tab(metrics_batch)
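-- build the payload as a JSON array by joining the already-encoded metric objects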
local request_metrics = {}
table.insert(request_metrics, "[")
for i in pairs(metrics_raw_batch) do
local item = metrics_raw_batch[i] ..","
if i == table.getn(metrics_raw_batch) then
item = metrics_raw_batch[i]
end
table.insert(request_metrics, item)
end
table.insert(request_metrics, "]")
local payload = table.concat(request_metrics)
clear_tab(metrics_raw_batch)
send(payload)
end
local function set_metrics_max_batch_size(max_batch_size)
if max_batch_size > 10000 then
MAX_BATCH_SIZE = max_batch_size
end
end
function _M.init_worker(max_batch_size)
set_metrics_max_batch_size(max_batch_size)
local _, err = ngx.timer.every(FLUSH_INTERVAL, flush)
if err then
ngx.log(ngx.ERR, string.format("error when setting up timer.every: %s", tostring(err)))
end
end
function _M.call()
if metrics_count >= MAX_BATCH_SIZE then
ngx.log(ngx.WARN, "omitting metrics for the request, current batch is full")
return
end
local metrics_obj = metrics()
local payload, err = cjson.encode(metrics_obj)
if err then
ngx.log(ngx.ERR, string.format("error when encoding metrics: %s", tostring(err)))
return
end
metrics_count = metrics_count + 1
metrics_batch[metrics_count] = metrics_obj
metrics_raw_batch[metrics_count] = payload
end
setmetatable(_M, {__index = {
flush = flush,
set_metrics_max_batch_size = set_metrics_max_batch_size,
get_metrics_batch = function() return metrics_batch end,
}})
return _M

View file

@@ -0,0 +1,61 @@
local require = require
local ngx = ngx
local ipairs = ipairs
local string_format = string.format
local ngx_log = ngx.log
local INFO = ngx.INFO
local ERR = ngx.ERR
local pcall = pcall
local _M = {}
local MAX_NUMBER_OF_PLUGINS = 20
local plugins = {}
local function load_plugin(name)
local path = string_format("plugins.%s.main", name)
local ok, plugin = pcall(require, path)
if not ok then
ngx_log(ERR, string_format("error loading plugin \"%s\": %s", path, plugin))
return
end
local index = #plugins
if (plugin.name == nil or plugin.name == '') then
plugin.name = name
end
plugins[index + 1] = plugin
end
function _M.init(names)
local count = 0
for _, name in ipairs(names) do
if count >= MAX_NUMBER_OF_PLUGINS then
ngx_log(ERR, "the total number of plugins exceed the maximum number: ", MAX_NUMBER_OF_PLUGINS)
break
end
load_plugin(name)
count = count + 1 -- ignore loading failure, just count the total
end
end
function _M.run()
local phase = ngx.get_phase()
for _, plugin in ipairs(plugins) do
if plugin[phase] then
ngx_log(INFO, string_format("running plugin \"%s\" in phase \"%s\"", plugin.name, phase))
-- TODO: consider sandboxing this. Should we?
-- Probably yes; at the very least prohibit plugins from accessing env vars etc.
-- On the other hand, since plugins are going to be installed by the ingress-nginx
-- operator, they can be assumed to be safe.
local ok, err = pcall(plugin[phase])
if not ok then
ngx_log(ERR, string_format("error while running plugin \"%s\" in phase \"%s\": %s",
plugin.name, phase, err))
end
end
end
end
return _M

View file

@@ -0,0 +1,36 @@
# Custom Lua plugins
ingress-nginx uses [https://github.com/openresty/lua-nginx-module](https://github.com/openresty/lua-nginx-module) to run custom Lua code
within Nginx workers. It is recommended to familiarize yourself with that ecosystem before deploying a custom Lua-based ingress-nginx plugin.
### Writing a plugin
Every ingress-nginx Lua plugin is expected to have a `main.lua` file along with all of its dependencies.
`main.lua` is the entry point of the plugin. The plugin manager uses a convention-over-configuration
strategy and automatically runs the functions defined in `main.lua` in the corresponding Nginx phase, based on their names.
Nginx has different [request processing phases](https://nginx.org/en/docs/dev/development_guide.html#http_phases).
By defining functions with the following names, you can run your custom Lua code in the corresponding Nginx phase:
- `init_worker`: useful for initializing data per Nginx worker process
- `rewrite`: useful for modifying the request, changing headers, redirecting, dropping the request, doing authentication, etc.
- `header_filter`: called when the backend response header is received; useful for modifying response headers
- `body_filter`: called when the response body is received; useful for logging the response body
- `log`: called when request processing is completed and the response has been delivered to the client
Check the [`hello_world`](https://github.com/kubernetes/ingress-nginx/tree/main/rootfs/etc/nginx/lua/plugins/hello_world) plugin for a simple example, or refer to the [OpenID Connect integration](https://github.com/ElvinEfendi/ingress-nginx-openidc/tree/master/rootfs/etc/nginx/lua/plugins/openidc) for more advanced usage.
Do not forget to write tests for your plugin.
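As an illustration, a plugin's `main.lua` might look like the following minimal sketch. The plugin name `my_plugin` and the `x-my-plugin` header are hypothetical placeholders, not part of ingress-nginx; only the phase function names follow the convention described above.

```lua
-- hypothetical plugins/my_plugin/main.lua
local ngx = ngx

local _M = {}

-- runs once per Nginx worker process
function _M.init_worker()
  ngx.log(ngx.INFO, "my_plugin initialized")
end

-- runs for every request in the rewrite phase
function _M.rewrite()
  ngx.req.set_header("x-my-plugin", "1")
end

-- runs when the backend response header is received
function _M.header_filter()
  ngx.header["x-my-plugin-seen"] = "true"
end

return _M
```

Functions for phases you do not need can simply be omitted; the plugin manager only calls the functions that are defined.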
### Installing a plugin
There are two options:
- mount your plugin into `/etc/nginx/lua/plugins/<your plugin name>` in the ingress-nginx pod
- build your own ingress-nginx image, as done in the [example](https://github.com/ElvinEfendi/ingress-nginx-openidc/tree/master/rootfs/etc/nginx/lua/plugins/openidc), and install your plugin during the image build
Mounting is the quickest option.
### Enabling plugins
Once your plugin is ready, use the [`plugins` configuration setting](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#plugins) to activate it. For example, to activate the `hello_world` and `open_idc` plugins, set `plugins` to `"hello_world, open_idc"`. _Note_ that the plugins will be executed in the given order.

View file

@@ -0,0 +1,13 @@
local ngx = ngx
local _M = {}
function _M.rewrite()
local ua = ngx.var.http_user_agent
if ua == "hello" then
ngx.req.set_header("x-hello-world", "1")
end
end
return _M

View file

@@ -0,0 +1,24 @@
local main = require("plugins.hello_world.main")
-- The unit tests are run within a timer phase in a headless Nginx process.
-- Since the `set_header` and `ngx.var.http_` APIs are disabled in this phase, we have to stub them
-- to avoid an `API disabled in the current context` error.
describe("main", function()
describe("rewrite", function()
it("sets x-hello-world header to 1 when user agent is hello", function()
ngx.var = { http_user_agent = "hello" }
stub(ngx.req, "set_header")
main.rewrite()
assert.stub(ngx.req.set_header).was_called_with("x-hello-world", "1")
end)
it("does not set x-hello-world header to 1 when user agent is not hello", function()
ngx.var = { http_user_agent = "not-hello" }
stub(ngx.req, "set_header")
main.rewrite()
assert.stub(ngx.req.set_header).was_not_called_with("x-hello-world", "1")
end)
end)
end)

View file

@@ -0,0 +1,218 @@
local ngx_balancer = require("ngx.balancer")
local cjson = require("cjson.safe")
local util = require("util")
local dns_lookup = require("util.dns").lookup
local configuration = require("tcp_udp_configuration")
local round_robin = require("balancer.round_robin")
local ngx = ngx
local table = table
local ipairs = ipairs
local pairs = pairs
local tostring = tostring
local string = string
local getmetatable = getmetatable
-- measured in seconds
-- for an Nginx worker to pick up the new list of upstream peers
-- it will take <the delay until the controller POSTs the backend object
-- to the Nginx endpoint> + BACKENDS_SYNC_INTERVAL
local BACKENDS_SYNC_INTERVAL = 1
local BACKENDS_FORCE_SYNC_INTERVAL = 30
local DEFAULT_LB_ALG = "round_robin"
local IMPLEMENTATIONS = {
round_robin = round_robin
}
local PROHIBITED_LOCALHOST_PORT = configuration.prohibited_localhost_port or '10246'
local PROHIBITED_PEER_PATTERN = "^127.*:" .. PROHIBITED_LOCALHOST_PORT .. "$"
local _M = {}
local balancers = {}
local backends_with_external_name = {}
local backends_last_synced_at = 0
local function get_implementation(backend)
local name = backend["load-balance"] or DEFAULT_LB_ALG
local implementation = IMPLEMENTATIONS[name]
if not implementation then
ngx.log(ngx.WARN, string.format("%s is not supported, falling back to %s",
backend["load-balance"], DEFAULT_LB_ALG))
implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG]
end
return implementation
end
local function resolve_external_names(original_backend)
local backend = util.deepcopy(original_backend)
local endpoints = {}
for _, endpoint in ipairs(backend.endpoints) do
local ips = dns_lookup(endpoint.address)
for _, ip in ipairs(ips) do
table.insert(endpoints, {address = ip, port = endpoint.port})
end
end
backend.endpoints = endpoints
return backend
end
local function format_ipv6_endpoints(endpoints)
local formatted_endpoints = {}
for _, endpoint in ipairs(endpoints) do
local formatted_endpoint = endpoint
if not endpoint.address:match("^%d+%.%d+%.%d+%.%d+$") then
formatted_endpoint.address = string.format("[%s]", endpoint.address)
end
table.insert(formatted_endpoints, formatted_endpoint)
end
return formatted_endpoints
end
local function is_backend_with_external_name(backend)
local serv_type = backend.service and backend.service.spec
and backend.service.spec["type"]
return serv_type == "ExternalName"
end
local function sync_backend(backend)
if not backend.endpoints or #backend.endpoints == 0 then
return
end
ngx.log(ngx.INFO, "sync tcp/udp backend: ", backend.name)
local implementation = get_implementation(backend)
local balancer = balancers[backend.name]
if not balancer then
balancers[backend.name] = implementation:new(backend)
return
end
-- every implementation is the metatable of its instances (see .new(...) functions)
-- here we check if `balancer` is an instance of `implementation`
-- if it is not, we deduce that the LB algorithm has changed for the backend
if getmetatable(balancer) ~= implementation then
ngx.log(ngx.INFO, string.format("LB algorithm changed from %s to %s, "
.. "resetting the instance", balancer.name, implementation.name))
balancers[backend.name] = implementation:new(backend)
return
end
if is_backend_with_external_name(backend) then
backend = resolve_external_names(backend)
end
backend.endpoints = format_ipv6_endpoints(backend.endpoints)
balancer:sync(backend)
end
local function sync_backends()
local raw_backends_last_synced_at = configuration.get_raw_backends_last_synced_at()
ngx.update_time()
local current_timestamp = ngx.time()
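-- when the backends have not changed since the last sync and the force-sync interval
-- has not elapsed, only re-resolve ExternalName backends and skip the full sync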
if current_timestamp - backends_last_synced_at < BACKENDS_FORCE_SYNC_INTERVAL
and raw_backends_last_synced_at <= backends_last_synced_at then
for _, backend_with_external_name in pairs(backends_with_external_name) do
sync_backend(backend_with_external_name)
end
return
end
local backends_data = configuration.get_backends_data()
if not backends_data then
balancers = {}
return
end
local new_backends, err = cjson.decode(backends_data)
if not new_backends then
ngx.log(ngx.ERR, "could not parse backends data: ", err)
return
end
local balancers_to_keep = {}
for _, new_backend in ipairs(new_backends) do
sync_backend(new_backend)
balancers_to_keep[new_backend.name] = balancers[new_backend.name]
if is_backend_with_external_name(new_backend) then
local backend_with_external_name = util.deepcopy(new_backend)
backends_with_external_name[backend_with_external_name.name] = backend_with_external_name
end
end
for backend_name, _ in pairs(balancers) do
if not balancers_to_keep[backend_name] then
balancers[backend_name] = nil
backends_with_external_name[backend_name] = nil
end
end
backends_last_synced_at = raw_backends_last_synced_at
end
local function get_balancer()
local backend_name = ngx.var.proxy_upstream_name
local balancer = balancers[backend_name]
if not balancer then
return
end
return balancer
end
function _M.init_worker()
sync_backends() -- when worker starts, sync backends without delay
local _, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends)
if err then
ngx.log(ngx.ERR, string.format("error when setting up timer.every "
.. "for sync_backends: %s", tostring(err)))
end
end
function _M.balance()
local balancer = get_balancer()
if not balancer then
return
end
local peer = balancer:balance()
if not peer then
ngx.log(ngx.WARN, "no peer was returned, balancer: " .. balancer.name)
return
end
if peer:match(PROHIBITED_PEER_PATTERN) then
ngx.log(ngx.ERR, "attempted to proxy to self, balancer: ", balancer.name, ", peer: ", peer)
return
end
ngx_balancer.set_more_tries(1)
local ok, err = ngx_balancer.set_current_peer(peer)
if not ok then
ngx.log(ngx.ERR, string.format("error while setting current upstream peer %s: %s", peer, err))
end
end
function _M.log()
local balancer = get_balancer()
if not balancer then
return
end
if not balancer.after_balance then
return
end
balancer:after_balance()
end
setmetatable(_M, {__index = {
get_implementation = get_implementation,
sync_backend = sync_backend,
}})
return _M

View file

@@ -0,0 +1,59 @@
local ngx = ngx
local tostring = tostring
-- this is the Lua representation of TCP/UDP Configuration
local tcp_udp_configuration_data = ngx.shared.tcp_udp_configuration_data
local _M = {}
function _M.get_backends_data()
return tcp_udp_configuration_data:get("backends")
end
function _M.get_raw_backends_last_synced_at()
local raw_backends_last_synced_at = tcp_udp_configuration_data:get("raw_backends_last_synced_at")
if raw_backends_last_synced_at == nil then
raw_backends_last_synced_at = 1
end
return raw_backends_last_synced_at
end
function _M.call()
local sock, err = ngx.req.socket(true)
if not sock then
ngx.log(ngx.ERR, "failed to get raw req socket: ", err)
ngx.say("error: ", err)
return
end
local reader = sock:receiveuntil("\r\n")
local backends, err_read = reader()
if not backends then
ngx.log(ngx.ERR, "failed TCP/UDP dynamic-configuration:", err_read)
ngx.say("error: ", err_read)
return
end
if backends == nil or backends == "" then
return
end
local success, err_conf = tcp_udp_configuration_data:set("backends", backends)
if not success then
ngx.log(ngx.ERR, "dynamic-configuration: error updating configuration: " .. tostring(err_conf))
ngx.say("error: ", err_conf)
return
end
ngx.update_time()
local raw_backends_last_synced_at = ngx.time()
success, err = tcp_udp_configuration_data:set("raw_backends_last_synced_at",
raw_backends_last_synced_at)
if not success then
ngx.log(ngx.ERR, "dynamic-configuration: error updating when backends sync, " ..
"new upstream peers waiting for force syncing: " .. tostring(err))
ngx.status = ngx.HTTP_BAD_REQUEST
return
end
end
return _M

View file

@@ -0,0 +1,31 @@
describe("Balancer chash", function()
after_each(function()
reset_ngx()
end)
describe("balance()", function()
it("uses correct key for given backend", function()
ngx.var = { request_uri = "/alma/armud"}
local balancer_chash = require_without_cache("balancer.chash")
local resty_chash = package.loaded["resty.chash"]
resty_chash.new = function(self, nodes)
return {
find = function(self, key)
assert.equal("/alma/armud", key)
return "10.184.7.40:8080"
end
}
end
local backend = {
name = "my-dummy-backend", upstreamHashByConfig = { ["upstream-hash-by"] = "$request_uri" },
endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } }
}
local instance = balancer_chash:new(backend)
local peer = instance:balance()
assert.equal("10.184.7.40:8080", peer)
end)
end)
end)

View file

@@ -0,0 +1,106 @@
function mock_ngx(mock)
local _ngx = mock
setmetatable(_ngx, {__index = _G.ngx})
_G.ngx = _ngx
end
local function get_test_backend(n_endpoints)
local backend = {
name = "my-dummy-backend",
["upstreamHashByConfig"] = {
["upstream-hash-by"] = "$request_uri",
["upstream-hash-by-subset"] = true,
["upstream-hash-by-subset-size"] = 3
},
endpoints = {}
}
for i = 1, n_endpoints do
backend.endpoints[i] = { address = "10.184.7." .. tostring(i), port = "8080", maxFails = 0, failTimeout = 0 }
end
return backend
end
describe("Balancer chash subset", function()
local balancer_chashsubset
before_each(function()
mock_ngx({ var = { request_uri = "/alma/armud" }})
balancer_chashsubset = require("balancer.chashsubset")
end)
describe("balance()", function()
it("returns peers from the same subset", function()
local backend = get_test_backend(9)
local instance = balancer_chashsubset:new(backend)
instance:sync(backend)
local first_node = instance:balance()
local subset_id
local endpoint_strings
local function has_value (tab, val)
for _, value in ipairs(tab) do
if value == val then
return true
end
end
return false
end
for id, endpoints in pairs(instance["subsets"]) do
endpoint_strings = {}
for _, endpoint in pairs(endpoints) do
local endpoint_string = endpoint.address .. ":" .. endpoint.port
table.insert(endpoint_strings, endpoint_string)
if first_node == endpoint_string then
-- found the set of first_node
subset_id = id
end
end
if subset_id then
break
end
end
-- multiple calls to balance must return nodes from the same subset
for i = 0, 10 do
assert.True(has_value(endpoint_strings, instance:balance()))
end
end)
end)
describe("new(backend)", function()
it("fills last subset correctly", function()
local backend = get_test_backend(7)
local instance = balancer_chashsubset:new(backend)
instance:sync(backend)
for id, endpoints in pairs(instance["subsets"]) do
assert.are.equal(#endpoints, 3)
end
end)
it("set alternative backends", function()
local backend = get_test_backend(7)
backend.trafficShapingPolicy = {
weight = 0,
header = "",
headerValue = "",
cookie = ""
}
backend.alternativeBackends = {
"my-dummy-canary-backend"
}
local instance = balancer_chashsubset:new(backend)
assert.not_equal(instance.traffic_shaping_policy, nil)
assert.not_equal(instance.alternative_backends, nil)
end)
end)
end)

View file

@@ -0,0 +1,217 @@
local util = require("util")
local original_ngx = ngx
local function reset_ngx()
_G.ngx = original_ngx
end
local function mock_ngx(mock)
local _ngx = mock
setmetatable(_ngx, { __index = ngx })
_G.ngx = _ngx
end
local function flush_all_ewma_stats()
ngx.shared.balancer_ewma:flush_all()
ngx.shared.balancer_ewma_last_touched_at:flush_all()
end
local function store_ewma_stats(endpoint_string, ewma, touched_at)
ngx.shared.balancer_ewma:set(endpoint_string, ewma)
ngx.shared.balancer_ewma_last_touched_at:set(endpoint_string, touched_at)
end
local function assert_ewma_stats(endpoint_string, ewma, touched_at)
assert.are.equals(ewma, ngx.shared.balancer_ewma:get(endpoint_string))
assert.are.equals(touched_at, ngx.shared.balancer_ewma_last_touched_at:get(endpoint_string))
end
describe("Balancer ewma", function()
local balancer_ewma = require("balancer.ewma")
local ngx_now = 1543238266
local backend, instance
before_each(function()
mock_ngx({ now = function() return ngx_now end, var = { balancer_ewma_score = -1 } })
package.loaded["balancer.ewma"] = nil
balancer_ewma = require("balancer.ewma")
backend = {
name = "namespace-service-port", ["load-balance"] = "ewma",
endpoints = {
{ address = "10.10.10.1", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.10.10.2", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.10.10.3", port = "8080", maxFails = 0, failTimeout = 0 },
}
}
store_ewma_stats("10.10.10.1:8080", 0.2, ngx_now - 1)
store_ewma_stats("10.10.10.2:8080", 0.3, ngx_now - 5)
store_ewma_stats("10.10.10.3:8080", 1.2, ngx_now - 20)
instance = balancer_ewma:new(backend)
end)
after_each(function()
reset_ngx()
flush_all_ewma_stats()
end)
describe("after_balance()", function()
it("updates EWMA stats", function()
ngx.var = { upstream_addr = "10.10.10.2:8080", upstream_connect_time = "0.02", upstream_response_time = "0.1" }
instance:after_balance()
local weight = math.exp(-5 / 10)
local expected_ewma = 0.3 * weight + 0.12 * (1.0 - weight)
assert.are.equals(expected_ewma, ngx.shared.balancer_ewma:get(ngx.var.upstream_addr))
assert.are.equals(ngx_now, ngx.shared.balancer_ewma_last_touched_at:get(ngx.var.upstream_addr))
end)
it("updates EWMA stats with the latest result", function()
ngx.var = { upstream_addr = "10.10.10.1:8080, 10.10.10.2:8080", upstream_connect_time = "0.05, 0.02", upstream_response_time = "0.2, 0.1" }
instance:after_balance()
local weight = math.exp(-5 / 10)
local expected_ewma = 0.3 * weight + 0.12 * (1.0 - weight)
assert.are.equals(expected_ewma, ngx.shared.balancer_ewma:get("10.10.10.2:8080"))
assert.are.equals(ngx_now, ngx.shared.balancer_ewma_last_touched_at:get("10.10.10.2:8080"))
end)
end)
describe("balance()", function()
it("returns single endpoint when the given backend has only one endpoint", function()
local single_endpoint_backend = util.deepcopy(backend)
table.remove(single_endpoint_backend.endpoints, 3)
table.remove(single_endpoint_backend.endpoints, 2)
local single_endpoint_instance = balancer_ewma:new(single_endpoint_backend)
local peer = single_endpoint_instance:balance()
assert.are.equals("10.10.10.1:8080", peer)
assert.are.equals(-1, ngx.var.balancer_ewma_score)
end)
it("picks the endpoint with lowest decayed score", function()
local two_endpoints_backend = util.deepcopy(backend)
table.remove(two_endpoints_backend.endpoints, 2)
local two_endpoints_instance = balancer_ewma:new(two_endpoints_backend)
local peer = two_endpoints_instance:balance()
-- even though 10.10.10.1:8080 has a lower ewma score
-- algorithm picks 10.10.10.3:8080 because its decayed score is even lower
assert.equal("10.10.10.3:8080", peer)
assert.equal(true, ngx.ctx.balancer_ewma_tried_endpoints["10.10.10.3:8080"])
assert.are.equals(0.16240233988393523723, ngx.var.balancer_ewma_score)
end)
it("doesn't pick the tried endpoint while retry", function()
local two_endpoints_backend = util.deepcopy(backend)
table.remove(two_endpoints_backend.endpoints, 2)
local two_endpoints_instance = balancer_ewma:new(two_endpoints_backend)
ngx.ctx.balancer_ewma_tried_endpoints = {
["10.10.10.3:8080"] = true,
}
local peer = two_endpoints_instance:balance()
assert.equal("10.10.10.1:8080", peer)
assert.equal(true, ngx.ctx.balancer_ewma_tried_endpoints["10.10.10.1:8080"])
end)
it("all the endpoints are tried, pick the one with lowest score", function()
local two_endpoints_backend = util.deepcopy(backend)
table.remove(two_endpoints_backend.endpoints, 2)
local two_endpoints_instance = balancer_ewma:new(two_endpoints_backend)
ngx.ctx.balancer_ewma_tried_endpoints = {
["10.10.10.1:8080"] = true,
["10.10.10.3:8080"] = true,
}
local peer = two_endpoints_instance:balance()
assert.equal("10.10.10.3:8080", peer)
end)
end)
describe("sync()", function()
it("does not reset stats when endpoints do not change", function()
local new_backend = util.deepcopy(backend)
instance:sync(new_backend)
assert.are.same(new_backend.endpoints, instance.peers)
assert_ewma_stats("10.10.10.1:8080", 0.2, ngx_now - 1)
assert_ewma_stats("10.10.10.2:8080", 0.3, ngx_now - 5)
assert_ewma_stats("10.10.10.3:8080", 1.2, ngx_now - 20)
end)
it("resets alternative backends and traffic shaping policy even if endpoints do not change", function()
assert.are.same(nil, instance.alternativeBackends)
assert.are.same(nil, instance.trafficShapingPolicy)
local new_backend = util.deepcopy(backend)
new_backend.alternativeBackends = {"my-canary-namespace-my-canary-service-my-port"}
new_backend.trafficShapingPolicy = {
cookie = "",
header = "",
headerPattern = "",
headerValue = "",
weight = 20,
}
instance:sync(new_backend)
assert.are.same(new_backend.alternativeBackends, instance.alternative_backends)
assert.are.same(new_backend.trafficShapingPolicy, instance.traffic_shaping_policy)
assert.are.same(new_backend.endpoints, instance.peers)
assert_ewma_stats("10.10.10.1:8080", 0.2, ngx_now - 1)
assert_ewma_stats("10.10.10.2:8080", 0.3, ngx_now - 5)
assert_ewma_stats("10.10.10.3:8080", 1.2, ngx_now - 20)
end)
it("updates peers, deletes stats for old endpoints and sets average ewma score to new ones", function()
local new_backend = util.deepcopy(backend)
-- existing endpoint 10.10.10.2 got deleted
-- and replaced with 10.10.10.4
new_backend.endpoints[2].address = "10.10.10.4"
-- and there's one new extra endpoint
table.insert(new_backend.endpoints, { address = "10.10.10.5", port = "8080", maxFails = 0, failTimeout = 0 })
instance:sync(new_backend)
assert.are.same(new_backend.endpoints, instance.peers)
assert_ewma_stats("10.10.10.1:8080", 0.2, ngx_now - 1)
assert_ewma_stats("10.10.10.2:8080", nil, nil)
assert_ewma_stats("10.10.10.3:8080", 1.2, ngx_now - 20)
local slow_start_ewma = (0.2 + 1.2) / 2
assert_ewma_stats("10.10.10.4:8080", slow_start_ewma, ngx_now)
assert_ewma_stats("10.10.10.5:8080", slow_start_ewma, ngx_now)
end)
it("does not set slow_start_ewma when there is no existing ewma", function()
local new_backend = util.deepcopy(backend)
table.insert(new_backend.endpoints, { address = "10.10.10.4", port = "8080", maxFails = 0, failTimeout = 0 })
-- when the LB algorithm instance has just been instantiated it won't have any
-- ewma value set for the initial endpoints (because it has not processed any requests yet);
-- this test simulates that by flushing the existing ewma values
flush_all_ewma_stats()
instance:sync(new_backend)
assert_ewma_stats("10.10.10.1:8080", nil, nil)
assert_ewma_stats("10.10.10.2:8080", nil, nil)
assert_ewma_stats("10.10.10.3:8080", nil, nil)
assert_ewma_stats("10.10.10.4:8080", nil, nil)
end)
end)
end)

View file

@@ -0,0 +1,627 @@
local sticky_balanced
local sticky_persistent
local cookie = require("resty.cookie")
local util = require("util")
local original_ngx = ngx
local function reset_sticky_balancer()
package.loaded["balancer.sticky"] = nil
package.loaded["balancer.sticky_balanced"] = nil
package.loaded["balancer.sticky_persistent"] = nil
sticky_balanced = require("balancer.sticky_balanced")
sticky_persistent = require("balancer.sticky_persistent")
end
local function mock_ngx(mock, after_mock_set)
local _ngx = mock
setmetatable(_ngx, { __index = ngx })
_G.ngx = _ngx
if after_mock_set then
after_mock_set()
end
-- The balancer module caches the ngx module, so it must be reset after the mocks are configured.
reset_sticky_balancer()
end
local function reset_ngx()
_G.ngx = original_ngx
-- Ensure balancer cache is reset.
_G.ngx.ctx.balancer = nil
end
function get_mocked_cookie_new()
local o = { value = nil }
local mock = {
get = function(self, n) return self.value end,
set = function(self, c) self.value = c.value ; return true, nil end
}
setmetatable(o, mock)
mock.__index = mock
return function(self)
return o;
end
end
cookie.new = get_mocked_cookie_new()
local function get_test_backend()
return {
name = "access-router-production-web-80",
endpoints = {
{ address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 },
},
sessionAffinityConfig = {
name = "cookie",
cookieSessionAffinity = { name = "test_name", hash = "sha1" }
},
}
end
describe("Sticky", function()
before_each(function()
mock_ngx({ var = { location_path = "/", host = "test.com" } })
end)
after_each(function()
reset_ngx()
end)
local test_backend = get_test_backend()
local test_backend_endpoint= test_backend.endpoints[1].address .. ":" .. test_backend.endpoints[1].port
local legacy_cookie_value = test_backend_endpoint
local function create_current_cookie_value(backend_key)
return test_backend_endpoint .. "|" .. backend_key
end
describe("new(backend)", function()
describe("when backend specifies cookie name", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
local test_backend_cookie_name = test_backend.sessionAffinityConfig.cookieSessionAffinity.name
assert.equal(sticky_balancer_instance:cookie_name(), test_backend_cookie_name)
end
it("returns an instance containing the corresponding cookie name", function() test_with(sticky_balanced) end)
it("returns an instance containing the corresponding cookie name", function() test_with(sticky_persistent) end)
end)
describe("when backend does not specify cookie name", function()
local function test_with(sticky_balancer_type)
local temp_backend = util.deepcopy(test_backend)
temp_backend.sessionAffinityConfig.cookieSessionAffinity.name = nil
local sticky_balancer_instance = sticky_balancer_type:new(temp_backend)
local default_cookie_name = "route"
assert.equal(sticky_balancer_instance:cookie_name(), default_cookie_name)
end
it("returns an instance with 'route' as cookie name", function() test_with(sticky_balanced) end)
it("returns an instance with 'route' as cookie name", function() test_with(sticky_persistent) end)
end)
describe("backend_key", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
assert.is_truthy(sticky_balancer_instance.backend_key)
end
it("calculates at construction time", function() test_with(sticky_balanced) end)
it("calculates at construction time", function() test_with(sticky_persistent) end)
end)
end)
describe("balance()", function()
local mocked_cookie_new = cookie.new
before_each(function()
reset_sticky_balancer()
end)
after_each(function()
cookie.new = mocked_cookie_new
end)
describe("when client doesn't have a cookie set and location is in cookie_locations", function()
local function test_pick_endpoint_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
local peer = sticky_balancer_instance:balance()
assert.equal(test_backend_endpoint, peer)
end
it("picks an endpoint for the client", function() test_pick_endpoint_with(sticky_balanced) end)
it("picks an endpoint for the client", function() test_pick_endpoint_with(sticky_persistent) end)
local function test_set_cookie_with(sticky_balancer_type)
local s = {}
cookie.new = function(self)
local cookie_instance = {
set = function(self, payload)
assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name)
assert.equal(payload.path, ngx.var.location_path)
assert.equal(payload.samesite, nil)
assert.equal(payload.domain, nil)
assert.equal(payload.httponly, true)
assert.equal(payload.secure, false)
return true, nil
end,
get = function(k) return false end,
}
s = spy.on(cookie_instance, "set")
return cookie_instance, false
end
local b = get_test_backend()
b.sessionAffinityConfig.cookieSessionAffinity.locations = {}
b.sessionAffinityConfig.cookieSessionAffinity.locations["test.com"] = {"/"}
local sticky_balancer_instance = sticky_balancer_type:new(b)
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_called()
end
it("sets a cookie on the client", function() test_set_cookie_with(sticky_balanced) end)
it("sets a cookie on the client", function() test_set_cookie_with(sticky_persistent) end)
local function test_set_ssl_cookie_with(sticky_balancer_type)
ngx.var.https = "on"
local s = {}
cookie.new = function(self)
local cookie_instance = {
set = function(self, payload)
assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name)
assert.equal(payload.path, ngx.var.location_path)
assert.equal(payload.samesite, nil)
assert.equal(payload.domain, nil)
assert.equal(payload.httponly, true)
assert.equal(payload.secure, true)
return true, nil
end,
get = function(k) return false end,
}
s = spy.on(cookie_instance, "set")
return cookie_instance, false
end
local b = get_test_backend()
b.sessionAffinityConfig.cookieSessionAffinity.locations = {}
b.sessionAffinityConfig.cookieSessionAffinity.locations["test.com"] = {"/"}
local sticky_balancer_instance = sticky_balancer_type:new(b)
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_called()
end
it("sets a secure cookie on the client when being in ssl mode", function() test_set_ssl_cookie_with(sticky_balanced) end)
it("sets a secure cookie on the client when being in ssl mode", function() test_set_ssl_cookie_with(sticky_persistent) end)
end)
describe("when client doesn't have a cookie set and cookie_locations contains a matching wildcard location", function()
before_each(function ()
ngx.var.host = "dev.test.com"
end)
after_each(function ()
ngx.var.host = "test.com"
end)
local function test_with(sticky_balancer_type)
local s = {}
cookie.new = function(self)
local cookie_instance = {
set = function(self, payload)
assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name)
assert.equal(payload.path, ngx.var.location_path)
assert.equal(payload.samesite, nil)
assert.equal(payload.domain, nil)
assert.equal(payload.httponly, true)
assert.equal(payload.secure, false)
return true, nil
end,
get = function(k) return false end,
}
s = spy.on(cookie_instance, "set")
return cookie_instance, false
end
local b = get_test_backend()
b.sessionAffinityConfig.cookieSessionAffinity.locations = {}
b.sessionAffinityConfig.cookieSessionAffinity.locations["*.test.com"] = {"/"}
local sticky_balancer_instance = sticky_balancer_type:new(b)
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_called()
end
it("sets a cookie on the client", function() test_with(sticky_balanced) end)
it("sets a cookie on the client", function() test_with(sticky_persistent) end)
end)
describe("when client doesn't have a cookie set and location not in cookie_locations", function()
local function test_pick_endpoint_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
local peer = sticky_balancer_instance:balance()
assert.equal(peer, test_backend_endpoint)
end
it("picks an endpoint for the client", function() test_pick_endpoint_with(sticky_balanced) end)
it("picks an endpoint for the client", function() test_pick_endpoint_with(sticky_persistent) end)
local function test_no_cookie_with(sticky_balancer_type)
local s = {}
cookie.new = function(self)
local cookie_instance = {
set = function(self, payload)
assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name)
assert.equal(payload.path, ngx.var.location_path)
assert.equal(payload.domain, ngx.var.host)
assert.equal(payload.httponly, true)
assert.equal(payload.samesite, nil)
return true, nil
end,
get = function(k) return false end,
}
s = spy.on(cookie_instance, "set")
return cookie_instance, false
end
local sticky_balancer_instance = sticky_balancer_type:new(get_test_backend())
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_not_called()
end
it("does not set a cookie on the client", function() test_no_cookie_with(sticky_balanced) end)
it("does not set a cookie on the client", function() test_no_cookie_with(sticky_persistent) end)
end)
describe("when client has a cookie set", function()
local function test_no_cookie_with(sticky_balancer_type)
local s = {}
cookie.new = function(self)
local return_obj = {
set = function(v) return false, nil end,
get = function(k) return legacy_cookie_value end,
}
s = spy.on(return_obj, "set")
return return_obj, false
end
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_not_called()
end
it("does not set a cookie", function() test_no_cookie_with(sticky_balanced) end)
it("does not set a cookie", function() test_no_cookie_with(sticky_persistent) end)
local function test_correct_endpoint(sticky)
local sticky_balancer_instance = sticky:new(test_backend)
local peer = sticky_balancer_instance:balance()
assert.equal(peer, test_backend_endpoint)
end
it("returns the correct endpoint for the client", function() test_correct_endpoint(sticky_balanced) end)
it("returns the correct endpoint for the client", function() test_correct_endpoint(sticky_persistent) end)
end)
end)
local function get_several_test_backends(change_on_failure)
return {
name = "access-router-production-web-80",
endpoints = {
{ address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.184.7.41", port = "8080", maxFails = 0, failTimeout = 0 },
},
sessionAffinityConfig = {
name = "cookie",
cookieSessionAffinity = {
name = "test_name",
hash = "sha1",
change_on_failure = change_on_failure,
locations = { ['test.com'] = {'/'} }
}
},
}
end
describe("balance() after error", function()
local mocked_cookie_new = cookie.new
before_each(function()
mock_ngx({ var = { location_path = "/", host = "test.com" } })
end)
after_each(function()
reset_ngx()
end)
describe("when request to upstream fails", function()
local function test_with(sticky_balancer_type, change_on_failure)
local sticky_balancer_instance = sticky_balancer_type:new(get_several_test_backends(change_on_failure))
local old_upstream = sticky_balancer_instance:balance()
assert.is.Not.Nil(old_upstream)
for _ = 1, 100 do
-- make sure upstream doesn't change on subsequent calls of balance()
assert.equal(old_upstream, sticky_balancer_instance:balance())
end
-- simulate request failure
sticky_balancer_instance.get_last_failure = function()
return "failed"
end
_G.ngx.var.upstream_addr = old_upstream
for _ = 1, 100 do
local new_upstream = sticky_balancer_instance:balance()
if change_on_failure == false then
-- upstream should stay the same in spite of the error if the change_on_failure option is false
assert.equal(new_upstream, old_upstream)
else
-- upstream should change after the error if the change_on_failure option is true
assert.not_equal(new_upstream, old_upstream)
end
end
end
it("changes upstream when change_on_failure option is true", function() test_with(sticky_balanced, true) end)
it("changes upstream when change_on_failure option is true", function() test_with(sticky_persistent, true) end)
it("changes upstream when change_on_failure option is false", function() test_with(sticky_balanced, false) end)
it("changes upstream when change_on_failure option is false", function() test_with(sticky_persistent, false) end)
end)
end)
describe("when client doesn't have a cookie set and no host header, matching default server '_'", function()
before_each(function ()
ngx.var.host = "not-default-server"
ngx.var.server_name = "_"
end)
local function test_with(sticky_balancer_type)
local s = {}
cookie.new = function(self)
local cookie_instance = {
set = function(self, payload)
assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name)
assert.equal(payload.path, ngx.var.location_path)
assert.equal(payload.samesite, nil)
assert.equal(payload.domain, nil)
assert.equal(payload.httponly, true)
assert.equal(payload.secure, false)
return true, nil
end,
get = function(k) return false end,
}
s = spy.on(cookie_instance, "set")
return cookie_instance, false
end
local b = get_test_backend()
b.sessionAffinityConfig.cookieSessionAffinity.locations = {}
b.sessionAffinityConfig.cookieSessionAffinity.locations["_"] = {"/"}
local sticky_balancer_instance = sticky_balancer_type:new(b)
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_called()
end
it("sets a cookie on the client", function() test_with(sticky_balanced) end)
it("sets a cookie on the client", function() test_with(sticky_persistent) end)
end)
describe("SameSite settings", function()
local mocked_cookie_new = cookie.new
before_each(function()
reset_sticky_balancer()
end)
after_each(function()
cookie.new = mocked_cookie_new
end)
local function test_set_cookie_with(sticky_balancer_type, samesite, conditional_samesite_none, expected_path, expected_samesite, secure, expected_secure)
local s = {}
cookie.new = function(self)
local cookie_instance = {
set = function(self, payload)
assert.equal(payload.key, test_backend.sessionAffinityConfig.cookieSessionAffinity.name)
assert.equal(payload.path, expected_path)
assert.equal(payload.samesite, expected_samesite)
assert.equal(payload.domain, nil)
assert.equal(payload.httponly, true)
assert.equal(payload.secure, expected_secure)
return true, nil
end,
get = function(k) return false end,
}
s = spy.on(cookie_instance, "set")
return cookie_instance, false
end
local b = get_test_backend()
b.sessionAffinityConfig.cookieSessionAffinity.locations = {}
b.sessionAffinityConfig.cookieSessionAffinity.locations["test.com"] = {"/"}
b.sessionAffinityConfig.cookieSessionAffinity.samesite = samesite
b.sessionAffinityConfig.cookieSessionAffinity.conditional_samesite_none = conditional_samesite_none
b.sessionAffinityConfig.cookieSessionAffinity.secure = secure
local sticky_balancer_instance = sticky_balancer_type:new(b)
assert.has_no.errors(function() sticky_balancer_instance:balance() end)
assert.spy(s).was_called()
end
it("returns a secure cookie with SameSite=Strict when user specifies samesite strict and secure=true", function()
test_set_cookie_with(sticky_balanced, "Lax", false, "/", "Lax", true, true)
end)
it("returns a cookie with SameSite=Strict when user specifies samesite strict and conditional samesite none", function()
test_set_cookie_with(sticky_balanced, "Strict", true, "/", "Strict", nil, false)
end)
it("returns a cookie with SameSite=Lax when user specifies samesite lax", function()
test_set_cookie_with(sticky_balanced, "Lax", false, "/", "Lax", nil, false)
end)
it("returns a cookie with SameSite=Lax when user specifies samesite lax and conditional samesite none", function()
test_set_cookie_with(sticky_balanced, "Lax", true, "/", "Lax", nil, false)
end)
it("returns a cookie with SameSite=None when user specifies samesite None", function()
test_set_cookie_with(sticky_balanced, "None", false, "/", "None", nil, false)
end)
it("returns a cookie with SameSite=None when user specifies samesite None and conditional samesite none with supported user agent", function()
mock_ngx({ var = { location_path = "/", host = "test.com" , http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.2704.103 Safari/537.36"} })
test_set_cookie_with(sticky_balanced, "None", true, "/", "None", nil, false)
end)
it("returns a cookie without SameSite=None when user specifies samesite None and conditional samesite none with unsupported user agent", function()
mock_ngx({ var = { location_path = "/", host = "test.com" , http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"} })
test_set_cookie_with(sticky_balanced, "None", true, "/", nil, nil, false)
end)
it("returns a secure cookie with SameSite=Strict when user specifies samesite strict and secure=true", function()
test_set_cookie_with(sticky_persistent, "Lax", false, "/", "Lax", true, true)
end)
it("returns a cookie with SameSite=Strict when user specifies samesite strict", function()
test_set_cookie_with(sticky_persistent, "Strict", false, "/", "Strict", nil, false)
end)
it("returns a cookie with SameSite=Strict when user specifies samesite strict and conditional samesite none", function()
test_set_cookie_with(sticky_persistent, "Strict", true, "/", "Strict", nil, false)
end)
it("returns a cookie with SameSite=Lax when user specifies samesite lax", function()
test_set_cookie_with(sticky_persistent, "Lax", false, "/", "Lax", nil, false)
end)
it("returns a cookie with SameSite=Lax when user specifies samesite lax and conditional samesite none", function()
test_set_cookie_with(sticky_persistent, "Lax", true, "/", "Lax", nil, false)
end)
it("returns a cookie with SameSite=None when user specifies samesite None", function()
test_set_cookie_with(sticky_persistent, "None", false, "/", "None", nil, false)
end)
it("returns a cookie with SameSite=None when user specifies samesite None and conditional samesite none with supported user agent", function()
mock_ngx({ var = { location_path = "/", host = "test.com" , http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.2704.103 Safari/537.36"} })
test_set_cookie_with(sticky_persistent, "None", true, "/", "None", nil, false)
end)
it("returns a cookie without SameSite=None when user specifies samesite None and conditional samesite none with unsupported user agent", function()
mock_ngx({ var = { location_path = "/", host = "test.com" , http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"} })
test_set_cookie_with(sticky_persistent, "None", true, "/", nil, nil, false)
end)
end)
describe("get_cookie()", function()
describe("legacy cookie value", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
cookie.new = function(self)
local return_obj = {
set = function(v) return false, nil end,
get = function(k) return legacy_cookie_value end,
}
return return_obj, false
end
assert.equal(test_backend_endpoint, sticky_balancer_instance.get_cookie(sticky_balancer_instance))
end
it("retrieves upstream key value", function() test_with(sticky_balanced) end)
it("retrieves upstream key value", function() test_with(sticky_persistent) end)
end)
describe("current cookie value", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
cookie.new = function(self)
local return_obj = {
set = function(v) return false, nil end,
get = function(k) return create_current_cookie_value(sticky_balancer_instance.backend_key) end,
}
return return_obj, false
end
assert.equal(test_backend_endpoint, sticky_balancer_instance.get_cookie(sticky_balancer_instance))
end
it("retrieves upstream key value", function() test_with(sticky_balanced) end)
it("retrieves upstream key value", function() test_with(sticky_persistent) end)
end)
end)
describe("get_cookie_parsed()", function()
describe("legacy cookie value", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
cookie.new = function(self)
local return_obj = {
set = function(v) return false, nil end,
get = function(k) return legacy_cookie_value end,
}
return return_obj, false
end
local parsed_cookie = sticky_balancer_instance.get_cookie_parsed(sticky_balancer_instance)
assert.is_truthy(parsed_cookie)
assert.equal(test_backend_endpoint, parsed_cookie.upstream_key)
assert.is_falsy(parsed_cookie.backend_key)
end
it("retrieves upstream key value", function() test_with(sticky_balanced) end)
it("retrieves upstream key value", function() test_with(sticky_persistent) end)
end)
describe("current cookie value", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
cookie.new = function(self)
local return_obj = {
set = function(v) return false, nil end,
get = function(k) return create_current_cookie_value(sticky_balancer_instance.backend_key) end,
}
return return_obj, false
end
local parsed_cookie = sticky_balancer_instance.get_cookie_parsed(sticky_balancer_instance)
assert.is_truthy(parsed_cookie)
assert.equal(test_backend_endpoint, parsed_cookie.upstream_key)
assert.equal(sticky_balancer_instance.backend_key, parsed_cookie.backend_key)
end
it("retrieves all supported values", function() test_with(sticky_balanced) end)
it("retrieves all supported values", function() test_with(sticky_persistent) end)
end)
end)
describe("set_cookie()", function()
local function test_with(sticky_balancer_type)
local sticky_balancer_instance = sticky_balancer_type:new(test_backend)
local cookieSetSpy = {}
cookie.new = function(self)
local return_obj = {
set = function(self, payload)
assert.equal(create_current_cookie_value(sticky_balancer_instance.backend_key), payload.value)
return true, nil
end,
get = function(k) return nil end,
}
cookieSetSpy = spy.on(return_obj, "set")
return return_obj, false
end
sticky_balancer_instance.set_cookie(sticky_balancer_instance, test_backend_endpoint)
assert.spy(cookieSetSpy).was_called()
end
it("constructs correct cookie value", function() test_with(sticky_balanced) end)
it("constructs correct cookie value", function() test_with(sticky_persistent) end)
end)
end)

View file

@@ -0,0 +1,536 @@
local cjson = require("cjson.safe")
local util = require("util")
local balancer, expected_implementations, backends
local original_ngx = ngx
local function reset_ngx()
_G.ngx = original_ngx
-- Ensure balancer cache is reset.
_G.ngx.ctx.balancer = nil
end
local function reset_balancer()
package.loaded["balancer"] = nil
balancer = require("balancer")
end
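-- install a mock ngx that falls back to the real one via __index, then reload the balancer module so it picks up the mock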
local function mock_ngx(mock, after_mock_set)
local _ngx = mock
setmetatable(_ngx, { __index = ngx })
_G.ngx = _ngx
if after_mock_set then
after_mock_set()
end
-- The balancer module caches the ngx module, so it must be reloaded after the mocks are configured.
reset_balancer()
end
local function reset_expected_implementations()
expected_implementations = {
["access-router-production-web-80"] = package.loaded["balancer.round_robin"],
["my-dummy-app-1"] = package.loaded["balancer.round_robin"],
["my-dummy-app-2"] = package.loaded["balancer.chash"],
["my-dummy-app-3"] = package.loaded["balancer.sticky_persistent"],
["my-dummy-app-4"] = package.loaded["balancer.ewma"],
["my-dummy-app-5"] = package.loaded["balancer.sticky_balanced"],
["my-dummy-app-6"] = package.loaded["balancer.chashsubset"]
}
end
local function reset_backends()
backends = {
{
name = "access-router-production-web-80", port = "80", secure = false,
sslPassthrough = false,
endpoints = {
{ address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.184.97.100", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.184.98.239", port = "8080", maxFails = 0, failTimeout = 0 },
},
sessionAffinityConfig = { name = "", cookieSessionAffinity = { name = "" } },
trafficShapingPolicy = {
weight = 0,
header = "",
headerValue = "",
cookie = ""
},
},
{
name = "my-dummy-app-1",
["load-balance"] = "round_robin",
},
{
name = "my-dummy-app-2",
["load-balance"] = "round_robin", -- upstreamHashByConfig will take priority.
upstreamHashByConfig = { ["upstream-hash-by"] = "$request_uri", },
},
{
name = "my-dummy-app-3",
["load-balance"] = "ewma", -- sessionAffinityConfig will take priority.
sessionAffinityConfig = { name = "cookie", mode = "persistent", cookieSessionAffinity = { name = "route" } }
},
{
name = "my-dummy-app-4",
["load-balance"] = "ewma",
},
{
name = "my-dummy-app-5",
["load-balance"] = "ewma", -- sessionAffinityConfig will take priority.
upstreamHashByConfig = { ["upstream-hash-by"] = "$request_uri", },
sessionAffinityConfig = { name = "cookie", cookieSessionAffinity = { name = "route" } }
},
{
name = "my-dummy-app-6",
["load-balance"] = "ewma", -- upstreamHashByConfig will take priority.
upstreamHashByConfig = { ["upstream-hash-by"] = "$request_uri", ["upstream-hash-by-subset"] = "true", }
},
}
end
describe("Balancer", function()
before_each(function()
reset_balancer()
reset_expected_implementations()
reset_backends()
end)
after_each(function()
reset_ngx()
end)
describe("get_implementation()", function()
it("uses heuristics to select correct load balancer implementation for a given backend", function()
for _, backend in pairs(backends) do
local expected_implementation = expected_implementations[backend.name]
local implementation = balancer.get_implementation(backend)
assert.equal(expected_implementation, implementation)
end
end)
end)
describe("get_balancer()", function()
it("always returns the same balancer for given request context", function()
local backend = {
name = "my-dummy-app-100", ["load-balance"] = "ewma",
alternativeBackends = { "my-dummy-canary-app-100" },
endpoints = { { address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } },
trafficShapingPolicy = {
weight = 0,
header = "",
headerValue = "",
cookie = ""
},
}
local canary_backend = {
name = "my-dummy-canary-app-100", ["load-balance"] = "ewma",
alternativeBackends = { "my-dummy-canary-app-100" },
endpoints = { { address = "11.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 } },
trafficShapingPolicy = {
weight = 5,
header = "",
headerValue = "",
cookie = ""
},
}
mock_ngx({ var = { proxy_upstream_name = backend.name } })
balancer.sync_backend(backend)
balancer.sync_backend(canary_backend)
local expected = balancer.get_balancer()
for i = 1,50,1 do
assert.are.same(expected, balancer.get_balancer())
end
end)
end)
describe("route_to_alternative_balancer()", function()
local backend, _primaryBalancer
before_each(function()
backend = backends[1]
_primaryBalancer = {
alternative_backends = {
backend.name,
}
}
mock_ngx({ var = { request_uri = "/" } })
end)
-- A request that is not affinitized must follow the traffic shaping policies.
describe("not affinitized", function()
before_each(function()
_primaryBalancer.is_affinitized = function (_)
return false
end
end)
it("returns false when no trafficShapingPolicy is set", function()
balancer.sync_backend(backend)
assert.equal(false, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
it("returns false when no alternative backends is set", function()
backend.trafficShapingPolicy.weight = 100
balancer.sync_backend(backend)
_primaryBalancer.alternative_backends = nil
assert.equal(false, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
it("returns false when alternative backends name does not match", function()
backend.trafficShapingPolicy.weight = 100
balancer.sync_backend(backend)
_primaryBalancer.alternative_backends[1] = "nonExistingBackend"
assert.equal(false, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
describe("canary by weight", function()
it("returns true when weight is 100", function()
backend.trafficShapingPolicy.weight = 100
balancer.sync_backend(backend)
assert.equal(true, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
it("returns false when weight is 0", function()
backend.trafficShapingPolicy.weight = 0
balancer.sync_backend(backend)
assert.equal(false, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
it("returns true when weight is 1000 and weight total is 1000", function()
backend.trafficShapingPolicy.weight = 1000
backend.trafficShapingPolicy.weightTotal = 1000
balancer.sync_backend(backend)
assert.equal(true, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
it("returns false when weight is 0 and weight total is 1000", function()
backend.trafficShapingPolicy.weight = 0
backend.trafficShapingPolicy.weightTotal = 1000
balancer.sync_backend(backend)
assert.equal(false, balancer.route_to_alternative_balancer(_primaryBalancer))
end)
end)
describe("canary by cookie", function()
it("returns correct result for given cookies", function()
local test_patterns = {
{
case_title = "cookie_value is 'always'",
request_cookie_name = "canaryCookie",
request_cookie_value = "always",
expected_result = true,
},
{
case_title = "cookie_value is 'never'",
request_cookie_name = "canaryCookie",
request_cookie_value = "never",
expected_result = false,
},
{
case_title = "cookie_value is undefined",
request_cookie_name = "canaryCookie",
request_cookie_value = "foo",
expected_result = false,
},
{
case_title = "cookie_name is undefined",
request_cookie_name = "foo",
request_cookie_value = "always",
expected_result = false
},
}
for _, test_pattern in pairs(test_patterns) do
mock_ngx({ var = {
["cookie_" .. test_pattern.request_cookie_name] = test_pattern.request_cookie_value,
request_uri = "/"
}})
backend.trafficShapingPolicy.cookie = "canaryCookie"
balancer.sync_backend(backend)
assert.message("\nTest data pattern: " .. test_pattern.case_title)
.equal(test_pattern.expected_result, balancer.route_to_alternative_balancer(_primaryBalancer))
reset_ngx()
end
end)
end)
describe("canary by header", function()
it("returns correct result for given headers", function()
local test_patterns = {
-- with no header value setting
{
case_title = "no custom header value and header value is 'always'",
header_name = "canaryHeader",
header_value = "",
request_header_name = "canaryHeader",
request_header_value = "always",
expected_result = true,
},
{
case_title = "no custom header value and header value is 'never'",
header_name = "canaryHeader",
header_value = "",
request_header_name = "canaryHeader",
request_header_value = "never",
expected_result = false,
},
{
case_title = "no custom header value and header value is undefined",
header_name = "canaryHeader",
header_value = "",
request_header_name = "canaryHeader",
request_header_value = "foo",
expected_result = false,
},
{
case_title = "no custom header value and header name is undefined",
header_name = "canaryHeader",
header_value = "",
request_header_name = "foo",
request_header_value = "always",
expected_result = false,
},
-- with header value setting
{
case_title = "custom header value is set and header value is 'always'",
header_name = "canaryHeader",
header_value = "foo",
request_header_name = "canaryHeader",
request_header_value = "always",
expected_result = false,
},
{
case_title = "custom header value is set and header value match custom header value",
header_name = "canaryHeader",
header_value = "foo",
request_header_name = "canaryHeader",
request_header_value = "foo",
expected_result = true,
},
{
case_title = "custom header value is set and header name is undefined",
header_name = "canaryHeader",
header_value = "foo",
request_header_name = "bar",
request_header_value = "foo",
expected_result = false
},
}
for _, test_pattern in pairs(test_patterns) do
mock_ngx({ var = {
["http_" .. test_pattern.request_header_name] = test_pattern.request_header_value,
request_uri = "/"
}})
backend.trafficShapingPolicy.header = test_pattern.header_name
backend.trafficShapingPolicy.headerValue = test_pattern.header_value
balancer.sync_backend(backend)
assert.message("\nTest data pattern: " .. test_pattern.case_title)
.equal(test_pattern.expected_result, balancer.route_to_alternative_balancer(_primaryBalancer))
reset_ngx()
end
end)
end)
end)
-- An affinitized request prefers the backend it is affinitized to.
describe("affinitized", function()
before_each(function()
mock_ngx({ var = { request_uri = "/", proxy_upstream_name = backend.name } })
balancer.sync_backend(backend)
end)
it("returns false if request is affinitized to primary backend", function()
_primaryBalancer.is_affinitized = function (_)
return true
end
local alternativeBalancer = balancer.get_balancer_by_upstream_name(backend.name)
local primarySpy = spy.on(_primaryBalancer, "is_affinitized")
local alternativeSpy = spy.on(alternativeBalancer, "is_affinitized")
assert.is_false(balancer.route_to_alternative_balancer(_primaryBalancer))
assert.spy(_primaryBalancer.is_affinitized).was_called()
assert.spy(alternativeBalancer.is_affinitized).was_not_called()
end)
it("returns true if request is affinitized to alternative backend", function()
_primaryBalancer.is_affinitized = function (_)
return false
end
local alternativeBalancer = balancer.get_balancer_by_upstream_name(backend.name)
alternativeBalancer.is_affinitized = function (_)
return true
end
local primarySpy = spy.on(_primaryBalancer, "is_affinitized")
local alternativeSpy = spy.on(alternativeBalancer, "is_affinitized")
assert.is_true(balancer.route_to_alternative_balancer(_primaryBalancer))
assert.spy(_primaryBalancer.is_affinitized).was_called()
assert.spy(alternativeBalancer.is_affinitized).was_called()
end)
end)
end)
describe("sync_backend()", function()
local backend, implementation
before_each(function()
backend = backends[1]
implementation = expected_implementations[backend.name]
end)
it("initializes balancer for given backend", function()
local s = spy.on(implementation, "new")
assert.has_no.errors(function() balancer.sync_backend(backend) end)
assert.spy(s).was_called_with(implementation, backend)
end)
it("resolves external name to endpoints when service is of type External name", function()
backend = {
name = "example-com", service = { spec = { ["type"] = "ExternalName" } },
endpoints = {
{ address = "example.com", port = "80", maxFails = 0, failTimeout = 0 }
}
}
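-- stub the DNS resolver so example.com resolves to the two A records below; sync_backend() should replace the ExternalName endpoint with them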
helpers.mock_resty_dns_query(nil, {
{
name = "example.com",
address = "192.168.1.1",
ttl = 3600,
},
{
name = "example.com",
address = "1.2.3.4",
ttl = 60,
}
})
local expected_backend = {
name = "example-com", service = { spec = { ["type"] = "ExternalName" } },
endpoints = {
{ address = "192.168.1.1", port = "80" },
{ address = "1.2.3.4", port = "80" },
}
}
local mock_instance = { sync = function(backend) end }
setmetatable(mock_instance, implementation)
implementation.new = function(self, backend) return mock_instance end
local s = spy.on(implementation, "new")
assert.has_no.errors(function() balancer.sync_backend(backend) end)
assert.spy(s).was_called_with(implementation, expected_backend)
stub(mock_instance, "sync")
assert.has_no.errors(function() balancer.sync_backend(backend) end)
assert.stub(mock_instance.sync).was_called_with(mock_instance, expected_backend)
end)
it("wraps IPv6 addresses into square brackets", function()
local backend = {
name = "example-com",
endpoints = {
{ address = "::1", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "192.168.1.1", port = "8080", maxFails = 0, failTimeout = 0 },
}
}
local expected_backend = {
name = "example-com",
endpoints = {
{ address = "[::1]", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "192.168.1.1", port = "8080", maxFails = 0, failTimeout = 0 },
}
}
local mock_instance = { sync = function(backend) end }
setmetatable(mock_instance, implementation)
implementation.new = function(self, backend) return mock_instance end
local s = spy.on(implementation, "new")
assert.has_no.errors(function() balancer.sync_backend(util.deepcopy(backend)) end)
assert.spy(s).was_called_with(implementation, expected_backend)
stub(mock_instance, "sync")
assert.has_no.errors(function() balancer.sync_backend(util.deepcopy(backend)) end)
assert.stub(mock_instance.sync).was_called_with(mock_instance, expected_backend)
end)
it("replaces the existing balancer when load balancing config changes for backend", function()
assert.has_no.errors(function() balancer.sync_backend(backend) end)
backend["load-balance"] = "ewma"
local new_implementation = package.loaded["balancer.ewma"]
local s_old = spy.on(implementation, "new")
local s = spy.on(new_implementation, "new")
local s_ngx_log = spy.on(ngx, "log")
assert.has_no.errors(function() balancer.sync_backend(backend) end)
assert.spy(s_ngx_log).was_called_with(ngx.INFO,
"LB algorithm changed from round_robin to ewma, resetting the instance")
assert.spy(s).was_called_with(new_implementation, backend)
assert.spy(s).was_called(1)
assert.spy(s_old).was_not_called()
end)
it("calls sync(backend) on existing balancer instance when load balancing config does not change", function()
local mock_instance = { sync = function(...) end }
setmetatable(mock_instance, implementation)
implementation.new = function(self, backend) return mock_instance end
assert.has_no.errors(function() balancer.sync_backend(backend) end)
stub(mock_instance, "sync")
assert.has_no.errors(function() balancer.sync_backend(backend) end)
assert.stub(mock_instance.sync).was_called_with(mock_instance, backend)
end)
end)
describe("sync_backends()", function()
after_each(function()
reset_ngx()
end)
it("sync backends", function()
backends = {
{
name = "access-router-production-web-80", port = "80", secure = false,
sslPassthrough = false,
endpoints = {
{ address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.184.97.100", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.184.98.239", port = "8080", maxFails = 0, failTimeout = 0 },
},
sessionAffinityConfig = { name = "", cookieSessionAffinity = { name = "" } },
trafficShapingPolicy = {
weight = 0,
header = "",
headerValue = "",
cookie = ""
},
}
}
mock_ngx({ var = { proxy_upstream_name = "access-router-production-web-80" }, ctx = { } }, function()
ngx.shared.configuration_data:set("backends", cjson.encode(backends))
end)
balancer.init_worker()
assert.not_equal(balancer.get_balancer(), nil)
end)
end)
end)

View file

@@ -0,0 +1,188 @@
local certificate = require("certificate")
local ssl = require("ngx.ssl")
local function read_file(path)
local file = assert(io.open(path, "rb"))
local content = file:read("*a")
file:close()
return content
end
local EXAMPLE_CERT = read_file("rootfs/etc/nginx/lua/test/fixtures/example-com-cert.pem")
local DEFAULT_CERT = read_file("rootfs/etc/nginx/lua/test/fixtures/default-cert.pem")
local DEFAULT_CERT_HOSTNAME = "_"
local UUID = "2ea8adb5-8ebb-4b14-a79b-0cdcd892e884"
local DEFAULT_UUID = "00000000-0000-0000-0000-000000000000"
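-- helper: verify that certificate.call() installed the given PEM cert and key via ngx.ssl and logged no error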
local function assert_certificate_is_set(cert)
spy.on(ngx, "log")
spy.on(ssl, "set_der_cert")
spy.on(ssl, "set_der_priv_key")
assert.has_no.errors(certificate.call)
assert.spy(ngx.log).was_not_called_with(ngx.ERR, _)
assert.spy(ssl.set_der_cert).was_called_with(ssl.cert_pem_to_der(cert))
assert.spy(ssl.set_der_priv_key).was_called_with(ssl.priv_key_pem_to_der(cert))
end
local function refute_certificate_is_set()
spy.on(ssl, "set_der_cert")
spy.on(ssl, "set_der_priv_key")
assert.has_no.errors(certificate.call)
assert.spy(ssl.set_der_cert).was_not_called()
assert.spy(ssl.set_der_priv_key).was_not_called()
end
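-- helper: publish hostname -> UUID and UUID -> PEM into the shared dictionaries the certificate module reads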
local function set_certificate(hostname, certificate, uuid)
local success, err = ngx.shared.certificate_servers:set(hostname, uuid)
if not success then
error(err)
end
success, err = ngx.shared.certificate_data:set(uuid, certificate)
if not success then
error(err)
end
end
local unmocked_ngx = _G.ngx
describe("Certificate", function()
describe("call", function()
before_each(function()
ssl.server_name = function() return "hostname", nil end
ssl.clear_certs = function() return true, "" end
ssl.set_der_cert = function(cert) return true, "" end
ssl.set_der_priv_key = function(priv_key) return true, "" end
ngx.exit = function(status) end
set_certificate(DEFAULT_CERT_HOSTNAME, DEFAULT_CERT, DEFAULT_UUID)
end)
after_each(function()
ngx = unmocked_ngx
ngx.shared.certificate_data:flush_all()
ngx.shared.certificate_servers:flush_all()
end)
it("sets certificate and key when hostname is found in dictionary", function()
set_certificate("hostname", EXAMPLE_CERT, UUID)
assert_certificate_is_set(EXAMPLE_CERT)
end)
it("sets certificate and key for wildcard cert", function()
ssl.server_name = function() return "sub.hostname", nil end
set_certificate("*.hostname", EXAMPLE_CERT, UUID)
assert_certificate_is_set(EXAMPLE_CERT)
end)
it("sets certificate and key for domain with trailing dot", function()
ssl.server_name = function() return "hostname.", nil end
set_certificate("hostname", EXAMPLE_CERT, UUID)
assert_certificate_is_set(EXAMPLE_CERT)
end)
it("fallbacks to default certificate and key for domain with many trailing dots", function()
ssl.server_name = function() return "hostname..", nil end
set_certificate("hostname", EXAMPLE_CERT, UUID)
assert_certificate_is_set(DEFAULT_CERT)
end)
it("sets certificate and key for nested wildcard cert", function()
ssl.server_name = function() return "sub.nested.hostname", nil end
set_certificate("*.nested.hostname", EXAMPLE_CERT, UUID)
assert_certificate_is_set(EXAMPLE_CERT)
end)
it("logs error message when certificate in dictionary is invalid", function()
set_certificate("hostname", "something invalid", UUID)
spy.on(ngx, "log")
refute_certificate_is_set()
assert.spy(ngx.log).was_called_with(ngx.ERR, "failed to convert certificate chain from PEM to DER: PEM_read_bio_X509_AUX() failed")
end)
it("uses default certificate when there's none found for given hostname", function()
assert_certificate_is_set(DEFAULT_CERT)
end)
it("uses default certificate when hostname can not be obtained", function()
ssl.server_name = function() return nil, "crazy hostname error" end
assert_certificate_is_set(DEFAULT_CERT)
assert.spy(ngx.log).was_called_with(ngx.ERR, "error while obtaining hostname: crazy hostname error")
end)
it("fails when hostname does not have certificate and default cert is invalid", function()
set_certificate(DEFAULT_CERT_HOSTNAME, "invalid", UUID)
spy.on(ngx, "log")
refute_certificate_is_set()
assert.spy(ngx.log).was_called_with(ngx.ERR, "failed to convert certificate chain from PEM to DER: PEM_read_bio_X509_AUX() failed")
end)
describe("OCSP stapling", function()
before_each(function()
certificate.is_ocsp_stapling_enabled = true
end)
after_each(function()
certificate.is_ocsp_stapling_enabled = false
end)
it("fetches and caches OCSP response when there is no cached response", function()
end)
it("fetches and caches OCSP response when cached response is stale", function()
end)
it("staples using cached OCSP response", function()
end)
it("staples using cached stale OCSP response", function()
end)
it("does negative caching when OCSP response URL extraction fails", function()
end)
it("does negative caching when the request to OCSP responder fails", function()
end)
end)
end)
describe("configured_for_current_request", function()
before_each(function()
local _ngx = { var = { host = "hostname" } }
setmetatable(_ngx, {__index = _G.ngx})
_G.ngx = _ngx
ngx.ctx.cert_configured_for_current_request = nil
package.loaded["certificate"] = nil
certificate = require("certificate")
set_certificate("hostname", EXAMPLE_CERT, UUID)
end)
it("returns true when certificate exists for given server", function()
assert.is_true(certificate.configured_for_current_request())
end)
it("returns false when certificate does not exist for given server", function()
ngx.var.host = "hostname.xyz"
assert.is_false(certificate.configured_for_current_request())
end)
it("returns cached value from ngx.ctx", function()
ngx.ctx.cert_configured_for_current_request = false
assert.is_false(certificate.configured_for_current_request())
end)
end)
end)

View file

@@ -0,0 +1,308 @@
local cjson = require("cjson")
local configuration = require("configuration")
local unmocked_ngx = _G.ngx
local certificate_data = ngx.shared.certificate_data
local certificate_servers = ngx.shared.certificate_servers
local ocsp_response_cache = ngx.shared.ocsp_response_cache
local function get_backends()
return {
{
name = "my-dummy-backend-1", ["load-balance"] = "sticky",
endpoints = { { address = "10.183.7.40", port = "8080", maxFails = 0, failTimeout = 0 } },
sessionAffinityConfig = { name = "cookie", cookieSessionAffinity = { name = "route" } },
},
{
name = "my-dummy-backend-2", ["load-balance"] = "ewma",
endpoints = {
{ address = "10.184.7.40", port = "7070", maxFails = 3, failTimeout = 2 },
{ address = "10.184.7.41", port = "7070", maxFails = 2, failTimeout = 1 },
}
},
{
name = "my-dummy-backend-3", ["load-balance"] = "round_robin",
endpoints = {
{ address = "10.185.7.40", port = "6060", maxFails = 0, failTimeout = 0 },
{ address = "10.185.7.41", port = "6060", maxFails = 2, failTimeout = 1 },
}
},
}
end
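-- minimal ngx mock whose request body is the JSON-encoded backends above, used to exercise the /configuration endpoints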
local function get_mocked_ngx_env()
local _ngx = {
status = ngx.HTTP_OK,
var = {},
req = {
read_body = function() end,
get_body_data = function() return cjson.encode(get_backends()) end,
get_body_file = function() return nil end,
},
log = function(msg) end,
}
setmetatable(_ngx, {__index = _G.ngx})
return _ngx
end
describe("Configuration", function()
before_each(function()
_G.ngx = get_mocked_ngx_env()
package.loaded["configuration"] = nil
configuration = require("configuration")
end)
after_each(function()
_G.ngx = unmocked_ngx
end)
describe("Backends", function()
context("Request method is neither GET nor POST", function()
it("sends 'Only POST and GET requests are allowed!' in the response body", function()
ngx.var.request_method = "PUT"
local s = spy.on(ngx, "print")
assert.has_no.errors(configuration.call)
assert.spy(s).was_called_with("Only POST and GET requests are allowed!")
end)
it("returns a status code of 400", function()
ngx.var.request_method = "PUT"
assert.has_no.errors(configuration.call)
assert.equal(ngx.status, ngx.HTTP_BAD_REQUEST)
end)
end)
context("GET request to /configuration/backends", function()
before_each(function()
ngx.var.request_method = "GET"
ngx.var.request_uri = "/configuration/backends"
end)
it("returns the current configured backends on the response body", function()
-- Encode the backends since comparing tables directly fails due to reference comparison
local encoded_backends = cjson.encode(get_backends())
ngx.shared.configuration_data:set("backends", encoded_backends)
local s = spy.on(ngx, "print")
assert.has_no.errors(configuration.call)
assert.spy(s).was_called_with(encoded_backends)
end)
it("returns a status of 200", function()
assert.has_no.errors(configuration.call)
assert.equal(ngx.status, ngx.HTTP_OK)
end)
end)
context("POST request to /configuration/backends", function()
before_each(function()
ngx.var.request_method = "POST"
ngx.var.request_uri = "/configuration/backends"
end)
it("stores the posted backends on the shared dictionary", function()
-- Encode the backends since comparing tables directly fails due to reference comparison
assert.has_no.errors(configuration.call)
assert.equal(ngx.shared.configuration_data:get("backends"), cjson.encode(get_backends()))
end)
context("Failed to read request body", function()
local mocked_get_body_data = ngx.req.get_body_data
before_each(function()
ngx.req.get_body_data = function() return nil end
end)
teardown(function()
ngx.req.get_body_data = mocked_get_body_data
end)
it("returns a status of 400", function()
local original_io_open = _G.io.open
_G.io.open = function(filename, extension) return false end
assert.has_no.errors(configuration.call)
assert.equal(ngx.status, ngx.HTTP_BAD_REQUEST)
_G.io.open = original_io_open
end)
it("logs 'dynamic-configuration: unable to read valid request body to stderr'", function()
local original_io_open = _G.io.open
_G.io.open = function(filename, extension) return false end
local s = spy.on(ngx, "log")
assert.has_no.errors(configuration.call)
assert.spy(s).was_called_with(ngx.ERR, "dynamic-configuration: unable to read valid request body")
_G.io.open = original_io_open
end)
end)
context("Failed to set the new backends to the configuration dictionary", function()
local resty_configuration_data_set = ngx.shared.configuration_data.set
before_each(function()
ngx.shared.configuration_data.set = function(key, value) return false, "" end
end)
teardown(function()
ngx.shared.configuration_data.set = resty_configuration_data_set
end)
it("returns a status of 400", function()
assert.has_no.errors(configuration.call)
assert.equal(ngx.status, ngx.HTTP_BAD_REQUEST)
end)
it("logs 'dynamic-configuration: error updating configuration:' to stderr", function()
local s = spy.on(ngx, "log")
assert.has_no.errors(configuration.call)
assert.spy(s).was_called_with(ngx.ERR, "dynamic-configuration: error updating configuration: ")
end)
end)
context("Succeeded to update backends configuration", function()
it("returns a status of 201", function()
assert.has_no.errors(configuration.call)
assert.equal(ngx.status, ngx.HTTP_CREATED)
end)
end)
end)
end)
describe("handle_servers()", function()
local UUID = "2ea8adb5-8ebb-4b14-a79b-0cdcd892e884"
local function mock_ssl_configuration(configuration)
local json = cjson.encode(configuration)
ngx.req.get_body_data = function() return json end
end
before_each(function()
ngx.var.request_method = "POST"
end)
it("should not accept non POST methods", function()
ngx.var.request_method = "GET"
local s = spy.on(ngx, "print")
assert.has_no.errors(configuration.handle_servers)
assert.spy(s).was_called_with("Only POST requests are allowed!")
assert.same(ngx.status, ngx.HTTP_BAD_REQUEST)
end)
it("should not delete ocsp_response_cache if certificate remain the same", function()
ngx.shared.certificate_data.get = function(self, uid)
return "pemCertKey"
end
mock_ssl_configuration({
servers = { ["hostname"] = UUID },
certificates = { [UUID] = "pemCertKey" }
})
local s = spy.on(ngx.shared.ocsp_response_cache, "delete")
assert.has_no.errors(configuration.handle_servers)
assert.spy(s).was_not_called()
end)
it("should not delete ocsp_response_cache if certificate is empty", function()
ngx.shared.certificate_data.get = function(self, uid)
return nil
end
mock_ssl_configuration({
servers = { ["hostname"] = UUID },
certificates = { [UUID] = "pemCertKey" }
})
local s = spy.on(ngx.shared.ocsp_response_cache, "delete")
assert.has_no.errors(configuration.handle_servers)
assert.spy(s).was_not_called()
end)
it("should delete ocsp_response_cache if certificate changed", function()
local stored_entries = {
[UUID] = "pemCertKey"
}
ngx.shared.certificate_data.get = function(self, uid)
return stored_entries[uid]
end
mock_ssl_configuration({
servers = { ["hostname"] = UUID },
certificates = { [UUID] = "pemCertKey2" }
})
local s = spy.on(ngx.shared.ocsp_response_cache, "delete")
assert.has_no.errors(configuration.handle_servers)
assert.spy(s).was.called_with(ocsp_response_cache, UUID)
end)
it("deletes server with empty UID without touching the corresponding certificate", function()
mock_ssl_configuration({
servers = { ["hostname"] = UUID },
certificates = { [UUID] = "pemCertKey" }
})
assert.has_no.errors(configuration.handle_servers)
assert.same("pemCertKey", certificate_data:get(UUID))
assert.same(UUID, certificate_servers:get("hostname"))
assert.same(ngx.HTTP_CREATED, ngx.status)
local EMPTY_UID = "-1"
mock_ssl_configuration({
servers = { ["hostname"] = EMPTY_UID },
certificates = { [UUID] = "pemCertKey" }
})
assert.has_no.errors(configuration.handle_servers)
assert.same("pemCertKey", certificate_data:get(UUID))
assert.same(nil, certificate_servers:get("hostname"))
assert.same(ngx.HTTP_CREATED, ngx.status)
end)
it("should successfully update certificates and keys for each host", function()
mock_ssl_configuration({
servers = { ["hostname"] = UUID },
certificates = { [UUID] = "pemCertKey" }
})
assert.has_no.errors(configuration.handle_servers)
assert.same("pemCertKey", certificate_data:get(UUID))
assert.same(UUID, certificate_servers:get("hostname"))
assert.same(ngx.HTTP_CREATED, ngx.status)
end)
it("should log an err and set status to Internal Server Error when a certificate cannot be set", function()
local uuid2 = "8ea8adb5-8ebb-4b14-a79b-0cdcd892e999"
ngx.shared.certificate_data.set = function(self, uuid, certificate)
return false, "error", nil
end
mock_ssl_configuration({
servers = { ["hostname"] = UUID, ["hostname2"] = uuid2 },
certificates = { [UUID] = "pemCertKey", [uuid2] = "pemCertKey2" }
})
local s = spy.on(ngx, "log")
assert.has_no.errors(configuration.handle_servers)
assert.same(ngx.HTTP_INTERNAL_SERVER_ERROR, ngx.status)
end)
it("logs a warning when entry is forcibly stored", function()
local uuid2 = "8ea8adb5-8ebb-4b14-a79b-0cdcd892e999"
local stored_entries = {}
ngx.shared.certificate_data.set = function(self, uuid, certificate)
stored_entries[uuid] = certificate
return true, nil, true
end
mock_ssl_configuration({
servers = { ["hostname"] = UUID, ["hostname2"] = uuid2 },
certificates = { [UUID] = "pemCertKey", [uuid2] = "pemCertKey2" }
})
local s1 = spy.on(ngx, "log")
assert.has_no.errors(configuration.handle_servers)
assert.spy(s1).was_called_with(ngx.WARN, string.format("certificate_data dictionary is full, LRU entry has been removed to store %s", UUID))
assert.equal("pemCertKey", stored_entries[UUID])
assert.equal("pemCertKey2", stored_entries[uuid2])
assert.same(ngx.HTTP_CREATED, ngx.status)
end)
end)
end)

View file

@@ -0,0 +1,45 @@
-----BEGIN CERTIFICATE-----
MIICxDCCAawCCQCjnxUYH38uOjANBgkqhkiG9w0BAQsFADAkMRAwDgYDVQQDDAdk
ZWZhdWx0MRAwDgYDVQQKDAdkZWZhdWx0MB4XDTE5MDQxMzE3NTgwNVoXDTM5MDQw
ODE3NTgwNVowJDEQMA4GA1UEAwwHZGVmYXVsdDEQMA4GA1UECgwHZGVmYXVsdDCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANuhdV19jJGMRCy7/x6krbHh
GUoN/C2T9a/ZuA8CaBHpDCcCnYCrYjPWjOezuuk5CMWx/IHRgTxbz9z2MXyfSMli
7bkDra0tmahI3Z0ADNxt/QQ30f1I84Y877urO5RJt3W4NmWM9jqbv/AFO8+oWRjI
s9+leQxyvIWtKz524eXGmu0iGD4KkeH6bPCXYotC/t5XH4v9NfHRoZ3M9eaDuKd6
k54EVol8LUDaBvbicIE8M1Znf1vQWdP8w4nhP739Oc/p5YKcG7jJahLa9nx+AJIe
vxPP9/nQxN1PAcuXK6HAtgF3nkadtW2nd9Ws3bsOn+ZHaE+hQXtMzLZ5/L8BZ50C
AwEAATANBgkqhkiG9w0BAQsFAAOCAQEApWib3ctn/okShC0Krw56vyjqbuKx9KMQ
QuClYR6HTU8D5F9zr2NFyrSMik12wbqPH6VPYRAjVBfFEhzYDaO+DjTJp0wcIe1z
a2fWVjELLg9PEDlB4mVmtJUMkVknwbZ6eD4XRO6ooifSOhg/36KchilbnGchwwaY
Gh4/rNKWqKD5rPVQhUsptNnsZ8trPQ+W3p94rzXyQkWS8KWCD0EeMzdRZnUm/utx
4lDGCdw1GLEfm/SnNR+dyu4ETzY6/s5csChBVZw9xlXzId6QymeGvJe0jcoTnLCG
KNq3F1fTqUXXhP3PTuuNclz0c4/8QZC/l2xH6Xb07H2iOPuuFnDVZA==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDboXVdfYyRjEQs
u/8epK2x4RlKDfwtk/Wv2bgPAmgR6QwnAp2Aq2Iz1ozns7rpOQjFsfyB0YE8W8/c
9jF8n0jJYu25A62tLZmoSN2dAAzcbf0EN9H9SPOGPO+7qzuUSbd1uDZljPY6m7/w
BTvPqFkYyLPfpXkMcryFrSs+duHlxprtIhg+CpHh+mzwl2KLQv7eVx+L/TXx0aGd
zPXmg7inepOeBFaJfC1A2gb24nCBPDNWZ39b0FnT/MOJ4T+9/TnP6eWCnBu4yWoS
2vZ8fgCSHr8Tz/f50MTdTwHLlyuhwLYBd55GnbVtp3fVrN27Dp/mR2hPoUF7TMy2
efy/AWedAgMBAAECggEBAJ37LLX8CjHjqGJZNDCxmfNajFtVZfDO/inovNmnDH7d
mI0y92JHZRMOoDpGcQszqFi0J4Kl1YU6MXGqcXxIAw5BJ+guei4Yn++JwkcdcyLX
xujS0iyT3f/QM01V5TxMLjfyMsanN7J+t/iJezVqzfPi4mfb2g+XNH4fSvzafLFO
7p9/Mw3J2rB2rV0aJxh8abh0p4bSSPSoQgeubQ6KlwoOJYBZ/a8TmmZqB8DjOPYb
Pad0sTHsQc4q9wrP7zxZmeCDnD0XEluAxX9ZF4Ou/AWBHTRQpu5HH2pUXvm88VI1
/4QAaxYozxuAknqSnVqpCXjSpYoXAXEX64aroJui/UECgYEA7VdN4jtn62jgVK1V
HHaoZlyfIAzrC7yObyfdlL39NDz4B7emRNvFx7FXgslPRnvlMb/B2mASzrQij7uI
sfsIO7kOJBq6BqnEalCynFj9p5EFQcOehOXYu46Qj1dKp43dptTaxnYnA1xKL9Z5
DDwrxpD2Z6ur3o6A55qX7M6tLTECgYEA7OW33x3dTX4H5ea8ghhXvrSnqycFhhqE
Grae9HpAUFV5/u6LC14xHk6Cd27cVI/OXAIar1M9aA1FNwnU+NmMgLKyAGgnKVPi
GkDWaAaWKeW32bNHdqg3XmP2TcEXn1PCSwNc4cVPWDfeVQeCtposH0jWITFB9C4O
9sKkfVMCVi0CgYEAzecn0lTnWvupYszdQcxPXD6ObifG4m+6wgQ734bT3DXomAlj
XemsNApOeVBcTjG+LOLHMsSWjG0KbterR30ZL3bkJb5qFM3DcNiBm9I4fN77SIqF
Q5aD6HNORozcX3BcExgmlHZ8chXm5omSimLJN4MbweTVPkcy3brogrDq3IECgYEA
x3Ls6NuS+/BVI/ms0nc+QOCGnfG/k9V1TaxdjgXzae9dRAaAaHTIM/TzoSxkMonU
uuBGqUAS3iz2Dk2n0lAPHDfW58LI3eGy5lmaaoDJIsM2lAJ982fTHhRZRcOBaPIz
DcbqB2eA0wxOkxY8thJ9fWVsawu2tKemj5j2tlESEY0CgYAl3QnNqfkuKKpen7wJ
3LF+rm0Xtw3a9kSE2+dsdkzn3sl2OpX44V1UtSJDJmpsk8OBw20ISgWp6UlnnncG
J0xmjSNaRH0UBfQ7PyntvC7FhaOncP5emrwH80oOjlGyY2i6m9ognLQBo44/XgGq
VwtXclxMu2tvVKKXaXQAwQiNOA==
-----END PRIVATE KEY-----

View file

@@ -0,0 +1,45 @@
-----BEGIN CERTIFICATE-----
MIICzDCCAbQCCQD8UB3X6pdyYjANBgkqhkiG9w0BAQsFADAoMRQwEgYDVQQDDAtl
eGFtcGxlLmNvbTEQMA4GA1UECgwHZXhhbXBsZTAeFw0xOTA0MTMxNjU2MzJaFw0z
OTA0MDgxNjU2MzJaMCgxFDASBgNVBAMMC2V4YW1wbGUuY29tMRAwDgYDVQQKDAdl
eGFtcGxlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+IeuF0/KeVZ
wuV54zZ1D747T+EGUmGAJgS7rBmzagSk87tFcFkVFz2Pfh/bytde0YjoYEfb+Nil
LEwCZG1tZsYAxah2sy5BQAWfclQQ5mj+VMn611Eceq5ELVzZeHTIHEqJnNuUyh7V
DCeZeWjT+kwc/NnCn8F1lwVMvm6ZTQ37reyVYKZqkQRWjCst9aTFAlQl6hYLD+LR
cg/b5oOo2SiAtqELaBJDU3lX/zBqG38o0N1hIT364bj6+9vzngx0ce8TMj1y92MJ
YA4r6RUy7NwYc6sfxVjoBr30ARmqsXdYEZwu1DK37fikWCgmqiilBT/AIVjCdb5J
MO+6NhtP6wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCSu0r59BdG0uLmQ/ncLvJJ
vealSX6pbbZInPCCztGa7SJpfkbVNj3c/Fs0q5UH2SPYH/O/yqgp8IWNYNUuTlO3
2IJWFovi6cHpetszLxBat75XDq3Spvw8mIGd8Lhw2B6RpR5Hy/kO/mXmnpH/1aty
xRJY6V5Tin/bsx3IwWKK/kcXzEtrCAnS2w2V4WTOk7y6WOGhsEfmwVc4MbvXQc/5
yysvN41AUcWK94XJ2FZZc8ykUkHJ+TeRGq8wnl7l3E9d0wQw+ArL4toD4puFmxvH
qZV5n628d+ecNTbAhanX46A4xdfxhD0LvnURizAfu3N5snMmhjfgL5ukMwrGCwRo
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDf4h64XT8p5VnC
5XnjNnUPvjtP4QZSYYAmBLusGbNqBKTzu0VwWRUXPY9+H9vK117RiOhgR9v42KUs
TAJkbW1mxgDFqHazLkFABZ9yVBDmaP5UyfrXURx6rkQtXNl4dMgcSomc25TKHtUM
J5l5aNP6TBz82cKfwXWXBUy+bplNDfut7JVgpmqRBFaMKy31pMUCVCXqFgsP4tFy
D9vmg6jZKIC2oQtoEkNTeVf/MGobfyjQ3WEhPfrhuPr72/OeDHRx7xMyPXL3Ywlg
DivpFTLs3Bhzqx/FWOgGvfQBGaqxd1gRnC7UMrft+KRYKCaqKKUFP8AhWMJ1vkkw
77o2G0/rAgMBAAECggEBALVyswki8b1H137gsu+WRDu1Jqbvrkr4IH8vmNa7obBM
AVBUN8v9Nt22E+TZdy4nbP6PYh4eP0aodv22wL2Z/m+sDBYmGcXQuCtmIzrqrSPA
dlhLtpPpdhZrxG+rb8lzhHeBZZSOVkGVyX9nXLiMYDjclSXMazNE/MOgFPnF81MB
fQIf1g1FzJKbH5cPrl5hAnxoaRv3SvCCxsCTs51XvweKHmy5X4MlvRAYIj9IKutk
iF2EYTQSY6MSrJWP1buZm0JriJncvT3BdArihNK6OuraxRhc5TUCW7nIx4Pi+hwo
FODwbgtj5AtHmAdiL2AWJnaJoQVPEw6Oq1JBr9i3AGECgYEA9i+0D9dS1vsckQ2G
E1P1ItVkoZBjbSFV6lB8sBsx2hAl6bUIQtJvgoffDlCqkuCl2jagGcmHwlk4V8sc
O2HivNB9TcoQh5L4m8uN6uytLUXw4vUS23YI1LNImAuwf1refEuKVPM+Mn5Y/FMk
n0fK7IfuLgu13WZ6iYkBS+C7RNECgYEA6M7RK9mw/kcquK2bag96S0/0znsYtXtj
naNgsDOfIjuOHJJFinNrVbdW72zqJePXRPtpQ8/5xoyWjysKUqb7I94BXYGPMXzv
Z8fCzSDKTFBODpu4cMvgQk7c4D4ZgQSaWP1+wf9x8WglKowyUeh0CwJ307SYa3Mw
SYPdg2OTJ/sCgYEAsUkbF0lNy6kcIk0l32dXodUgWcTcBOu7rjh2AnAjD1EPrGSE
5XIbgVmNRQbMP2dtqF4sH0Xk8Q1FKNwIoa7VFHnjspAwJSGuzKrisWntMCws05P/
F3HB3EKbpXrNiHkMvV+8534frUcVl+fb+KQ/uuQMnrYqKp0w4zh5aYYV9fECgYA0
EAw3AjfSpZeoNSrMTSnMLdVRV7Xu3+knF6JHxUORJEBjo1Jp4+XdBWMrp++1CX7a
rl6cC6aQAGCrI7TrRuxi2QL1JkQfjRD85G9r8ClNZ6gNHEXi87TzHy/F9h09/QmH
XSk7uSSCGAg3u6KFLrbEv4iMj5aGcPwbdKHVAC+ogQKBgQC9vkSFhE2bVrIwul6Y
SEDap+YzbA0yfP8PXd0lX47uyOsd3hD0OoGwTRsmeJr1/4Yuf0Wub838ZxgP2dpP
qfW7+7OeNyTvS2avxygWvolVV+O5Yx13rE2Dsd7DQnIyGO9yRCWCPjpuOHFqEgMv
HzJX6j3SaubH52pePu5mrzMLcg==
-----END PRIVATE KEY-----

View file

@@ -0,0 +1,258 @@
local util = require("util")
local function assert_request_rejected(config, location_config, opts)
stub(ngx, "exit")
local global_throttle = require_without_cache("global_throttle")
assert.has_no.errors(function()
global_throttle.throttle(config, location_config)
end)
assert.stub(ngx.exit).was_called_with(config.status_code)
if opts.with_cache then
assert.are.same("c", ngx.var.global_rate_limit_exceeding)
else
assert.are.same("y", ngx.var.global_rate_limit_exceeding)
end
end
local function assert_request_not_rejected(config, location_config)
stub(ngx, "exit")
local cache_safe_add_spy = spy.on(ngx.shared.global_throttle_cache, "safe_add")
local global_throttle = require_without_cache("global_throttle")
assert.has_no.errors(function()
global_throttle.throttle(config, location_config)
end)
assert.stub(ngx.exit).was_not_called()
assert.is_nil(ngx.var.global_rate_limit_exceeding)
assert.spy(cache_safe_add_spy).was_not_called()
end
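-- asserts that f() completes without instantiating resty.global_throttle and without reading the local rejection cache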
local function assert_short_circuits(f)
local cache_get_spy = spy.on(ngx.shared.global_throttle_cache, "get")
local resty_global_throttle = require_without_cache("resty.global_throttle")
local resty_global_throttle_new_spy = spy.on(resty_global_throttle, "new")
local global_throttle = require_without_cache("global_throttle")
f(global_throttle)
assert.spy(resty_global_throttle_new_spy).was_not_called()
assert.spy(cache_get_spy).was_not_called()
end
local function assert_fails_open(config, location_config, ...)
stub(ngx, "exit")
stub(ngx, "log")
local global_throttle = require_without_cache("global_throttle")
assert.has_no.errors(function()
global_throttle.throttle(config, location_config)
end)
assert.stub(ngx.exit).was_not_called()
assert.stub(ngx.log).was_called_with(ngx.ERR, ...)
assert.is_nil(ngx.var.global_rate_limit_exceeding)
end
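-- stub resty.global_throttle.new to return a mock whose process() yields the given triple, run f(), then assert new() was invoked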
local function stub_resty_global_throttle_process(ret1, ret2, ret3, f)
local resty_global_throttle = require_without_cache("resty.global_throttle")
local resty_global_throttle_mock = {
process = function(self, key) return ret1, ret2, ret3 end
}
stub(resty_global_throttle, "new", resty_global_throttle_mock)
f()
assert.stub(resty_global_throttle.new).was_called()
end
local function cache_rejection_decision(namespace, key_value, desired_delay)
local namespaced_key_value = namespace .. key_value
local ok, err = ngx.shared.global_throttle_cache:safe_add(namespaced_key_value, true, desired_delay)
assert.is_nil(err)
assert.is_true(ok)
assert.is_true(ngx.shared.global_throttle_cache:get(namespaced_key_value))
end
describe("global_throttle", function()
local snapshot
local NAMESPACE = "31285d47b1504dcfbd6f12c46d769f6e"
local LOCATION_CONFIG = {
namespace = NAMESPACE,
limit = 10,
window_size = 60,
key = {},
ignored_cidrs = {},
}
local CONFIG = {
memcached = {
host = "memc.default.svc.cluster.local", port = 11211,
connect_timeout = 50, max_idle_timeout = 10000, pool_size = 50,
},
status_code = 429,
}
before_each(function()
snapshot = assert:snapshot()
ngx.var = { remote_addr = "127.0.0.1", global_rate_limit_exceeding = nil }
end)
after_each(function()
snapshot:revert()
ngx.shared.global_throttle_cache:flush_all()
reset_ngx()
end)
it("short circuits when memcached is not configured", function()
assert_short_circuits(function(global_throttle)
assert.has_no.errors(function()
global_throttle.throttle({ memcached = { host = "", port = 0 } }, LOCATION_CONFIG)
end)
end)
end)
it("short circuits when limit or window_size is not configured", function()
assert_short_circuits(function(global_throttle)
local location_config_copy = util.deepcopy(LOCATION_CONFIG)
location_config_copy.limit = 0
assert.has_no.errors(function()
global_throttle.throttle(CONFIG, location_config_copy)
end)
end)
assert_short_circuits(function(global_throttle)
local location_config_copy = util.deepcopy(LOCATION_CONFIG)
location_config_copy.window_size = 0
assert.has_no.errors(function()
global_throttle.throttle(CONFIG, location_config_copy)
end)
end)
end)
it("short circuits when remote_addr is in ignored_cidrs", function()
local global_throttle = require_without_cache("global_throttle")
local location_config = util.deepcopy(LOCATION_CONFIG)
location_config.ignored_cidrs = { ngx.var.remote_addr }
assert_short_circuits(function(global_throttle)
assert.has_no.errors(function()
global_throttle.throttle(CONFIG, location_config)
end)
end)
end)
it("rejects when exceeding limit has already been cached", function()
local key_value = "foo"
local location_config = util.deepcopy(LOCATION_CONFIG)
location_config.key = { { nil, nil, nil, key_value } }
cache_rejection_decision(NAMESPACE, key_value, 0.5)
assert_request_rejected(CONFIG, location_config, { with_cache = true })
end)
describe("when resty_global_throttle fails", function()
it("fails open in case of initialization error", function()
local too_long_namespace = ""
for i=1,36,1 do
too_long_namespace = too_long_namespace .. "a"
end
local location_config = util.deepcopy(LOCATION_CONFIG)
location_config.namespace = too_long_namespace
assert_fails_open(CONFIG, location_config, "faled to initialize resty_global_throttle: ", "'namespace' can be at most 35 characters")
end)
it("fails open in case of key processing error", function()
stub_resty_global_throttle_process(nil, nil, "failed to process", function()
assert_fails_open(CONFIG, LOCATION_CONFIG, "error while processing key: ", "failed to process")
end)
end)
end)
it("initializes resty_global_throttle with the right parameters", function()
local resty_global_throttle = require_without_cache("resty.global_throttle")
local resty_global_throttle_original_new = resty_global_throttle.new
resty_global_throttle.new = function(namespace, limit, window_size, store_opts)
local o, err = resty_global_throttle_original_new(namespace, limit, window_size, store_opts)
if not o then
return nil, err
end
o.process = function(self, key) return 1, nil, nil end
local expected = LOCATION_CONFIG
assert.are.same(expected.namespace, namespace)
assert.are.same(expected.limit, limit)
assert.are.same(expected.window_size, window_size)
assert.are.same("memcached", store_opts.provider)
assert.are.same(CONFIG.memcached.host, store_opts.host)
assert.are.same(CONFIG.memcached.port, store_opts.port)
assert.are.same(CONFIG.memcached.connect_timeout, store_opts.connect_timeout)
assert.are.same(CONFIG.memcached.max_idle_timeout, store_opts.max_idle_timeout)
assert.are.same(CONFIG.memcached.pool_size, store_opts.pool_size)
return o, nil
end
local resty_global_throttle_new_spy = spy.on(resty_global_throttle, "new")
local global_throttle = require_without_cache("global_throttle")
assert.has_no.errors(function()
global_throttle.throttle(CONFIG, LOCATION_CONFIG)
end)
assert.spy(resty_global_throttle_new_spy).was_called()
end)
it("rejects request and caches decision when limit is exceeding after processing a key", function()
local desired_delay = 0.015
stub_resty_global_throttle_process(LOCATION_CONFIG.limit + 1, desired_delay, nil, function()
assert_request_rejected(CONFIG, LOCATION_CONFIG, { with_cache = false })
local cache_key = LOCATION_CONFIG.namespace .. ngx.var.remote_addr
assert.is_true(ngx.shared.global_throttle_cache:get(cache_key))
-- we assume it won't take more than this after caching
-- until we execute the assertion below
local delta = 0.001
local ttl = ngx.shared.global_throttle_cache:ttl(cache_key)
assert.is_true(ttl > desired_delay - delta)
assert.is_true(ttl <= desired_delay)
end)
end)
it("rejects request and skip caching of decision when limit is exceeding after processing a key but desired delay is lower than the threshold", function()
local desired_delay = 0.0009
stub_resty_global_throttle_process(LOCATION_CONFIG.limit, desired_delay, nil, function()
assert_request_rejected(CONFIG, LOCATION_CONFIG, { with_cache = false })
local cache_key = LOCATION_CONFIG.namespace .. ngx.var.remote_addr
assert.is_nil(ngx.shared.global_throttle_cache:get(cache_key))
end)
end)
it("allows the request when limit is not exceeding after processing a key", function()
stub_resty_global_throttle_process(LOCATION_CONFIG.limit - 3, nil, nil,
function()
assert_request_not_rejected(CONFIG, LOCATION_CONFIG)
end
)
end)
it("rejects with custom status code", function()
cache_rejection_decision(NAMESPACE, ngx.var.remote_addr, 0.3)
local config = util.deepcopy(CONFIG)
config.status_code = 503
assert_request_rejected(config, LOCATION_CONFIG, { with_cache = true })
end)
end)

View file

@@ -0,0 +1,50 @@
local _M = {}
local resty_dns_resolver = require("resty.dns.resolver")
local original_resty_dns_resolver_new = resty_dns_resolver.new
local original_io_open = io.open
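-- run func() with io.open patched so that reading /etc/resolv.conf returns a temp file containing the given content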
function _M.with_resolv_conf(content, func)
local new_resolv_conf_f = assert(io.tmpfile())
new_resolv_conf_f:write(content)
new_resolv_conf_f:seek("set", 0)
io.open = function(path, mode)
if path ~= "/etc/resolv.conf" then
error("expected '/etc/resolv.conf' as path but got: " .. tostring(path))
end
if mode ~= "r" then
error("expected 'r' as mode but got: " .. tostring(mode))
end
return new_resolv_conf_f, nil
end
func()
io.open = original_io_open
if io.type(new_resolv_conf_f) ~= "closed file" then
error("file was left open")
end
end
function _M.mock_resty_dns_new(func)
resty_dns_resolver.new = func
end
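-- make the resolver's query() return the canned response/err; when mocked_host is set, only that host may be queried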
function _M.mock_resty_dns_query(mocked_host, response, err)
resty_dns_resolver.new = function(self, options)
local r = original_resty_dns_resolver_new(self, options)
r.query = function(self, host, options, tries)
if mocked_host and mocked_host ~= host then
return error(tostring(host) .. " is not mocked")
end
return response, err
end
return r
end
end
return _M

View file

@@ -0,0 +1,9 @@
describe("lua_ingress", function()
it("patches math.randomseed to not be called more than once per worker", function()
local s = spy.on(ngx, "log")
math.randomseed(100)
assert.spy(s).was_called_with(ngx.WARN,
string.format("ignoring math.randomseed(%d) since PRNG is already seeded for worker %d", 100, ngx.worker.pid()))
end)
end)

View file

@@ -0,0 +1,159 @@
local cjson = require("cjson.safe")
local original_ngx = ngx
local function reset_ngx()
_G.ngx = original_ngx
end
local function mock_ngx(mock)
local _ngx = mock
setmetatable(_ngx, { __index = ngx })
_G.ngx = _ngx
end
local function mock_ngx_socket_tcp()
local tcp_mock = {}
stub(tcp_mock, "connect", true)
stub(tcp_mock, "send", true)
stub(tcp_mock, "close", true)
local socket_mock = {}
stub(socket_mock, "tcp", tcp_mock)
mock_ngx({ socket = socket_mock })
return tcp_mock
end
describe("Monitor", function()
after_each(function()
reset_ngx()
package.loaded["monitor"] = nil
end)
it("extended batch size", function()
mock_ngx({ var = {} })
local monitor = require("monitor")
monitor.set_metrics_max_batch_size(20000)
for i = 1,20000,1 do
monitor.call()
end
assert.equal(20000, #monitor.get_metrics_batch())
end)
it("batches metrics", function()
mock_ngx({ var = {} })
local monitor = require("monitor")
for i = 1,10,1 do
monitor.call()
end
assert.equal(10, #monitor.get_metrics_batch())
end)
describe("flush", function()
it("short circuits when premature is true (when worker is shutting down)", function()
local tcp_mock = mock_ngx_socket_tcp()
mock_ngx({ var = {} })
local monitor = require("monitor")
for i = 1,10,1 do
monitor.call()
end
monitor.flush(true)
assert.stub(tcp_mock.connect).was_not_called()
end)
it("short circuits when there's no metrics batched", function()
local tcp_mock = mock_ngx_socket_tcp()
local monitor = require("monitor")
monitor.flush()
assert.stub(tcp_mock.connect).was_not_called()
end)
it("JSON encodes and sends the batched metrics", function()
local tcp_mock = mock_ngx_socket_tcp()
local ngx_var_mock = {
host = "example.com",
namespace = "default",
ingress_name = "example",
service_name = "http-svc",
proxy_alternative_upstream_name = "default-http-svc-canary-80",
location_path = "/",
request_method = "GET",
status = "200",
request_length = "256",
request_time = "0.04",
bytes_sent = "512",
upstream_addr = "10.10.0.1",
upstream_connect_time = "0.01",
upstream_header_time = "0.02",
upstream_response_time = "0.03",
upstream_response_length = "456",
upstream_status = "200",
}
mock_ngx({ var = ngx_var_mock })
local monitor = require("monitor")
monitor.call()
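-- ngx_var_mock1 is the same table as ngx_var_mock, so the writes below also change the vars seen by the second mock_ngx() call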
local ngx_var_mock1 = ngx_var_mock
ngx_var_mock1.status = "201"
ngx_var_mock1.request_method = "POST"
mock_ngx({ var = ngx_var_mock })
monitor.call()
monitor.flush()
local expected_payload = cjson.encode({
{
host = "example.com",
namespace = "default",
ingress = "example",
service = "http-svc",
canary = "default-http-svc-canary-80",
path = "/",
method = "GET",
status = "200",
requestLength = 256,
requestTime = 0.04,
responseLength = 512,
upstreamLatency = 0.01,
upstreamHeaderTime = 0.02,
upstreamResponseTime = 0.03,
upstreamResponseLength = 456,
},
{
host = "example.com",
namespace = "default",
ingress = "example",
service = "http-svc",
canary = "default-http-svc-canary-80",
path = "/",
method = "POST",
status = "201",
requestLength = 256,
requestTime = 0.04,
responseLength = 512,
upstreamLatency = 0.01,
upstreamHeaderTime = 0.02,
upstreamResponseTime = 0.03,
upstreamResponseLength = 456,
},
})
assert.stub(tcp_mock.connect).was_called_with(tcp_mock, "unix:/tmp/nginx/prometheus-nginx.socket")
assert.stub(tcp_mock.send).was_called_with(tcp_mock, expected_payload)
assert.stub(tcp_mock.close).was_called_with(tcp_mock)
end)
end)
end)
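The spec above only exercises monitor.call(), monitor.flush() and the batch accessors, so the wiring below is a hedged sketch of how a worker could use them: record one metric per request from the log phase and flush the batch on a timer. ngx.timer.every passes the premature flag through to flush, matching the short-circuit tested above; the one-second interval and function names are assumptions for illustration only.

local monitor = require("monitor")

local function init_worker()
  -- flush batched metrics periodically; ngx.timer.every calls
  -- monitor.flush(premature), so a shutting-down worker short circuits
  local ok, err = ngx.timer.every(1, monitor.flush)
  if not ok then
    ngx.log(ngx.ERR, "failed to create the flush timer: ", err)
  end
end

local function log()
  -- called from the log phase; snapshots ngx.var into the batch
  monitor.call()
end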

View file

@@ -0,0 +1,23 @@
describe("plugins", function()
describe("#run", function()
it("runs the plugins in the given order", function()
ngx.get_phase = function() return "rewrite" end
local plugins = require("plugins")
local called_plugins = {}
local plugins_to_mock = {"plugins.pluginfirst.main", "plugins.pluginsecond.main", "plugins.pluginthird.main"}
for i=1, 3, 1
do
package.loaded[plugins_to_mock[i]] = {
rewrite = function()
called_plugins[#called_plugins + 1] = plugins_to_mock[i]
end
}
end
assert.has_no.errors(function()
plugins.init({"pluginfirst", "pluginsecond", "pluginthird"})
end)
assert.has_no.errors(plugins.run)
assert.are.same(plugins_to_mock, called_plugins)
end)
end)
end)
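For reference, the plugin shape this spec mocks is a module at plugins.<name>.main returning a table of phase handlers. A minimal hypothetical plugin could look like this:

-- plugins/hello_world/main.lua (hypothetical): sets a header in the rewrite phase
local ngx = ngx
local _M = {}

function _M.rewrite()
  ngx.req.set_header("x-hello-world", "1")
end

return _M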

View file

@@ -0,0 +1,86 @@
local busted_runner
do
-- avoid warning during test runs caused by
-- https://github.com/openresty/lua-nginx-module/blob/2524330e59f0a385a9c77d4d1b957476dce7cb33/src/ngx_http_lua_util.c#L810
local traceback = require "debug".traceback
setmetatable(_G, { __newindex = function(table, key, value) rawset(table, key, value) end })
busted_runner = require "busted.runner"
-- if more constants need to be whitelisted for test runs, add them here.
local GLOBALS_ALLOWED_IN_TEST = {
helpers = true,
require_without_cache = true,
reset_ngx = true,
}
local newindex = function(table, key, value)
rawset(table, key, value)
local phase = ngx.get_phase()
if phase == "init_worker" or phase == "init" then
return
end
-- we check only timer phase because resty-cli runs everything in timer phase
if phase == "timer" and GLOBALS_ALLOWED_IN_TEST[key] then
return
end
local message = "writing a global lua variable " .. key ..
" which may lead to race conditions between concurrent requests, so prefer the use of 'local' variables " .. traceback('', 2)
-- it's important to do print here because ngx.log is mocked below
print(message)
end
setmetatable(_G, { __newindex = newindex })
end
_G.helpers = require("test.helpers")
local ffi = require("ffi")
local lua_ingress = require("lua_ingress")
-- without this we get errors such as "attempt to redefine XXX"
local old_cdef = ffi.cdef
local exists = {}
ffi.cdef = function(def)
if exists[def] then
return
end
exists[def] = true
return old_cdef(def)
end
local old_udp = ngx.socket.udp
ngx.socket.udp = function(...)
local socket = old_udp(...)
socket.send = function(...)
error("ngx.socket.udp:send please mock this to use in tests")
end
return socket
end
local old_tcp = ngx.socket.tcp
ngx.socket.tcp = function(...)
local socket = old_tcp(...)
socket.send = function(...)
error("ngx.socket.tcp:send please mock this to use in tests")
end
return socket
end
ngx.log = function(...) end
ngx.print = function(...) end
local original_ngx = ngx
_G.reset_ngx = function()
ngx = original_ngx
end
_G.require_without_cache = function(module)
package.loaded[module] = nil
return require(module)
end
lua_ingress.init_worker()
busted_runner({ standalone = false })
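The __newindex guard installed above still performs the write (it calls rawset first) but prints a warning for any global assignment outside GLOBALS_ALLOWED_IN_TEST. As a quick illustration of what it flags (resty-cli runs spec code in the timer phase):

-- inside a spec, i.e. in the "timer" phase:
leaked_value = 1   -- new global outside the allowlist: the warning is printed
local kept = 1     -- locals never reach the __newindex metamethod, nothing is printed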

View file

@@ -0,0 +1,136 @@
local conf = [===[
nameserver 1.2.3.4
nameserver 4.5.6.7
search ingress-nginx.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
]===]
package.loaded["util.resolv_conf"] = nil
helpers.with_resolv_conf(conf, function()
require("util.resolv_conf")
end)
describe("dns.lookup", function()
local dns, dns_lookup, spy_ngx_log
before_each(function()
spy_ngx_log = spy.on(ngx, "log")
dns = require("util.dns")
dns_lookup = dns.lookup
end)
after_each(function()
package.loaded["util.dns"] = nil
end)
it("sets correct nameservers", function()
helpers.mock_resty_dns_new(function(self, options)
assert.are.same({ nameservers = { "1.2.3.4", "4.5.6.7" }, retrans = 5, timeout = 2000 }, options)
return nil, ""
end)
dns_lookup("example.com")
end)
describe("when there's an error", function()
it("returns host when resolver can not be instantiated", function()
helpers.mock_resty_dns_new(function(...) return nil, "an error" end)
assert.are.same({ "example.com" }, dns_lookup("example.com"))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to instantiate the resolver: an error")
end)
it("returns host when the query returns nil", function()
helpers.mock_resty_dns_query(nil, nil, "oops!")
assert.are.same({ "example.com" }, dns_lookup("example.com"))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server for ", "example.com", ":\n", "oops!\noops!")
end)
it("returns host when the query returns empty answer", function()
helpers.mock_resty_dns_query(nil, {})
assert.are.same({ "example.com" }, dns_lookup("example.com"))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server for ", "example.com", ":\n", "no A record resolved\nno AAAA record resolved")
end)
it("returns host when there's answer but with error", function()
helpers.mock_resty_dns_query(nil, { errcode = 1, errstr = "format error" })
assert.are.same({ "example.com" }, dns_lookup("example.com"))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server for ", "example.com", ":\n", "server returned error code: 1: format error\nserver returned error code: 1: format error")
end)
it("returns host when there's answer but no A/AAAA record in it", function()
helpers.mock_resty_dns_query(nil, { { name = "example.com", cname = "sub.example.com", ttl = 60 } })
assert.are.same({ "example.com" }, dns_lookup("example.com"))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server for ", "example.com", ":\n", "no A record resolved\nno AAAA record resolved")
end)
it("returns host when the query returns nil and number of dots is not less than configured ndots", function()
helpers.mock_resty_dns_query(nil, nil, "oops!")
assert.are.same({ "a.b.c.d.example.com" }, dns_lookup("a.b.c.d.example.com"))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server for ", "a.b.c.d.example.com", ":\n", "oops!\noops!")
end)
it("returns host when the query returns nil for a fully qualified domain", function()
helpers.mock_resty_dns_query("example.com.", nil, "oops!")
assert.are.same({ "example.com." }, dns_lookup("example.com."))
assert.spy(spy_ngx_log).was_called_with(ngx.ERR, "failed to query the DNS server for ", "example.com.", ":\n", "oops!\noops!")
end)
end)
it("returns answer from cache if it exists without doing actual DNS query", function()
dns._cache:set("example.com", { "192.168.1.1" })
assert.are.same({ "192.168.1.1" }, dns_lookup("example.com"))
end)
it("resolves a fully qualified domain without looking at resolv.conf search and caches result", function()
helpers.mock_resty_dns_query("example.com.", {
{
name = "example.com.",
address = "192.168.1.1",
ttl = 3600,
},
{
name = "example.com",
address = "1.2.3.4",
ttl = 60,
}
})
assert.are.same({ "192.168.1.1", "1.2.3.4" }, dns_lookup("example.com."))
assert.are.same({ "192.168.1.1", "1.2.3.4" }, dns._cache:get("example.com."))
end)
it("starts with host itself when number of dots is not less than configured ndots", function()
local host = "a.b.c.d.example.com"
helpers.mock_resty_dns_query(host, { { name = host, address = "192.168.1.1", ttl = 3600, } } )
assert.are.same({ "192.168.1.1" }, dns_lookup(host))
assert.are.same({ "192.168.1.1" }, dns._cache:get(host))
end)
it("starts with first search entry when number of dots is less than configured ndots", function()
local host = "example.com.ingress-nginx.svc.cluster.local"
helpers.mock_resty_dns_query(host, { { name = host, address = "192.168.1.1", ttl = 3600, } } )
assert.are.same({ "192.168.1.1" }, dns_lookup(host))
assert.are.same({ "192.168.1.1" }, dns._cache:get(host))
end)
it("caches with minimal ttl", function()
helpers.mock_resty_dns_query("example.com.", {
{
name = "example.com.",
address = "192.168.1.1",
ttl = 3600,
},
{
name = "example.com.",
address = "1.2.3.4",
ttl = 60,
}
})
local spy_cache_set = spy.on(dns._cache, "set")
assert.are.same({ "192.168.1.1", "1.2.3.4" }, dns_lookup("example.com."))
assert.spy(spy_cache_set).was_called_with(match.is_table(), "example.com.", { "192.168.1.1", "1.2.3.4" }, 60)
end)
end)

View file

@@ -0,0 +1,167 @@
local util = require("util")
local nodemap = require("util.nodemap")
local function get_test_backend_single()
return {
name = "access-router-production-web-80",
endpoints = {
{ address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 }
}
}
end
local function get_test_backend_multi()
return {
name = "access-router-production-web-80",
endpoints = {
{ address = "10.184.7.40", port = "8080", maxFails = 0, failTimeout = 0 },
{ address = "10.184.7.41", port = "8080", maxFails = 0, failTimeout = 0 }
}
}
end
local function get_test_nodes_ignore(endpoint)
local ignore = {}
ignore[endpoint] = true
return ignore
end
describe("Node Map", function()
local test_backend_single = get_test_backend_single()
local test_backend_multi = get_test_backend_multi()
local test_salt = test_backend_single.name
local test_nodes_single = util.get_nodes(test_backend_single.endpoints)
local test_nodes_multi = util.get_nodes(test_backend_multi.endpoints)
local test_endpoint1 = test_backend_multi.endpoints[1].address .. ":" .. test_backend_multi.endpoints[1].port
local test_endpoint2 = test_backend_multi.endpoints[2].address .. ":" .. test_backend_multi.endpoints[2].port
local test_nodes_ignore = get_test_nodes_ignore(test_endpoint1)
describe("new()", function()
context("when no salt has been provided", function()
it("random() returns an unsalted key", function()
local nodemap_instance = nodemap:new(test_nodes_single, nil)
local expected_endpoint = test_endpoint1
local expected_hash_key = ngx.md5(expected_endpoint)
local actual_endpoint
local actual_hash_key
actual_endpoint, actual_hash_key = nodemap_instance:random()
assert.equal(actual_endpoint, expected_endpoint)
assert.equal(expected_hash_key, actual_hash_key)
end)
end)
context("when a salt has been provided", function()
it("random() returns a salted key", function()
local nodemap_instance = nodemap:new(test_nodes_single, test_salt)
local expected_endpoint = test_endpoint1
local expected_hash_key = ngx.md5(test_salt .. expected_endpoint)
local actual_endpoint
local actual_hash_key
actual_endpoint, actual_hash_key = nodemap_instance:random()
assert.equal(actual_endpoint, expected_endpoint)
assert.equal(expected_hash_key, actual_hash_key)
end)
end)
context("when no nodes have been provided", function()
it("random() returns nil", function()
local nodemap_instance = nodemap:new({}, test_salt)
local actual_endpoint
local actual_hash_key
actual_endpoint, actual_hash_key = nodemap_instance:random()
assert.equal(actual_endpoint, nil)
assert.equal(actual_hash_key, nil)
end)
end)
end)
describe("find()", function()
before_each(function()
package.loaded["util.nodemap"] = nil
nodemap = require("util.nodemap")
end)
context("when a hash key is valid", function()
it("find() returns the correct endpoint", function()
local nodemap_instance = nodemap:new(test_nodes_single, test_salt)
local test_hash_key
local expected_endpoint
local actual_endpoint
expected_endpoint, test_hash_key = nodemap_instance:random()
assert.not_equal(expected_endpoint, nil)
assert.not_equal(test_hash_key, nil)
actual_endpoint = nodemap_instance:find(test_hash_key)
assert.equal(actual_endpoint, expected_endpoint)
end)
end)
context("when a hash key is invalid", function()
it("find() returns nil", function()
local nodemap_instance = nodemap:new(test_nodes_single, test_salt)
local test_hash_key = "invalid or nonexistent hash key"
local actual_endpoint
actual_endpoint = nodemap_instance:find(test_hash_key)
assert.equal(actual_endpoint, nil)
end)
end)
end)
describe("random_except()", function()
before_each(function()
package.loaded["util.nodemap"] = nil
nodemap = require("util.nodemap")
end)
context("when nothing has been excluded", function()
it("random_except() returns the correct endpoint", function()
local nodemap_instance = nodemap:new(test_nodes_single, test_salt)
local expected_endpoint = test_endpoint1
local test_hash_key
local actual_endpoint
actual_endpoint, test_hash_key = nodemap_instance:random_except({})
assert.equal(expected_endpoint, actual_endpoint)
assert.not_equal(test_hash_key, nil)
end)
end)
context("when everything has been excluded", function()
it("random_except() returns nil", function()
local nodemap_instance = nodemap:new(test_nodes_single, test_salt)
local actual_hash_key
local actual_endpoint
actual_endpoint, actual_hash_key = nodemap_instance:random_except(test_nodes_ignore)
assert.equal(actual_endpoint, nil)
assert.equal(actual_hash_key, nil)
end)
end)
context("when an endpoint has been excluded", function()
it("random_except() does not return it", function()
local nodemap_instance = nodemap:new(test_nodes_multi, test_salt)
local expected_endpoint = test_endpoint2
local actual_endpoint
local test_hash_key
actual_endpoint, test_hash_key = nodemap_instance:random_except(test_nodes_ignore)
assert.equal(actual_endpoint, expected_endpoint)
assert.not_equal(test_hash_key, nil)
end)
end)
end)
end)

View file

@@ -0,0 +1,65 @@
local original_io_open = io.open
describe("resolv_conf", function()
before_each(function()
package.loaded["util.resolv_conf"] = nil
io.open = original_io_open
end)
it("errors when file can not be opened", function()
io.open = function(...)
return nil, "file does not exist"
end
assert.has_error(function() require("util.resolv_conf") end, "could not open /etc/resolv.conf: file does not exist")
end)
it("opens '/etc/resolv.conf' with mode 'r'", function()
io.open = function(path, mode)
assert.are.same("/etc/resolv.conf", path)
assert.are.same("r", mode)
return original_io_open(path, mode)
end
assert.has_no.errors(function() require("util.resolv_conf") end)
end)
it("correctly parses resolv.conf", function()
local conf = [===[
# This is a comment
nameserver 10.96.0.10
nameserver 10.96.0.99
nameserver 2001:4860:4860::8888
search ingress-nginx.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
]===]
helpers.with_resolv_conf(conf, function()
local resolv_conf = require("util.resolv_conf")
assert.are.same({
nameservers = { "10.96.0.10", "10.96.0.99", "[2001:4860:4860::8888]" },
search = { "ingress-nginx.svc.cluster.local", "svc.cluster.local", "cluster.local" },
ndots = 5,
}, resolv_conf)
end)
end)
it("ignores options that it does not understand", function()
local conf = [===[
nameserver 10.96.0.10
search example.com
options debug
options ndots:3
]===]
helpers.with_resolv_conf(conf, function()
local resolv_conf = require("util.resolv_conf")
assert.are.same({
nameservers = { "10.96.0.10" },
search = { "example.com" },
ndots = 3,
}, resolv_conf)
end)
end)
end)

View file

@@ -0,0 +1,55 @@
describe("same_site_compatible_test", function()
it("returns true for nil user agent", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible(nil))
end)
it("returns false for chrome 4", function()
local same_site = require("util.same_site")
assert.False(same_site.same_site_none_compatible("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2704.103 Safari/537.36"))
end)
it("returns false for chrome 5", function()
local same_site = require("util.same_site")
assert.False(same_site.same_site_none_compatible("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"))
end)
it("returns false for chrome 6", function()
local same_site = require("util.same_site")
assert.False(same_site.same_site_none_compatible("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.2704.103 Safari/537.36"))
end)
it("returns false for iPhone OS 12", function()
local same_site = require("util.same_site")
assert.False(same_site.same_site_none_compatible("Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1"))
end)
it("returns false for iPad OS 12", function()
local same_site = require("util.same_site")
assert.False(same_site.same_site_none_compatible("Mozilla/5.0 (iPad; CPU OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Mobile/15E148 Safari/604.1"))
end)
it("returns false for Mac 10.14 Safari", function()
local same_site = require("util.same_site")
assert.False(same_site.same_site_none_compatible("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Safari/605.1.15"))
end)
it("returns true for chrome 7", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.2704.103 Safari/537.36"))
end)
it("returns true for chrome 8", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.2704.103 Safari/537.36"))
end)
it("returns true for iPhone OS 13", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible("Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1"))
end)
it("returns true for iPad OS 13", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible("Mozilla/5.0 (iPad; CPU OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Mobile/15E148 Safari/604.1"))
end)
it("returns true for Mac 10.15 Safari", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.4 Safari/605.1.15"))
end)
it("returns true for Mac 10.14 Chrome", function()
local same_site = require("util.same_site")
assert.True(same_site.same_site_none_compatible("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36"))
end)
end)

View file

@@ -0,0 +1,57 @@
local split = require("util.split")
describe("split", function()
describe("get_last_value", function()
it("splits value of an upstream variable and returns last value", function()
for _, case in ipairs({{"127.0.0.1:26157 : 127.0.0.1:26158", "127.0.0.1:26158"},
{"127.0.0.1:26157, 127.0.0.1:26158", "127.0.0.1:26158"},
{"127.0.0.1:26158", "127.0.0.1:26158"}}) do
local last = split.get_last_value(case[1])
assert.equal(case[2], last)
end
end)
end)
describe("split_string", function()
it("returns empty array if input string is empty", function()
local splits, len = split.split_string("", ",")
assert.equal(0, len)
assert.is.truthy(splits)
end)
it("returns empty array if input string is nil", function()
local splits, len = split.split_string(nil, ",")
assert.equal(0, len)
assert.is.truthy(splits)
end)
it("returns empty array if delimiter is empty", function()
local splits, len = split.split_string("1,2", "")
assert.equal(0, len)
assert.is.truthy(splits)
end)
it("returns empty array if delimiter is nil", function()
local splits, len = split.split_string("1,2", nil)
assert.equal(0, len)
assert.is.truthy(splits)
end)
it("returns array of 1 value if input string is not a list", function()
local splits, len = split.split_string("123", ",")
assert.equal(1, len)
assert.equal("123", splits[1])
end)
it("returns array of values extracted from the input string", function()
local splits, len = split.split_string("1,2,3", ",")
assert.equal(3, len)
assert.equal("1", splits[1])
assert.equal("2", splits[2])
assert.equal("3", splits[3])
end)
end)
end)

View file

@@ -0,0 +1,105 @@
local util
describe("utility", function()
before_each(function()
ngx.var = { remote_addr = "192.168.1.1", [1] = "nginx/regexp/1/group/capturing" }
util = require_without_cache("util")
end)
after_each(function()
reset_ngx()
end)
describe("ngx_complex_value", function()
local ngx_complex_value = function(data)
local ret, err = util.parse_complex_value(data)
if err ~= nil then
return ""
end
return util.generate_var_value(ret)
end
it("returns value of nginx var by key", function()
assert.equal("192.168.1.1", ngx_complex_value("$remote_addr"))
end)
it("returns value of nginx var when key is number", function()
assert.equal("nginx/regexp/1/group/capturing", ngx_complex_value("$1"))
end)
it("returns value of nginx var by multiple variables", function()
assert.equal("192.168.1.1nginx/regexp/1/group/capturing", ngx_complex_value("$remote_addr$1"))
end)
it("returns value by the combination of variable and text value", function()
assert.equal("192.168.1.1-text-value", ngx_complex_value("${remote_addr}-text-value"))
end)
it("returns empty when variable is not defined", function()
assert.equal("", ngx_complex_value("$foo_bar"))
end)
end)
describe("diff_endpoints", function()
it("returns removed and added endpoints", function()
local old = {
{ address = "10.10.10.1", port = "8080" },
{ address = "10.10.10.2", port = "8080" },
{ address = "10.10.10.3", port = "8080" },
}
local new = {
{ address = "10.10.10.1", port = "8080" },
{ address = "10.10.10.2", port = "8081" },
{ address = "11.10.10.2", port = "8080" },
{ address = "11.10.10.3", port = "8080" },
}
local expected_added = { "10.10.10.2:8081", "11.10.10.2:8080", "11.10.10.3:8080" }
table.sort(expected_added)
local expected_removed = { "10.10.10.2:8080", "10.10.10.3:8080" }
table.sort(expected_removed)
local added, removed = util.diff_endpoints(old, new)
table.sort(added)
table.sort(removed)
assert.are.same(expected_added, added)
assert.are.same(expected_removed, removed)
end)
it("returns empty results for empty inputs", function()
local added, removed = util.diff_endpoints({}, {})
assert.are.same({}, added)
assert.are.same({}, removed)
end)
it("returns empty results for same inputs", function()
local old = {
{ address = "10.10.10.1", port = "8080" },
{ address = "10.10.10.2", port = "8080" },
{ address = "10.10.10.3", port = "8080" },
}
local new = util.deepcopy(old)
local added, removed = util.diff_endpoints(old, new)
assert.are.same({}, added)
assert.are.same({}, removed)
end)
it("handles endpoints with nil attribute", function()
local old = {
{ address = nil, port = "8080" },
{ address = "10.10.10.2", port = "8080" },
{ address = "10.10.10.3", port = "8080" },
}
local new = util.deepcopy(old)
new[2].port = nil
local added, removed = util.diff_endpoints(old, new)
assert.are.same({ "10.10.10.2:nil" }, added)
assert.are.same({ "10.10.10.2:8080" }, removed)
end)
end)
end)

View file

@@ -0,0 +1,183 @@
local ngx = ngx
local string = string
local string_len = string.len
local string_format = string.format
local pairs = pairs
local ipairs = ipairs
local tonumber = tonumber
local getmetatable = getmetatable
local type = type
local next = next
local table = table
local re_gmatch = ngx.re.gmatch
local _M = {}
function _M.get_nodes(endpoints)
local nodes = {}
local weight = 1
for _, endpoint in pairs(endpoints) do
local endpoint_string = endpoint.address .. ":" .. endpoint.port
nodes[endpoint_string] = weight
end
return nodes
end
-- parse_complex_value parses a compound value made of variables and text;
-- the result is meant to be passed to generate_var_value to render it into a string value.
function _M.parse_complex_value(complex_value)
local reg = [[ (\\\$[0-9a-zA-Z_]+) | ]] -- \$var
.. [[ \$\{([0-9a-zA-Z_]+)\} | ]] -- ${var}
.. [[ \$([0-9a-zA-Z_]+) | ]] -- $var
.. [[ (\$|[^$\\]+) ]] -- $ or text value
local iterator, err = re_gmatch(complex_value, reg, "jiox")
if not iterator then
return nil, err
end
local v
local t = {}
while true do
v, err = iterator()
if err then
return nil, err
end
if not v then
break
end
table.insert(t, v)
end
return t
end
-- generate_var_value renders the table returned by parse_complex_value
-- into a string value
function _M.generate_var_value(data)
if data == nil then
return ""
end
local t = {}
for _, value in ipairs(data) do
local var_name = value[2] or value[3]
if var_name then
if var_name:match("^%d+$") then
var_name = tonumber(var_name)
end
table.insert(t, ngx.var[var_name])
else
table.insert(t, value[1] or value[4])
end
end
return table.concat(t, "")
end
-- normalize_endpoints takes endpoints as an array of endpoint objects
-- and returns a table whose keys are strings of the form
-- endpoint.address .. ":" .. endpoint.port and whose values are all true
local function normalize_endpoints(endpoints)
local normalized_endpoints = {}
for _, endpoint in pairs(endpoints) do
local endpoint_string = string_format("%s:%s", endpoint.address, endpoint.port)
normalized_endpoints[endpoint_string] = true
end
return normalized_endpoints
end
-- diff_endpoints compares old and new:
-- the first return value contains the endpoints that are in new
-- but not in old, and the second return value contains the endpoints
-- that are in old but not in new.
-- Both return values are normalized (ip:port).
function _M.diff_endpoints(old, new)
local endpoints_added, endpoints_removed = {}, {}
local normalized_old = normalize_endpoints(old)
local normalized_new = normalize_endpoints(new)
for endpoint_string, _ in pairs(normalized_old) do
if not normalized_new[endpoint_string] then
table.insert(endpoints_removed, endpoint_string)
end
end
for endpoint_string, _ in pairs(normalized_new) do
if not normalized_old[endpoint_string] then
table.insert(endpoints_added, endpoint_string)
end
end
return endpoints_added, endpoints_removed
end
-- this implementation is taken from
-- https://web.archive.org/web/20131225070434/http://snippets.
-- luacode.org/snippets/Deep_Comparison_of_Two_Values_3
-- and modified for use in this project
local function deep_compare(t1, t2, ignore_mt)
local ty1 = type(t1)
local ty2 = type(t2)
if ty1 ~= ty2 then return false end
-- non-table types can be directly compared
if ty1 ~= 'table' and ty2 ~= 'table' then return t1 == t2 end
-- as well as tables which have the metamethod __eq
local mt = getmetatable(t1)
if not ignore_mt and mt and mt.__eq then return t1 == t2 end
for k1,v1 in pairs(t1) do
local v2 = t2[k1]
if v2 == nil or not deep_compare(v1,v2) then return false end
end
for k2,v2 in pairs(t2) do
local v1 = t1[k2]
if v1 == nil or not deep_compare(v1,v2) then return false end
end
return true
end
_M.deep_compare = deep_compare
function _M.is_blank(str)
return str == nil or string_len(str) == 0
end
-- this implementation is taken from:
-- https://github.com/luafun/luafun/blob/master/fun.lua#L33
-- SHA: 04c99f9c393e54a604adde4b25b794f48104e0d0
local function deepcopy(orig)
local orig_type = type(orig)
local copy
if orig_type == 'table' then
copy = {}
for orig_key, orig_value in next, orig, nil do
copy[deepcopy(orig_key)] = deepcopy(orig_value)
end
else
copy = orig
end
return copy
end
_M.deepcopy = deepcopy
local function tablelength(T)
local count = 0
for _ in pairs(T) do
count = count + 1
end
return count
end
_M.tablelength = tablelength
-- replaces special character value a with value b for all occurrences in a
-- string
local function replace_special_char(str, a, b)
return string.gsub(str, "%" .. a, b)
end
_M.replace_special_char = replace_special_char
return _M
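A short usage sketch of the helpers above; the rendered value and the endpoint tables are illustrative:

local util = require("util")

-- complex values are parsed once and rendered against ngx.var per request
local parsed, err = util.parse_complex_value("${remote_addr}-text")
if not err then
  local rendered = util.generate_var_value(parsed)  -- e.g. "192.168.1.1-text"
end

-- endpoint diffing returns normalized "ip:port" strings
local old = { { address = "10.0.0.1", port = "8080" } }
local new = { { address = "10.0.0.2", port = "8080" } }
local added, removed = util.diff_endpoints(old, new)
-- added == { "10.0.0.2:8080" }, removed == { "10.0.0.1:8080" }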

View file

@@ -0,0 +1,161 @@
local resolver = require("resty.dns.resolver")
local lrucache = require("resty.lrucache")
local resolv_conf = require("util.resolv_conf")
local ngx_log = ngx.log
local ngx_INFO = ngx.INFO
local ngx_ERR = ngx.ERR
local string_format = string.format
local table_concat = table.concat
local table_insert = table.insert
local ipairs = ipairs
local tostring = tostring
local _M = {}
local CACHE_SIZE = 10000
-- maximum value according to https://tools.ietf.org/html/rfc2181
local MAXIMUM_TTL_VALUE = 2147483647
-- for every host we will try two queries for the following types with the order set here
local QTYPES_TO_CHECK = { resolver.TYPE_A, resolver.TYPE_AAAA }
local cache
do
local err
cache, err = lrucache.new(CACHE_SIZE)
if not cache then
return error("failed to create the cache: " .. (err or "unknown"))
end
end
local function cache_set(host, addresses, ttl)
cache:set(host, addresses, ttl)
ngx_log(ngx_INFO, string_format("cache set for '%s' with value of [%s] and ttl of %s.",
host, table_concat(addresses, ", "), ttl))
end
local function is_fully_qualified(host)
return host:sub(-1) == "."
end
local function a_records_and_min_ttl(answers)
local addresses = {}
local ttl = MAXIMUM_TTL_VALUE -- maximum value according to https://tools.ietf.org/html/rfc2181
for _, ans in ipairs(answers) do
if ans.address then
table_insert(addresses, ans.address)
if ans.ttl < ttl then
ttl = ans.ttl
end
end
end
return addresses, ttl
end
local function resolve_host_for_qtype(r, host, qtype)
local answers, err = r:query(host, { qtype = qtype }, {})
if not answers then
return nil, -1, err
end
if answers.errcode then
return nil, -1, string_format("server returned error code: %s: %s",
answers.errcode, answers.errstr)
end
local addresses, ttl = a_records_and_min_ttl(answers)
if #addresses == 0 then
local msg = "no A record resolved"
if qtype == resolver.TYPE_AAAA then msg = "no AAAA record resolved" end
return nil, -1, msg
end
return addresses, ttl, nil
end
local function resolve_host(r, host)
local dns_errors = {}
for _, qtype in ipairs(QTYPES_TO_CHECK) do
local addresses, ttl, err = resolve_host_for_qtype(r, host, qtype)
if addresses and #addresses > 0 then
return addresses, ttl, nil
end
table_insert(dns_errors, tostring(err))
end
return nil, nil, dns_errors
end
function _M.lookup(host)
local cached_addresses = cache:get(host)
if cached_addresses then
return cached_addresses
end
local r, err = resolver:new{
nameservers = resolv_conf.nameservers,
retrans = 5,
timeout = 2000, -- 2 sec
}
if not r then
ngx_log(ngx_ERR, string_format("failed to instantiate the resolver: %s", err))
return { host }
end
local addresses, ttl, dns_errors
-- when the queried domain is fully qualified
-- then we don't go through resolv_conf.search
-- NOTE(elvinefendi): currently FQDN as externalName will be supported starting
-- with K8s 1.15: https://github.com/kubernetes/kubernetes/pull/78385
if is_fully_qualified(host) then
addresses, ttl, dns_errors = resolve_host(r, host)
if addresses then
cache_set(host, addresses, ttl)
return addresses
end
ngx_log(ngx_ERR, "failed to query the DNS server for ",
host, ":\n", table_concat(dns_errors, "\n"))
return { host }
end
-- for non fully qualified domains if number of dots in
-- the queried host is less than resolv_conf.ndots then we try
-- with all the entries in resolv_conf.search before trying the original host
--
-- if number of dots is not less than resolv_conf.ndots then we start with
-- the original host and then try entries in resolv_conf.search
local _, host_ndots = host:gsub("%.", "")
local search_start, search_end = 0, #resolv_conf.search
if host_ndots < resolv_conf.ndots then
search_start = 1
search_end = #resolv_conf.search + 1
end
for i = search_start, search_end, 1 do
local new_host = resolv_conf.search[i] and
string_format("%s.%s", host, resolv_conf.search[i]) or host
addresses, ttl, dns_errors = resolve_host(r, new_host)
if addresses then
cache_set(host, addresses, ttl)
return addresses
end
end
if #dns_errors > 0 then
ngx_log(ngx_ERR, "failed to query the DNS server for ",
host, ":\n", table_concat(dns_errors, "\n"))
end
return { host }
end
setmetatable(_M, {__index = { _cache = cache }})
return _M
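To make the search-list handling above concrete, here is a standalone sketch (not part of the module) of the candidate ordering it walks through; the search domains and ndots value are illustrative:

-- illustrative resolv.conf values
local search = { "ingress-nginx.svc.cluster.local", "svc.cluster.local", "cluster.local" }
local ndots = 5

local function candidates(host)
  local list = {}
  local _, host_ndots = host:gsub("%.", "")
  local search_start, search_end = 0, #search
  if host_ndots < ndots then
    search_start, search_end = 1, #search + 1
  end
  for i = search_start, search_end, 1 do
    list[#list + 1] = search[i] and (host .. "." .. search[i]) or host
  end
  return list
end

-- candidates("app") tries the three search expansions first and "app" last,
-- while candidates("a.b.c.d.e.app") tries the host itself first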

View file

@@ -0,0 +1,125 @@
local math_random = require("math").random
local util_tablelength = require("util").tablelength
local ngx = ngx
local pairs = pairs
local string = string
local setmetatable = setmetatable
local _M = {}
--- create_map generates the node hash table
-- @tparam {[string]=number} nodes A table with the node as a key and its weight as a value.
-- @tparam string salt A salt that will be used to generate salted hash keys.
local function create_map(nodes, salt)
local hash_map = {}
for endpoint, _ in pairs(nodes) do
-- obfuscate the endpoint with a shared key to prevent brute force
-- and rainbow table attacks which could reveal internal endpoints
local key = salt .. endpoint
local hash_key = ngx.md5(key)
hash_map[hash_key] = endpoint
end
return hash_map
end
--- get_random_node picks a random node from the given map.
-- @tparam {[string], ...} map A key to node hash table.
-- @treturn string,string The node and its key
local function get_random_node(map)
local size = util_tablelength(map)
if size < 1 then
return nil, nil
end
local index = math_random(1, size)
local count = 1
for key, endpoint in pairs(map) do
if count == index then
return endpoint, key
end
count = count + 1
end
ngx.log(ngx.ERR, string.format("Failed to find node %d of %d! "
.. "This is a bug, please report!", index, size))
return nil, nil
end
--- new constructs a new instance of the node map
--
-- The map uses MD5 to create hash keys for a given node. For security reasons it supports
-- salted hash keys, to prevent attackers from using rainbow tables or brute forcing
-- the node endpoints, which would reveal cluster internal network information.
--
-- To make sure hash keys are reproducible on different ingress controller instances the salt
-- needs to be shared and therefore is not simply generated randomly.
--
-- @tparam {[string]=number} endpoints A table with the node endpoint
-- as a key and its weight as a value.
-- @tparam[opt] string hash_salt An optional hash salt that will be used to obfuscate the hash key.
function _M.new(self, endpoints, hash_salt)
if hash_salt == nil then
hash_salt = ''
end
-- the endpoints have to be saved as 'nodes' to keep compatibility with balancer.resty
local o = {
salt = hash_salt,
nodes = endpoints,
map = create_map(endpoints, hash_salt)
}
setmetatable(o, self)
self.__index = self
return o
end
--- reinit reinitializes the node map reusing the original salt
-- @tparam {[string]=number} nodes A table with the node as a key and its weight as a value.
function _M.reinit(self, nodes)
self.nodes = nodes
self.map = create_map(nodes, self.salt)
end
--- find looks up a node by hash key.
-- @tparam string key The hash key.
-- @treturn string The node.
function _M.find(self, key)
return self.map[key]
end
--- random picks a random node from the hashmap.
-- @treturn string,string A random node and its key or both nil.
function _M.random(self)
return get_random_node(self.map)
end
--- random_except picks a random node from the hashmap, ignoring the nodes in the given table
-- @tparam {string, } ignore_nodes A table of nodes to ignore; each node must be the key
-- and its value must be set to true
-- @treturn string,string A random node and its key or both nil.
function _M.random_except(self, ignore_nodes)
local valid_nodes = {}
-- avoid generating the map if no ignores were provided
if ignore_nodes == nil or util_tablelength(ignore_nodes) == 0 then
return get_random_node(self.map)
end
-- generate valid endpoints
for key, endpoint in pairs(self.map) do
if not ignore_nodes[endpoint] then
valid_nodes[key] = endpoint
end
end
return get_random_node(valid_nodes)
end
return _M
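A hedged usage sketch of the salted map above; the endpoints, weights and salt are illustrative:

local nodemap = require("util.nodemap")

local endpoints = { ["10.184.7.40:8080"] = 1, ["10.184.7.41:8080"] = 1 }
local map = nodemap:new(endpoints, "access-router-production-web-80")

local endpoint, hash_key = map:random()                  -- pick a node and its salted MD5 key
local same = map:find(hash_key)                          -- map the key back to the node later
local other = map:random_except({ [endpoint] = true })   -- retry on a different node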

View file

@@ -0,0 +1,84 @@
local ngx_re_split = require("ngx.re").split
local string_format = string.format
local tonumber = tonumber
local ngx_log = ngx.log
local ngx_ERR = ngx.ERR
local CONF_PATH = "/etc/resolv.conf"
local nameservers, search, ndots = {}, {}, 1
local function set_search(parts)
local length = #parts
for i = 2, length, 1 do
search[i-1] = parts[i]
end
end
local function set_ndots(parts)
local option = parts[2]
if not option then
return
end
local option_parts, err = ngx_re_split(option, ":")
if err then
ngx_log(ngx_ERR, err)
return
end
if option_parts[1] ~= "ndots" then
return
end
ndots = tonumber(option_parts[2])
end
local function is_comment(line)
return line:sub(1, 1) == "#"
end
local function parse_line(line)
if is_comment(line) then
return
end
local parts, err = ngx_re_split(line, "\\s+")
if err then
ngx_log(ngx_ERR, err)
end
local keyword, value = parts[1], parts[2]
if keyword == "nameserver" then
if not value:match("^%d+%.%d+%.%d+%.%d+$") then
value = string_format("[%s]", value)
end
nameservers[#nameservers + 1] = value
elseif keyword == "search" then
set_search(parts)
elseif keyword == "options" then
set_ndots(parts)
end
end
do
local f, err = io.open(CONF_PATH, "r")
if not f then
error("could not open " .. CONF_PATH .. ": " .. tostring(err))
end
for line in f:lines() do
parse_line(line)
end
f:close()
end
return {
nameservers = nameservers,
search = search,
ndots = ndots,
}
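For illustration, requiring this module in the controller's Lua context yields a table like the one below; the exact values depend on the container's /etc/resolv.conf. IPv6 nameservers are bracketed so they can be handed to resty.dns.resolver as-is, and ndots falls back to 1 when no options line sets it.

local resolv_conf = require("util.resolv_conf")
-- e.g. { nameservers = { "10.96.0.10", "[2001:4860:4860::8888]" },
--        search      = { "ingress-nginx.svc.cluster.local", "svc.cluster.local", "cluster.local" },
--        ndots       = 5 }
ngx.log(ngx.INFO, "ndots: ", resolv_conf.ndots,
        ", nameservers: ", table.concat(resolv_conf.nameservers, ", "))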

View file

@@ -0,0 +1,40 @@
local string = string
local _M = {}
-- determines whether to apply a SameSite=None attribute
-- to a cookie, based on the user agent.
-- returns: boolean
--
-- Chrome 80 treats third-party cookies as SameSite=Strict
-- when SameSite is missing, while certain old browsers do not
-- recognize SameSite=None and reject any cookie bearing it outright.
-- This creates a situation where fixing things for
-- Chrome >= 80 breaks things for old browsers.
-- This function compares the user agent against known
-- browsers that reject SameSite=None cookies.
-- reference: https://www.chromium.org/updates/same-site/incompatible-clients
function _M.same_site_none_compatible(user_agent)
if not user_agent then
return true
elseif string.match(user_agent, "Chrome/4") then
return false
elseif string.match(user_agent, "Chrome/5") then
return false
elseif string.match(user_agent, "Chrome/6") then
return false
elseif string.match(user_agent, "CPU iPhone OS 12") then
return false
elseif string.match(user_agent, "iPad; CPU OS 12") then
return false
elseif string.match(user_agent, "Macintosh")
and string.match(user_agent, "Intel Mac OS X 10_14")
and string.match(user_agent, "Safari")
and not string.match(user_agent, "Chrome") then
return false
end
return true
end
return _M
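A hedged sketch of how the check above is typically used when emitting a cookie; the attribute string is illustrative:

local same_site = require("util.same_site")

local function cookie_attributes(user_agent)
  local attrs = "Path=/; Secure; HttpOnly"
  if same_site.same_site_none_compatible(user_agent) then
    attrs = attrs .. "; SameSite=None"
  end
  return attrs
end

-- cookie_attributes(ngx.var.http_user_agent)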

View file

@@ -0,0 +1,83 @@
local ipairs = ipairs
local _M = {}
-- splits strings into host and port
local function parse_addr(addr)
local _, _, host, port = addr:find("([^:]+):([^:]+)")
if host and port then
return {host=host, port=port}
else
return nil, "error in parsing upstream address!"
end
end
function _M.get_first_value(var)
local t = _M.split_upstream_var(var) or {}
if #t == 0 then return nil end
return t[1]
end
function _M.get_last_value(var)
local t = _M.split_upstream_var(var) or {}
if #t == 0 then return nil end
return t[#t]
end
-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example
-- CAVEAT: nginx is giving out : instead of , so the docs are wrong
-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr
-- 200 : 200 , ngx.var.upstream_status
-- 0.00 : 0.00, ngx.var.upstream_response_time
function _M.split_upstream_var(var)
if not var then
return nil, nil
end
local t = {}
for v in var:gmatch("[^%s|,]+") do
if v ~= ":" then
t[#t+1] = v
end
end
return t
end
-- Splits an NGINX $upstream_addr and returns an array of tables
-- with a `host` and `port` key-value pair.
function _M.split_upstream_addr(addrs_str)
if not addrs_str then
return nil, nil
end
local addrs = _M.split_upstream_var(addrs_str)
local host_and_ports = {}
for _, v in ipairs(addrs) do
local a, err = parse_addr(v)
if err then
return nil, err
end
host_and_ports[#host_and_ports+1] = a
end
if #host_and_ports == 0 then
return nil, "no upstream addresses to parse!"
end
return host_and_ports
end
-- Splits string by delimiter. Returns array of parsed values and the length of the array.
function _M.split_string(what, delim)
local result = {}
local idx = 0
if what and delim and delim ~= "" then
for chunk in what:gmatch("([^" .. delim .. "]+)") do
idx = idx + 1
result[idx] = chunk
end
end
return result, idx
end
return _M
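A short usage sketch against the $upstream_* variables described in the comment above; the addresses are illustrative and include a retried upstream separated by " : ":

local split = require("util.split")

local upstream_addr = "127.0.0.1:26157 : 127.0.0.1:26158"
local last = split.get_last_value(upstream_addr)        -- "127.0.0.1:26158"
local addrs = split.split_upstream_addr(upstream_addr)  -- { {host="127.0.0.1", port="26157"}, {host="127.0.0.1", port="26158"} }
local parts, len = split.split_string("1,2,3", ",")     -- { "1", "2", "3" }, 3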

View file

@@ -0,0 +1,8 @@
# A very simple nginx configuration file that forces nginx to start.
pid /tmp/nginx/nginx.pid;
error_log stderr;
events {}
http {}
daemon off;

View file

@@ -0,0 +1 @@
{}

File diff suppressed because it is too large

View file

@@ -0,0 +1,144 @@
:100644 100644 c7463dcd 00000000 M src/http/ngx_http_core_module.c
diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c
index c7463dcd..e2e45931 100644
--- a/src/http/ngx_http_core_module.c
+++ b/src/http/ngx_http_core_module.c
@@ -55,7 +55,6 @@ static char *ngx_http_core_listen(ngx_conf_t *cf, ngx_command_t *cmd,
void *conf);
static char *ngx_http_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd,
void *conf);
-static char *ngx_http_core_root(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
static char *ngx_http_core_limit_except(ngx_conf_t *cf, ngx_command_t *cmd,
void *conf);
static char *ngx_http_core_set_aio(ngx_conf_t *cf, ngx_command_t *cmd,
@@ -323,21 +322,6 @@ static ngx_command_t ngx_http_core_commands[] = {
offsetof(ngx_http_core_loc_conf_t, default_type),
NULL },
- { ngx_string("root"),
- NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_HTTP_LIF_CONF
- |NGX_CONF_TAKE1,
- ngx_http_core_root,
- NGX_HTTP_LOC_CONF_OFFSET,
- 0,
- NULL },
-
- { ngx_string("alias"),
- NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
- ngx_http_core_root,
- NGX_HTTP_LOC_CONF_OFFSET,
- 0,
- NULL },
-
{ ngx_string("limit_except"),
NGX_HTTP_LOC_CONF|NGX_CONF_BLOCK|NGX_CONF_1MORE,
ngx_http_core_limit_except,
@@ -4312,108 +4296,6 @@ ngx_http_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
}
-static char *
-ngx_http_core_root(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
-{
- ngx_http_core_loc_conf_t *clcf = conf;
-
- ngx_str_t *value;
- ngx_int_t alias;
- ngx_uint_t n;
- ngx_http_script_compile_t sc;
-
- alias = (cmd->name.len == sizeof("alias") - 1) ? 1 : 0;
-
- if (clcf->root.data) {
-
- if ((clcf->alias != 0) == alias) {
- return "is duplicate";
- }
-
- ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
- "\"%V\" directive is duplicate, "
- "\"%s\" directive was specified earlier",
- &cmd->name, clcf->alias ? "alias" : "root");
-
- return NGX_CONF_ERROR;
- }
-
- if (clcf->named && alias) {
- ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
- "the \"alias\" directive cannot be used "
- "inside the named location");
-
- return NGX_CONF_ERROR;
- }
-
- value = cf->args->elts;
-
- if (ngx_strstr(value[1].data, "$document_root")
- || ngx_strstr(value[1].data, "${document_root}"))
- {
- ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
- "the $document_root variable cannot be used "
- "in the \"%V\" directive",
- &cmd->name);
-
- return NGX_CONF_ERROR;
- }
-
- if (ngx_strstr(value[1].data, "$realpath_root")
- || ngx_strstr(value[1].data, "${realpath_root}"))
- {
- ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
- "the $realpath_root variable cannot be used "
- "in the \"%V\" directive",
- &cmd->name);
-
- return NGX_CONF_ERROR;
- }
-
- clcf->alias = alias ? clcf->name.len : 0;
- clcf->root = value[1];
-
- if (!alias && clcf->root.len > 0
- && clcf->root.data[clcf->root.len - 1] == '/')
- {
- clcf->root.len--;
- }
-
- if (clcf->root.data[0] != '$') {
- if (ngx_conf_full_name(cf->cycle, &clcf->root, 0) != NGX_OK) {
- return NGX_CONF_ERROR;
- }
- }
-
- n = ngx_http_script_variables_count(&clcf->root);
-
- ngx_memzero(&sc, sizeof(ngx_http_script_compile_t));
- sc.variables = n;
-
-#if (NGX_PCRE)
- if (alias && clcf->regex) {
- clcf->alias = NGX_MAX_SIZE_T_VALUE;
- n = 1;
- }
-#endif
-
- if (n) {
- sc.cf = cf;
- sc.source = &clcf->root;
- sc.lengths = &clcf->root_lengths;
- sc.values = &clcf->root_values;
- sc.complete_lengths = 1;
- sc.complete_values = 1;
-
- if (ngx_http_script_compile(&sc) != NGX_OK) {
- return NGX_CONF_ERROR;
- }
- }
-
- return NGX_CONF_OK;
-}
-
-
static ngx_http_method_name_t ngx_methods_names[] = {
{ (u_char *) "GET", (uint32_t) ~NGX_HTTP_GET },
{ (u_char *) "HEAD", (uint32_t) ~NGX_HTTP_HEAD },

View file

@@ -0,0 +1,72 @@
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
index f8d5707d..6efe0047 100644
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -1515,6 +1515,11 @@ ngx_http_upstream_connect(ngx_http_request_t *r, ngx_http_upstream_t *u)
return;
}
+ if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
+ ngx_http_upstream_finalize_request(r, u, rc);
+ return;
+ }
+
u->state->peer = u->peer.name;
if (rc == NGX_BUSY) {
diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h
index 3e714e5b..dfbb25e0 100644
--- a/src/http/ngx_http_upstream.h
+++ b/src/http/ngx_http_upstream.h
@@ -427,4 +427,9 @@ extern ngx_conf_bitmask_t ngx_http_upstream_cache_method_mask[];
extern ngx_conf_bitmask_t ngx_http_upstream_ignore_headers_masks[];
+#ifndef HAVE_BALANCER_STATUS_CODE_PATCH
+#define HAVE_BALANCER_STATUS_CODE_PATCH
+#endif
+
+
#endif /* _NGX_HTTP_UPSTREAM_H_INCLUDED_ */
diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h
index 09d24593..d8b4b584 100644
--- a/src/stream/ngx_stream.h
+++ b/src/stream/ngx_stream.h
@@ -27,6 +27,7 @@ typedef struct ngx_stream_session_s ngx_stream_session_t;
#define NGX_STREAM_OK 200
+#define NGX_STREAM_SPECIAL_RESPONSE 300
#define NGX_STREAM_BAD_REQUEST 400
#define NGX_STREAM_FORBIDDEN 403
#define NGX_STREAM_INTERNAL_SERVER_ERROR 500
diff --git a/src/stream/ngx_stream_proxy_module.c b/src/stream/ngx_stream_proxy_module.c
index 818d7329..329dcdc6 100644
--- a/src/stream/ngx_stream_proxy_module.c
+++ b/src/stream/ngx_stream_proxy_module.c
@@ -691,6 +691,11 @@ ngx_stream_proxy_connect(ngx_stream_session_t *s)
return;
}
+ if (rc >= NGX_STREAM_SPECIAL_RESPONSE) {
+ ngx_stream_proxy_finalize(s, rc);
+ return;
+ }
+
u->state->peer = u->peer.name;
if (rc == NGX_BUSY) {
diff --git a/src/stream/ngx_stream_upstream.h b/src/stream/ngx_stream_upstream.h
index 73947f46..21bc0ad7 100644
--- a/src/stream/ngx_stream_upstream.h
+++ b/src/stream/ngx_stream_upstream.h
@@ -151,4 +151,9 @@ ngx_stream_upstream_srv_conf_t *ngx_stream_upstream_add(ngx_conf_t *cf,
extern ngx_module_t ngx_stream_upstream_module;
+#ifndef HAVE_BALANCER_STATUS_CODE_PATCH
+#define HAVE_BALANCER_STATUS_CODE_PATCH
+#endif
+
+
#endif /* _NGX_STREAM_UPSTREAM_H_INCLUDED_ */

View file

@@ -0,0 +1,19 @@
# HG changeset patch
# User Yichun Zhang <agentzh@gmail.com>
# Date 1383598130 28800
# Node ID f64218e1ac963337d84092536f588b8e0d99bbaa
# Parent dea321e5c0216efccbb23e84bbce7cf3e28f130c
Cache: gracefully exit the cache manager process.
diff -r dea321e5c021 -r f64218e1ac96 src/os/unix/ngx_process_cycle.c
--- a/src/os/unix/ngx_process_cycle.c Thu Oct 31 18:23:49 2013 +0400
+++ b/src/os/unix/ngx_process_cycle.c Mon Nov 04 12:48:50 2013 -0800
@@ -1134,7 +1134,7 @@
if (ngx_terminate || ngx_quit) {
ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting");
- exit(0);
+ ngx_worker_process_exit(cycle);
}
if (ngx_reopen) {

View file

@@ -0,0 +1,98 @@
diff --git a/src/event/ngx_event.c b/src/event/ngx_event.c
index 57af8132..4853945f 100644
--- a/src/event/ngx_event.c
+++ b/src/event/ngx_event.c
@@ -196,6 +196,9 @@ ngx_process_events_and_timers(ngx_cycle_t *cycle)
ngx_uint_t flags;
ngx_msec_t timer, delta;
+ ngx_queue_t *q;
+ ngx_event_t *ev;
+
if (ngx_timer_resolution) {
timer = NGX_TIMER_INFINITE;
flags = 0;
@@ -215,6 +218,13 @@ ngx_process_events_and_timers(ngx_cycle_t *cycle)
#endif
}
+ if (!ngx_queue_empty(&ngx_posted_delayed_events)) {
+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "posted delayed event queue not empty"
+ " making poll timeout 0");
+ timer = 0;
+ }
+
if (ngx_use_accept_mutex) {
if (ngx_accept_disabled > 0) {
ngx_accept_disabled--;
@@ -257,6 +267,35 @@ ngx_process_events_and_timers(ngx_cycle_t *cycle)
}
ngx_event_process_posted(cycle, &ngx_posted_events);
+
+ while (!ngx_queue_empty(&ngx_posted_delayed_events)) {
+ q = ngx_queue_head(&ngx_posted_delayed_events);
+
+ ev = ngx_queue_data(q, ngx_event_t, queue);
+ if (ev->delayed) {
+ /* start of newly inserted nodes */
+ for (/* void */;
+ q != ngx_queue_sentinel(&ngx_posted_delayed_events);
+ q = ngx_queue_next(q))
+ {
+ ev = ngx_queue_data(q, ngx_event_t, queue);
+ ev->delayed = 0;
+
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "skipping delayed posted event %p,"
+ " till next iteration", ev);
+ }
+
+ break;
+ }
+
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "delayed posted event %p", ev);
+
+ ngx_delete_posted_event(ev);
+
+ ev->handler(ev);
+ }
}
@@ -600,6 +639,7 @@ ngx_event_process_init(ngx_cycle_t *cycle)
ngx_queue_init(&ngx_posted_accept_events);
ngx_queue_init(&ngx_posted_events);
+ ngx_queue_init(&ngx_posted_delayed_events);
if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
return NGX_ERROR;
diff --git a/src/event/ngx_event_posted.c b/src/event/ngx_event_posted.c
index d851f3d1..b6cea009 100644
--- a/src/event/ngx_event_posted.c
+++ b/src/event/ngx_event_posted.c
@@ -12,6 +12,7 @@
ngx_queue_t ngx_posted_accept_events;
ngx_queue_t ngx_posted_events;
+ngx_queue_t ngx_posted_delayed_events;
void
diff --git a/src/event/ngx_event_posted.h b/src/event/ngx_event_posted.h
index 145d30fe..6c388553 100644
--- a/src/event/ngx_event_posted.h
+++ b/src/event/ngx_event_posted.h
@@ -43,6 +43,9 @@ void ngx_event_process_posted(ngx_cycle_t *cycle, ngx_queue_t *posted);
extern ngx_queue_t ngx_posted_accept_events;
extern ngx_queue_t ngx_posted_events;
+extern ngx_queue_t ngx_posted_delayed_events;
+
+#define HAVE_POSTED_DELAYED_EVENTS_PATCH
#endif /* _NGX_EVENT_POSTED_H_INCLUDED_ */

View file

@@ -0,0 +1,20 @@
# HG changeset patch
# User Yichun Zhang <agentzh@gmail.com>
# Date 1412276417 25200
# Thu Oct 02 12:00:17 2014 -0700
# Node ID 4032b992f23b054c1a2cfb0be879330d2c6708e5
# Parent 1ff0f68d9376e3d184d65814a6372856bf65cfcd
Hash: buffer overflow might happen when exceeding the pre-configured limits.
diff -r 1ff0f68d9376 -r 4032b992f23b src/core/ngx_hash.c
--- a/src/core/ngx_hash.c Tue Sep 30 15:50:28 2014 -0700
+++ b/src/core/ngx_hash.c Thu Oct 02 12:00:17 2014 -0700
@@ -312,6 +312,8 @@ ngx_hash_init(ngx_hash_init_t *hinit, ng
continue;
}
+ size--;
+
ngx_log_error(NGX_LOG_WARN, hinit->pool->log, 0,
"could not build optimal %s, you should increase "
"either %s_max_size: %i or %s_bucket_size: %i; "

View file

@@ -0,0 +1,59 @@
diff -rup nginx-1.19.9/src/core/nginx.c nginx-1.19.9-patched/src/core/nginx.c
--- nginx-1.19.9/src/core/nginx.c 2017-12-17 00:00:38.136470108 -0800
+++ nginx-1.19.9-patched/src/core/nginx.c 2017-12-16 23:59:51.680958322 -0800
@@ -186,6 +186,7 @@ static u_char *ngx_prefix;
static u_char *ngx_conf_file;
static u_char *ngx_conf_params;
static char *ngx_signal;
+ngx_pool_t *saved_init_cycle_pool = NULL;
static char **ngx_os_environ;
@@ -253,6 +254,8 @@ main(int argc, char *const *argv)
return 1;
}
+ saved_init_cycle_pool = init_cycle.pool;
+
if (ngx_save_argv(&init_cycle, argc, argv) != NGX_OK) {
return 1;
}
diff -rup nginx-1.19.9/src/core/ngx_core.h nginx-1.19.9-patched/src/core/ngx_core.h
--- nginx-1.19.9/src/core/ngx_core.h 2017-10-10 08:22:51.000000000 -0700
+++ nginx-1.19.9-patched/src/core/ngx_core.h 2017-12-16 23:59:51.679958370 -0800
@@ -108,4 +108,6 @@ void ngx_cpuinfo(void);
#define NGX_DISABLE_SYMLINKS_NOTOWNER 2
#endif
+extern ngx_pool_t *saved_init_cycle_pool;
+
#endif /* _NGX_CORE_H_INCLUDED_ */
diff -rup nginx-1.19.9/src/core/ngx_cycle.c nginx-1.19.9-patched/src/core/ngx_cycle.c
--- nginx-1.19.9/src/core/ngx_cycle.c 2017-10-10 08:22:51.000000000 -0700
+++ nginx-1.19.9-patched/src/core/ngx_cycle.c 2017-12-16 23:59:51.678958419 -0800
@@ -748,6 +748,10 @@ old_shm_zone_done:
if (ngx_process == NGX_PROCESS_MASTER || ngx_is_init_cycle(old_cycle)) {
+ if (ngx_is_init_cycle(old_cycle)) {
+ saved_init_cycle_pool = NULL;
+ }
+
ngx_destroy_pool(old_cycle->pool);
cycle->old_cycle = NULL;
diff -rup nginx-1.19.9/src/os/unix/ngx_process_cycle.c nginx-1.19.9-patched/src/os/unix/ngx_process_cycle.c
--- nginx-1.19.9/src/os/unix/ngx_process_cycle.c 2017-12-17 00:00:38.142469762 -0800
+++ nginx-1.19.9-patched/src/os/unix/ngx_process_cycle.c 2017-12-16 23:59:51.691957791 -0800
@@ -687,6 +692,11 @@ ngx_master_process_exit(ngx_cycle_t *cyc
ngx_exit_cycle.files_n = ngx_cycle->files_n;
ngx_cycle = &ngx_exit_cycle;
+ if (saved_init_cycle_pool != NULL && saved_init_cycle_pool != cycle->pool) {
+ ngx_destroy_pool(saved_init_cycle_pool);
+ saved_init_cycle_pool = NULL;
+ }
+
ngx_destroy_pool(cycle->pool);
exit(0);

View file

@@ -0,0 +1,13 @@
--- nginx-1.19.9/src/core/ngx_log.h 2013-10-08 05:07:14.000000000 -0700
+++ nginx-1.19.9-patched/src/core/ngx_log.h 2013-12-05 20:35:35.996236720 -0800
@@ -64,7 +64,9 @@ struct ngx_log_s {
};
-#define NGX_MAX_ERROR_STR 2048
+#ifndef NGX_MAX_ERROR_STR
+#define NGX_MAX_ERROR_STR 4096
+#endif
/*********************************/

View file

@@ -0,0 +1,36 @@
diff -urp nginx-1.19.9/auto/cc/clang nginx-1.19.9-patched/auto/cc/clang
--- nginx-1.19.9/auto/cc/clang 2014-03-04 03:39:24.000000000 -0800
+++ nginx-1.19.9-patched/auto/cc/clang 2014-03-13 20:54:26.241413360 -0700
@@ -89,7 +89,7 @@ CFLAGS="$CFLAGS -Wconditional-uninitiali
CFLAGS="$CFLAGS -Wno-unused-parameter"
# stop on warning
-CFLAGS="$CFLAGS -Werror"
+#CFLAGS="$CFLAGS -Werror"
# debug
CFLAGS="$CFLAGS -g"
diff -urp nginx-1.19.9/auto/cc/gcc nginx-1.19.9-patched/auto/cc/gcc
--- nginx-1.19.9/auto/cc/gcc 2014-03-04 03:39:24.000000000 -0800
+++ nginx-1.19.9-patched/auto/cc/gcc 2014-03-13 20:54:13.301355329 -0700
@@ -168,7 +168,7 @@ esac
# stop on warning
-CFLAGS="$CFLAGS -Werror"
+#CFLAGS="$CFLAGS -Werror"
# debug
CFLAGS="$CFLAGS -g"
diff -urp nginx-1.19.9/auto/cc/icc nginx-1.19.9-patched/auto/cc/icc
--- nginx-1.19.9/auto/cc/icc 2014-03-04 03:39:24.000000000 -0800
+++ nginx-1.19.9-patched/auto/cc/icc 2014-03-13 20:54:13.301355329 -0700
@@ -115,7 +115,7 @@ case "$NGX_ICC_VER" in
esac
# stop on warning
-CFLAGS="$CFLAGS -Werror"
+#CFLAGS="$CFLAGS -Werror"
# debug
CFLAGS="$CFLAGS -g"

View file

@@ -0,0 +1,19 @@
--- nginx-1.19.9/src/http/modules/ngx_http_proxy_module.c 2017-07-16 14:02:51.000000000 +0800
+++ nginx-1.19.9-patched/src/http/modules/ngx_http_proxy_module.c 2017-07-16 14:02:51.000000000 +0800
@@ -793,13 +793,13 @@ static ngx_keyval_t ngx_http_proxy_cach
static ngx_http_variable_t ngx_http_proxy_vars[] = {
{ ngx_string("proxy_host"), NULL, ngx_http_proxy_host_variable, 0,
- NGX_HTTP_VAR_CHANGEABLE|NGX_HTTP_VAR_NOCACHEABLE|NGX_HTTP_VAR_NOHASH, 0 },
+ NGX_HTTP_VAR_CHANGEABLE|NGX_HTTP_VAR_NOCACHEABLE, 0 },
{ ngx_string("proxy_port"), NULL, ngx_http_proxy_port_variable, 0,
- NGX_HTTP_VAR_CHANGEABLE|NGX_HTTP_VAR_NOCACHEABLE|NGX_HTTP_VAR_NOHASH, 0 },
+ NGX_HTTP_VAR_CHANGEABLE|NGX_HTTP_VAR_NOCACHEABLE, 0 },
{ ngx_string("proxy_add_x_forwarded_for"), NULL,
- ngx_http_proxy_add_x_forwarded_for_variable, 0, NGX_HTTP_VAR_NOHASH, 0 },
+ ngx_http_proxy_add_x_forwarded_for_variable, 0, 0, 0 },
#if 0
{ ngx_string("proxy_add_via"), NULL, NULL, 0, NGX_HTTP_VAR_NOHASH, 0 },

View file

@@ -0,0 +1,263 @@
diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c
index cd55520c..dade1846 100644
--- a/src/core/ngx_resolver.c
+++ b/src/core/ngx_resolver.c
@@ -9,12 +9,26 @@
#include <ngx_core.h>
#include <ngx_event.h>
+#if !(NGX_WIN32)
+#include <resolv.h>
+#endif
+
#define NGX_RESOLVER_UDP_SIZE 4096
#define NGX_RESOLVER_TCP_RSIZE (2 + 65535)
#define NGX_RESOLVER_TCP_WSIZE 8192
+#if !(NGX_WIN32)
+/*
+ * note that 2KB should be more than enough for majority of the
+ * resolv.conf files out there. it also acts as a safety guard to prevent
+ * abuse.
+ */
+#define NGX_RESOLVER_FILE_BUF_SIZE 2048
+#define NGX_RESOLVER_FILE_NAME "/etc/resolv.conf"
+#endif
+
typedef struct {
u_char ident_hi;
@@ -131,6 +145,191 @@ static ngx_resolver_node_t *ngx_resolver_lookup_addr6(ngx_resolver_t *r,
#endif
+#if !(NGX_WIN32)
+static ngx_int_t
+ngx_resolver_read_resolv_conf(ngx_conf_t *cf, ngx_resolver_t *r, u_char *path,
+ size_t path_len)
+{
+ ngx_url_t u;
+ ngx_resolver_connection_t *rec;
+ ngx_fd_t fd;
+ ngx_file_t file;
+ u_char buf[NGX_RESOLVER_FILE_BUF_SIZE];
+ u_char ipv6_buf[NGX_INET6_ADDRSTRLEN];
+ ngx_uint_t address = 0, j, total = 0;
+ ssize_t n, i;
+ enum {
+ sw_nameserver,
+ sw_spaces,
+ sw_address,
+ sw_skip
+ } state;
+
+ file.name.data = path;
+ file.name.len = path_len;
+
+ if (ngx_conf_full_name(cf->cycle, &file.name, 1) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ fd = ngx_open_file(file.name.data, NGX_FILE_RDONLY,
+ NGX_FILE_OPEN, 0);
+
+ if (fd == NGX_INVALID_FILE) {
+ ngx_conf_log_error(NGX_LOG_EMERG, cf, ngx_errno,
+ ngx_open_file_n " \"%s\" failed", file.name.data);
+
+ return NGX_ERROR;
+ }
+
+ ngx_memzero(&file, sizeof(ngx_file_t));
+
+ file.fd = fd;
+ file.log = cf->log;
+
+ state = sw_nameserver;
+
+ n = ngx_read_file(&file, buf, NGX_RESOLVER_FILE_BUF_SIZE, 0);
+
+ if (n == NGX_ERROR) {
+ ngx_conf_log_error(NGX_LOG_ALERT, cf, ngx_errno,
+ ngx_read_file_n " \"%s\" failed", file.name.data);
+ }
+
+ if (ngx_close_file(file.fd) == NGX_FILE_ERROR) {
+ ngx_conf_log_error(NGX_LOG_ALERT, cf, ngx_errno,
+ ngx_close_file_n " \"%s\" failed", file.name.data);
+ }
+
+ if (n == NGX_ERROR) {
+ return NGX_ERROR;
+ }
+
+ if (n == 0) {
+ return NGX_OK;
+ }
+
+ for (i = 0; i < n && total < MAXNS; /* void */) {
+ if (buf[i] == '#' || buf[i] == ';') {
+ state = sw_skip;
+ }
+
+ switch (state) {
+
+ case sw_nameserver:
+
+ if ((size_t) n - i >= sizeof("nameserver") - 1
+ && ngx_memcmp(buf + i, "nameserver",
+ sizeof("nameserver") - 1) == 0)
+ {
+ state = sw_spaces;
+ i += sizeof("nameserver") - 1;
+
+ continue;
+ }
+
+ break;
+
+ case sw_spaces:
+ if (buf[i] != '\t' && buf[i] != ' ') {
+ address = i;
+ state = sw_address;
+ }
+
+ break;
+
+ case sw_address:
+
+ if (buf[i] == CR || buf[i] == LF || i == n - 1) {
+ ngx_memzero(&u, sizeof(ngx_url_t));
+
+ u.url.data = buf + address;
+
+ if (i == n - 1 && buf[i] != CR && buf[i] != LF) {
+ u.url.len = n - address;
+
+ } else {
+ u.url.len = i - address;
+ }
+
+ u.default_port = 53;
+
+ /* IPv6? */
+ if (ngx_strlchr(u.url.data, u.url.data + u.url.len,
+ ':') != NULL)
+ {
+ if (u.url.len + 2 > sizeof(ipv6_buf)) {
+ ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
+ "IPv6 resolver address is too long:"
+ " \"%V\"", &u.url);
+
+ return NGX_ERROR;
+ }
+
+ ipv6_buf[0] = '[';
+ ngx_memcpy(ipv6_buf + 1, u.url.data, u.url.len);
+ ipv6_buf[u.url.len + 1] = ']';
+
+ u.url.data = ipv6_buf;
+ u.url.len = u.url.len + 2;
+ }
+
+ if (ngx_parse_url(cf->pool, &u) != NGX_OK) {
+ if (u.err) {
+ ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
+ "%s in resolver \"%V\"",
+ u.err, &u.url);
+ }
+
+ return NGX_ERROR;
+ }
+
+ rec = ngx_array_push_n(&r->connections, u.naddrs);
+ if (rec == NULL) {
+ return NGX_ERROR;
+ }
+
+ ngx_memzero(rec, u.naddrs * sizeof(ngx_resolver_connection_t));
+
+ for (j = 0; j < u.naddrs; j++) {
+ rec[j].sockaddr = u.addrs[j].sockaddr;
+ rec[j].socklen = u.addrs[j].socklen;
+ rec[j].server = u.addrs[j].name;
+ rec[j].resolver = r;
+ }
+
+ total++;
+
+#if (NGX_DEBUG)
+ /*
+ * logs with level below NGX_LOG_NOTICE will not be printed
+ * in this early phase
+ */
+ ngx_conf_log_error(NGX_LOG_NOTICE, cf, 0,
+ "parsed a resolver: \"%V\"", &u.url);
+#endif
+
+ state = sw_nameserver;
+ }
+
+ break;
+
+ case sw_skip:
+ if (buf[i] == CR || buf[i] == LF) {
+ state = sw_nameserver;
+ }
+
+ break;
+ }
+
+ i++;
+ }
+
+ return NGX_OK;
+}
+#endif
+
+
ngx_resolver_t *
ngx_resolver_create(ngx_conf_t *cf, ngx_str_t *names, ngx_uint_t n)
{
@@ -246,6 +445,39 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_str_t *names, ngx_uint_t n)
}
#endif
+#if !(NGX_WIN32)
+ if (ngx_strncmp(names[i].data, "local=", 6) == 0) {
+
+ if (ngx_strcmp(&names[i].data[6], "on") == 0) {
+ if (ngx_resolver_read_resolv_conf(cf, r,
+ (u_char *)
+ NGX_RESOLVER_FILE_NAME,
+ sizeof(NGX_RESOLVER_FILE_NAME)
+ - 1)
+ != NGX_OK)
+ {
+ ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
+ "unable to parse local resolver");
+ return NULL;
+ }
+
+ } else if (ngx_strcmp(&names[i].data[6], "off") != 0) {
+ if (ngx_resolver_read_resolv_conf(cf, r,
+ &names[i].data[6],
+ names[i].len - 6)
+ != NGX_OK)
+ {
+ ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
+ "unable to parse local resolver");
+ return NULL;
+ }
+
+ }
+
+ continue;
+ }
+#endif
+
ngx_memzero(&u, sizeof(ngx_url_t));
u.url = names[i];

View file

@@ -0,0 +1,38 @@
diff --git a/src/core/ngx_connection.c b/src/core/ngx_connection.c
--- a/src/core/ngx_connection.c
+++ b/src/core/ngx_connection.c
@@ -1118,6 +1118,12 @@ ngx_close_listening_sockets(ngx_cycle_t *cycle)
ls = cycle->listening.elts;
for (i = 0; i < cycle->listening.nelts; i++) {
+#if (NGX_HAVE_REUSEPORT)
+ if (ls[i].fd == (ngx_socket_t) -1) {
+ continue;
+ }
+#endif
+
c = ls[i].connection;
if (c) {
diff --git a/src/event/ngx_event.c b/src/event/ngx_event.c
--- a/src/event/ngx_event.c
+++ b/src/event/ngx_event.c
@@ -775,6 +775,18 @@ ngx_event_process_init(ngx_cycle_t *cycle)
#if (NGX_HAVE_REUSEPORT)
if (ls[i].reuseport && ls[i].worker != ngx_worker) {
+ ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
+ "closing unused fd:%d listening on %V",
+ ls[i].fd, &ls[i].addr_text);
+
+ if (ngx_close_socket(ls[i].fd) == -1) {
+ ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
+ ngx_close_socket_n " %V failed",
+ &ls[i].addr_text);
+ }
+
+ ls[i].fd = (ngx_socket_t) -1;
+
continue;
}
#endif

View file

@@ -0,0 +1,75 @@
diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c
index 15680237..12a8c687 100644
--- a/src/os/unix/ngx_process.c
+++ b/src/os/unix/ngx_process.c
@@ -362,8 +362,15 @@ ngx_signal_handler(int signo, siginfo_t *siginfo, void *ucontext)
break;
case ngx_signal_value(NGX_RECONFIGURE_SIGNAL):
- ngx_reconfigure = 1;
- action = ", reconfiguring";
+ if (ngx_process == NGX_PROCESS_SINGLE) {
+ ngx_terminate = 1;
+ action = ", exiting";
+
+ } else {
+ ngx_reconfigure = 1;
+ action = ", reconfiguring";
+ }
+
break;
case ngx_signal_value(NGX_REOPEN_SIGNAL):
diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c
index 5817a2c2..f3d58e97 100644
--- a/src/os/unix/ngx_process_cycle.c
+++ b/src/os/unix/ngx_process_cycle.c
@@ -305,11 +305,26 @@ ngx_single_process_cycle(ngx_cycle_t *cycle)
}
for ( ;; ) {
+ if (ngx_exiting) {
+ if (ngx_event_no_timers_left() == NGX_OK) {
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting");
+
+ for (i = 0; cycle->modules[i]; i++) {
+ if (cycle->modules[i]->exit_process) {
+ cycle->modules[i]->exit_process(cycle);
+ }
+ }
+
+ ngx_master_process_exit(cycle);
+ }
+ }
+
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "worker cycle");
ngx_process_events_and_timers(cycle);
- if (ngx_terminate || ngx_quit) {
+ if (ngx_terminate) {
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting");
for (i = 0; cycle->modules[i]; i++) {
if (cycle->modules[i]->exit_process) {
@@ -320,6 +335,20 @@ ngx_single_process_cycle(ngx_cycle_t *cycle)
ngx_master_process_exit(cycle);
}
+ if (ngx_quit) {
+ ngx_quit = 0;
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
+ "gracefully shutting down");
+ ngx_setproctitle("process is shutting down");
+
+ if (!ngx_exiting) {
+ ngx_exiting = 1;
+ ngx_set_shutdown_timer(cycle);
+ ngx_close_listening_sockets(cycle);
+ ngx_close_idle_connections(cycle);
+ }
+ }
+
if (ngx_reconfigure) {
ngx_reconfigure = 0;
ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "reconfiguring");

View file

@@ -0,0 +1,185 @@
diff --git a/auto/unix b/auto/unix
index 10835f6c..b5b33bb3 100644
--- a/auto/unix
+++ b/auto/unix
@@ -990,3 +990,27 @@ ngx_feature_test='struct addrinfo *res;
if (getaddrinfo("localhost", NULL, NULL, &res) != 0) return 1;
freeaddrinfo(res)'
. auto/feature
+
+ngx_feature="SOCK_CLOEXEC support"
+ngx_feature_name="NGX_HAVE_SOCKET_CLOEXEC"
+ngx_feature_run=no
+ngx_feature_incs="#include <sys/types.h>
+ #include <sys/socket.h>"
+ngx_feature_path=
+ngx_feature_libs=
+ngx_feature_test="int fd;
+ fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, 0);"
+. auto/feature
+
+ngx_feature="FD_CLOEXEC support"
+ngx_feature_name="NGX_HAVE_FD_CLOEXEC"
+ngx_feature_run=no
+ngx_feature_incs="#include <sys/types.h>
+ #include <sys/socket.h>
+ #include <fcntl.h>"
+ngx_feature_path=
+ngx_feature_libs=
+ngx_feature_test="int fd;
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ fcntl(fd, F_SETFD, FD_CLOEXEC);"
+. auto/feature
diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c
index cd55520c..438e0806 100644
--- a/src/core/ngx_resolver.c
+++ b/src/core/ngx_resolver.c
@@ -4466,8 +4466,14 @@ ngx_tcp_connect(ngx_resolver_connection_t *rec)
ngx_event_t *rev, *wev;
ngx_connection_t *c;
+#if (NGX_HAVE_SOCKET_CLOEXEC)
+ s = ngx_socket(rec->sockaddr->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0);
+
+#else
s = ngx_socket(rec->sockaddr->sa_family, SOCK_STREAM, 0);
+#endif
+
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, &rec->log, 0, "TCP socket %d", s);
if (s == (ngx_socket_t) -1) {
@@ -4494,6 +4500,15 @@ ngx_tcp_connect(ngx_resolver_connection_t *rec)
goto failed;
}
+#if (NGX_HAVE_FD_CLOEXEC)
+ if (ngx_cloexec(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, &rec->log, ngx_socket_errno,
+ ngx_cloexec_n " failed");
+
+ goto failed;
+ }
+#endif
+
rev = c->read;
wev = c->write;
diff --git a/src/event/ngx_event.h b/src/event/ngx_event.h
index 19fec68..8c2f01a 100644
--- a/src/event/ngx_event.h
+++ b/src/event/ngx_event.h
@@ -73,6 +73,9 @@ struct ngx_event_s {
/* to test on worker exit */
unsigned channel:1;
unsigned resolver:1;
+#if (HAVE_SOCKET_CLOEXEC_PATCH)
+ unsigned skip_socket_leak_check:1;
+#endif
unsigned cancelable:1;
diff --git a/src/event/ngx_event_accept.c b/src/event/ngx_event_accept.c
index 77563709..5827b9d0 100644
--- a/src/event/ngx_event_accept.c
+++ b/src/event/ngx_event_accept.c
@@ -62,7 +62,9 @@ ngx_event_accept(ngx_event_t *ev)
#if (NGX_HAVE_ACCEPT4)
if (use_accept4) {
- s = accept4(lc->fd, &sa.sockaddr, &socklen, SOCK_NONBLOCK);
+ s = accept4(lc->fd, &sa.sockaddr, &socklen,
+ SOCK_NONBLOCK | SOCK_CLOEXEC);
+
} else {
s = accept(lc->fd, &sa.sockaddr, &socklen);
}
@@ -202,6 +204,16 @@ ngx_event_accept(ngx_event_t *ev)
ngx_close_accepted_connection(c);
return;
}
+
+#if (NGX_HAVE_FD_CLOEXEC)
+ if (ngx_cloexec(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
+ ngx_cloexec_n " failed");
+ ngx_close_accepted_connection(c);
+ return;
+ }
+#endif
+
}
}
diff --git a/src/event/ngx_event_connect.c b/src/event/ngx_event_connect.c
index c5bb8068..cf33b1d2 100644
--- a/src/event/ngx_event_connect.c
+++ b/src/event/ngx_event_connect.c
@@ -38,8 +38,15 @@ ngx_event_connect_peer(ngx_peer_connection_t *pc)
type = (pc->type ? pc->type : SOCK_STREAM);
+#if (NGX_HAVE_SOCKET_CLOEXEC)
+ s = ngx_socket(pc->sockaddr->sa_family, type | SOCK_CLOEXEC, 0);
+
+#else
s = ngx_socket(pc->sockaddr->sa_family, type, 0);
+#endif
+
+
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, "%s socket %d",
(type == SOCK_STREAM) ? "stream" : "dgram", s);
@@ -80,6 +87,15 @@ ngx_event_connect_peer(ngx_peer_connection_t *pc)
goto failed;
}
+#if (NGX_HAVE_FD_CLOEXEC)
+ if (ngx_cloexec(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ ngx_cloexec_n " failed");
+
+ goto failed;
+ }
+#endif
+
if (pc->local) {
#if (NGX_HAVE_TRANSPARENT_PROXY)
diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c
index c4376a5..48e8fa8 100644
--- a/src/os/unix/ngx_process_cycle.c
+++ b/src/os/unix/ngx_process_cycle.c
@@ -960,6 +1029,9 @@ ngx_worker_process_exit(ngx_cycle_t *cycle)
for (i = 0; i < cycle->connection_n; i++) {
if (c[i].fd != -1
&& c[i].read
+#if (HAVE_SOCKET_CLOEXEC_PATCH)
+ && !c[i].read->skip_socket_leak_check
+#endif
&& !c[i].read->accept
&& !c[i].read->channel
&& !c[i].read->resolver)
diff --git a/src/os/unix/ngx_socket.h b/src/os/unix/ngx_socket.h
index fcc51533..d1eebf47 100644
--- a/src/os/unix/ngx_socket.h
+++ b/src/os/unix/ngx_socket.h
@@ -38,6 +38,17 @@ int ngx_blocking(ngx_socket_t s);
#endif
+#if (NGX_HAVE_FD_CLOEXEC)
+
+#define ngx_cloexec(s) fcntl(s, F_SETFD, FD_CLOEXEC)
+#define ngx_cloexec_n "fcntl(FD_CLOEXEC)"
+
+/* at least FD_CLOEXEC is required to ensure connection fd is closed
+ * after execve */
+#define HAVE_SOCKET_CLOEXEC_PATCH 1
+
+#endif
+
int ngx_tcp_nopush(ngx_socket_t s);
int ngx_tcp_push(ngx_socket_t s);

View file

@@ -0,0 +1,64 @@
# HG changeset patch
# User Yichun Zhang <agentzh@openresty.org>
# Date 1451762084 28800
# Sat Jan 02 11:14:44 2016 -0800
# Node ID 449f0461859c16e95bdb18e8be6b94401545d3dd
# Parent 78b4e10b4367b31367aad3c83c9c3acdd42397c4
SSL: handled SSL_CTX_set_cert_cb() callback yielding.
OpenSSL 1.0.2+ introduces SSL_CTX_set_cert_cb() to allow custom
callbacks to serve the SSL certificates and private keys dynamically
and lazily. The callbacks may yield for nonblocking I/O or sleeping.
Here we added support for such usage in NGINX 3rd-party modules
(like ngx_lua) in NGINX's event handlers for downstream SSL
connections.
diff -r 78b4e10b4367 -r 449f0461859c src/event/ngx_event_openssl.c
--- a/src/event/ngx_event_openssl.c Thu Dec 17 16:39:15 2015 +0300
+++ b/src/event/ngx_event_openssl.c Sat Jan 02 11:14:44 2016 -0800
@@ -1445,6 +1445,23 @@ ngx_ssl_handshake(ngx_connection_t *c)
return NGX_AGAIN;
}
+#if OPENSSL_VERSION_NUMBER >= 0x10002000L
+ if (sslerr == SSL_ERROR_WANT_X509_LOOKUP) {
+ c->read->handler = ngx_ssl_handshake_handler;
+ c->write->handler = ngx_ssl_handshake_handler;
+
+ if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ if (ngx_handle_write_event(c->write, 0) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ return NGX_AGAIN;
+ }
+#endif
+
err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0;
c->ssl->no_wait_shutdown = 1;
@@ -1558,6 +1575,21 @@ ngx_ssl_try_early_data(ngx_connection_t *c)
return NGX_AGAIN;
}
+ if (sslerr == SSL_ERROR_WANT_X509_LOOKUP) {
+ c->read->handler = ngx_ssl_handshake_handler;
+ c->write->handler = ngx_ssl_handshake_handler;
+
+ if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ if (ngx_handle_write_event(c->write, 0) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ return NGX_AGAIN;
+ }
+
err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0;
c->ssl->no_wait_shutdown = 1;

View file

@@ -0,0 +1,41 @@
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c
--- a/src/event/ngx_event_openssl.c
+++ b/src/event/ngx_event_openssl.c
@@ -1446,7 +1446,12 @@ ngx_ssl_handshake(ngx_connection_t *c)
}
#if OPENSSL_VERSION_NUMBER >= 0x10002000L
- if (sslerr == SSL_ERROR_WANT_X509_LOOKUP) {
+ if (sslerr == SSL_ERROR_WANT_X509_LOOKUP
+# ifdef SSL_ERROR_PENDING_SESSION
+ || sslerr == SSL_ERROR_PENDING_SESSION
+# endif
+ )
+ {
c->read->handler = ngx_ssl_handshake_handler;
c->write->handler = ngx_ssl_handshake_handler;
@@ -1575,6 +1580,23 @@ ngx_ssl_try_early_data(ngx_connection_t *c)
return NGX_AGAIN;
}
+#ifdef SSL_ERROR_PENDING_SESSION
+ if (sslerr == SSL_ERROR_PENDING_SESSION) {
+ c->read->handler = ngx_ssl_handshake_handler;
+ c->write->handler = ngx_ssl_handshake_handler;
+
+ if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ if (ngx_handle_write_event(c->write, 0) != NGX_OK) {
+ return NGX_ERROR;
+ }
+
+ return NGX_AGAIN;
+ }
+#endif
+
err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0;
c->ssl->no_wait_shutdown = 1;

View file

@@ -0,0 +1,31 @@
diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h
index 09d2459..de92724 100644
--- a/src/stream/ngx_stream.h
+++ b/src/stream/ngx_stream.h
@@ -303,4 +303,7 @@ typedef ngx_int_t (*ngx_stream_filter_pt)(ngx_stream_session_t *s,
extern ngx_stream_filter_pt ngx_stream_top_filter;
+#define HAS_NGX_STREAM_PROXY_GET_NEXT_UPSTREAM_TRIES_PATCH 1
+
+
#endif /* _NGX_STREAM_H_INCLUDED_ */
diff --git a/src/stream/ngx_stream_proxy_module.c b/src/stream/ngx_stream_proxy_module.c
index 0afde1c..3254ce1 100644
--- a/src/stream/ngx_stream_proxy_module.c
+++ b/src/stream/ngx_stream_proxy_module.c
@@ -2156,3 +2156,14 @@ ngx_stream_proxy_bind(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
return NGX_CONF_OK;
}
+
+
+ngx_uint_t
+ngx_stream_proxy_get_next_upstream_tries(ngx_stream_session_t *s)
+{
+ ngx_stream_proxy_srv_conf_t *pscf;
+
+ pscf = ngx_stream_get_module_srv_conf(s, ngx_stream_proxy_module);
+
+ return pscf->next_upstream_tries;
+}

View file

@@ -0,0 +1,13 @@
diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c
index e3d11fd9..3717b5fe 100644
--- a/src/stream/ngx_stream_ssl_preread_module.c
+++ b/src/stream/ngx_stream_ssl_preread_module.c
@@ -159,7 +159,7 @@ ngx_stream_ssl_preread_handler(ngx_stream_session_t *s)
rc = ngx_stream_ssl_preread_parse_record(ctx, p, p + len);
if (rc != NGX_AGAIN) {
- return rc;
+ return rc == NGX_OK ? NGX_DECLINED : rc;
}
p += len;

View file

@@ -0,0 +1,23 @@
commit f9907b72a76a21ac5413187b83177a919475c75f
Author: Yichun Zhang (agentzh) <agentzh@gmail.com>
Date: Wed Feb 10 16:05:08 2016 -0800
bugfix: upstream: keep sending request data after the first write attempt.
See
http://mailman.nginx.org/pipermail/nginx-devel/2012-March/002040.html
for more details on the issue.
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
index 69019417..92b7c97f 100644
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -2239,7 +2239,7 @@ ngx_http_upstream_send_request_handler(ngx_http_request_t *r,
#endif
- if (u->header_sent && !u->conf->preserve_output) {
+ if (u->request_body_sent && !u->conf->preserve_output) {
u->write_event_handler = ngx_http_upstream_dummy_handler;
(void) ngx_handle_write_event(c->write, 0);

View file

@@ -0,0 +1,112 @@
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
index 69019417..2265d8f7 100644
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -509,12 +509,19 @@ void
ngx_http_upstream_init(ngx_http_request_t *r)
{
ngx_connection_t *c;
+ ngx_http_upstream_t *u;
c = r->connection;
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
"http init upstream, client timer: %d", c->read->timer_set);
+ u = r->upstream;
+
+ u->connect_timeout = u->conf->connect_timeout;
+ u->send_timeout = u->conf->send_timeout;
+ u->read_timeout = u->conf->read_timeout;
+
#if (NGX_HTTP_V2)
if (r->stream) {
ngx_http_upstream_init_request(r);
@@ -1626,7 +1633,7 @@ ngx_http_upstream_connect(ngx_http_request_t *r, ngx_http_upstream_t *u)
u->request_body_blocked = 0;
if (rc == NGX_AGAIN) {
- ngx_add_timer(c->write, u->conf->connect_timeout);
+ ngx_add_timer(c->write, u->connect_timeout);
return;
}
@@ -1704,7 +1711,7 @@ ngx_http_upstream_ssl_init_connection(ngx_http_request_t *r,
if (rc == NGX_AGAIN) {
if (!c->write->timer_set) {
- ngx_add_timer(c->write, u->conf->connect_timeout);
+ ngx_add_timer(c->write, u->connect_timeout);
}
c->ssl->handler = ngx_http_upstream_ssl_handshake_handler;
@@ -2022,7 +2029,7 @@ ngx_http_upstream_send_request(ngx_http_request_t *r, ngx_http_upstream_t *u,
if (rc == NGX_AGAIN) {
if (!c->write->ready || u->request_body_blocked) {
- ngx_add_timer(c->write, u->conf->send_timeout);
+ ngx_add_timer(c->write, u->send_timeout);
} else if (c->write->timer_set) {
ngx_del_timer(c->write);
@@ -2084,7 +2091,7 @@ ngx_http_upstream_send_request(ngx_http_request_t *r, ngx_http_upstream_t *u,
return;
}
- ngx_add_timer(c->read, u->conf->read_timeout);
+ ngx_add_timer(c->read, u->read_timeout);
if (c->read->ready) {
ngx_http_upstream_process_header(r, u);
@@ -3213,7 +3220,7 @@ ngx_http_upstream_send_response(ngx_http_request_t *r, ngx_http_upstream_t *u)
p->cyclic_temp_file = 0;
}
- p->read_timeout = u->conf->read_timeout;
+ p->read_timeout = u->read_timeout;
p->send_timeout = clcf->send_timeout;
p->send_lowat = clcf->send_lowat;
@@ -3458,7 +3465,7 @@ ngx_http_upstream_process_upgraded(ngx_http_request_t *r,
}
if (upstream->write->active && !upstream->write->ready) {
- ngx_add_timer(upstream->write, u->conf->send_timeout);
+ ngx_add_timer(upstream->write, u->send_timeout);
} else if (upstream->write->timer_set) {
ngx_del_timer(upstream->write);
@@ -3470,7 +3477,7 @@ ngx_http_upstream_process_upgraded(ngx_http_request_t *r,
}
if (upstream->read->active && !upstream->read->ready) {
- ngx_add_timer(upstream->read, u->conf->read_timeout);
+ ngx_add_timer(upstream->read, u->read_timeout);
} else if (upstream->read->timer_set) {
ngx_del_timer(upstream->read);
@@ -3664,7 +3671,7 @@ ngx_http_upstream_process_non_buffered_request(ngx_http_request_t *r,
}
if (upstream->read->active && !upstream->read->ready) {
- ngx_add_timer(upstream->read, u->conf->read_timeout);
+ ngx_add_timer(upstream->read, u->read_timeout);
} else if (upstream->read->timer_set) {
ngx_del_timer(upstream->read);
diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h
index c2f4dc0b..b9eef118 100644
--- a/src/http/ngx_http_upstream.h
+++ b/src/http/ngx_http_upstream.h
@@ -333,6 +333,11 @@ struct ngx_http_upstream_s {
ngx_array_t *caches;
#endif
+#define HAVE_NGX_UPSTREAM_TIMEOUT_FIELDS 1
+ ngx_msec_t connect_timeout;
+ ngx_msec_t send_timeout;
+ ngx_msec_t read_timeout;
+
ngx_http_upstream_headers_in_t headers_in;
ngx_http_upstream_resolved_t *resolved;

View file

@@ -0,0 +1,23 @@
diff --git src/core/ngx_resolver.c src/core/ngx_resolver.c
--- src/core/ngx_resolver.c
+++ src/core/ngx_resolver.c
@@ -4008,15 +4008,15 @@ done:
n = *src++;
} else {
+ if (dst != name->data) {
+ *dst++ = '.';
+ }
+
ngx_strlow(dst, src, n);
dst += n;
src += n;
n = *src++;
-
- if (n != 0) {
- *dst++ = '.';
- }
}
if (n == 0) {