Add echo image to avoid building and installing dependencies in each test (#5028)
parent 9278f0cad2
commit ac2ce11739
5 changed files with 140 additions and 133 deletions
images/echo/Dockerfile (new file, 6 lines)
@@ -0,0 +1,6 @@
FROM openresty/openresty:1.15.8.2-alpine

RUN apk add -U perl curl \
    && opm get bungle/lua-resty-template

COPY nginx.conf /usr/local/openresty/nginx/conf/nginx.conf
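This bakes perl, curl, and the bungle/lua-resty-template OPM package into the image once, instead of installing them in every echo pod at startup. As a rough before/after illustration (not part of the commit; it assumes the echo image has already been built with TAG=dev as sketched after the Makefile below, and swaps the trailing server start for `openresty -v` so both containers exit immediately):

    # Before: each echo pod fetched its dependencies at container start, on every test run.
    docker run --rm openresty/openresty:1.15.8.2-alpine \
      /bin/sh -c 'apk add -U perl curl && opm get bungle/lua-resty-template && openresty -v'
    # After: the prebuilt image already ships the same tooling.
    docker run --rm ingress-controller/echo:dev /bin/sh -c 'opm list && openresty -v'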
images/echo/Makefile (new file, 33 lines)
@@ -0,0 +1,33 @@
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Docker image for e2e testing.

# Use the 0.0 tag for testing, it shouldn't clobber any release builds
TAG ?= 0.0

REGISTRY ?= ingress-controller
DOCKER ?= docker

IMGNAME = echo
IMAGE = $(REGISTRY)/$(IMGNAME)

container:
	$(DOCKER) buildx build \
		--load \
		--platform linux/amd64 \
		-t $(IMAGE):$(TAG) .

clean:
	$(DOCKER) rmi -f $(IMAGE):$(TAG) || true
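With the defaults above, `make container` builds ingress-controller/echo:0.0. A usage sketch, assuming it is run from the repository root and that docker buildx is available; the dev tag is chosen only because it matches the image reference used in the e2e framework change further down:

    # Build the echo image with the tag the e2e framework expects.
    make -C images/echo/ container TAG=dev REGISTRY=ingress-controller
    # Roughly what the `container` target expands to:
    docker buildx build --load --platform linux/amd64 -t ingress-controller/echo:dev images/echo/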
images/echo/nginx.conf (new file, 93 lines)
@@ -0,0 +1,93 @@
env HOSTNAME;
env NODE_NAME;
env POD_NAME;
env POD_NAMESPACE;
env POD_IP;

daemon off;

events {
	worker_connections 1024;
}

http {
	default_type 'text/plain';
	client_max_body_size 0;

	init_by_lua_block {
		local template = require "resty.template"

		tmpl = template.compile([[

Hostname: {*os.getenv("HOSTNAME") or "N/A"*}

Pod Information:
{% if os.getenv("POD_NAME") then %}
node name: {*os.getenv("NODE_NAME") or "N/A"*}
pod name: {*os.getenv("POD_NAME") or "N/A"*}
pod namespace: {*os.getenv("POD_NAMESPACE") or "N/A"*}
pod IP: {*os.getenv("POD_IP") or "N/A"*}
{% else %}
-no pod information available-
{% end %}

Server values:
server_version=nginx: {*ngx.var.nginx_version*} - lua: {*ngx.config.ngx_lua_version*}

Request Information:
client_address={*ngx.var.remote_addr*}
method={*ngx.req.get_method()*}
real path={*ngx.var.request_uri*}
query={*ngx.var.query_string or ""*}
request_version={*ngx.req.http_version()*}
request_scheme={*ngx.var.scheme*}
request_uri={*ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri*}

Request Headers:
{% for i, key in ipairs(keys) do %}
{% local val = headers[key] %}
{% if type(val) == "table" then %}
{% for i = 1,#val do %}
{*key*}={*val[i]*}
{% end %}
{% else %}
{*key*}={*val*}
{% end %}
{% end %}

Request Body:
{*ngx.var.request_body or " -no body in request-"*}
]])
	}

	server {
		listen 80 default_server reuseport;

		server_name _;

		keepalive_timeout 620s;

		location / {
			lua_need_request_body on;

			header_filter_by_lua_block {
				if ngx.var.arg_hsts == "true" then
					ngx.header["Strict-Transport-Security"] = "max-age=3600; preload"
				end
			}

			content_by_lua_block {
				ngx.header["Server"] = "echoserver"

				local headers = ngx.req.get_headers()
				local keys = {}
				for key, val in pairs(headers) do
					table.insert(keys, key)
				end
				table.sort(keys)

				ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers}))
			}
		}
	}
}
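The configuration renders request details through lua-resty-template and only adds a Strict-Transport-Security header when the request carries ?hsts=true. A quick local smoke test might look like the following sketch (port 8080 and the container name are arbitrary; `openresty` is started explicitly because the config already sets `daemon off;`, and it assumes the image was built with TAG=dev as above):

    # Run the echo image locally and exercise the template.
    docker run --rm -d --name echo-smoke -p 8080:80 ingress-controller/echo:dev openresty
    curl -i -H 'Host: foo.bar' 'http://127.0.0.1:8080/some/path?hsts=true'
    # Expect Server: echoserver, a Strict-Transport-Security header (hsts=true),
    # and the rendered Hostname / Request Information / Request Headers sections.
    docker stop echo-smoke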
@@ -51,140 +51,12 @@ func (f *Framework) NewEchoDeploymentWithReplicas(replicas int) {
// replicas is configurable and
// name is configurable
func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas int) {

	data := map[string]string{}
	data["nginx.conf"] = `#

env HOSTNAME;
env NODE_NAME;
env POD_NAME;
env POD_NAMESPACE;
env POD_IP;

daemon off;

events {
	worker_connections 1024;
}

http {
	default_type 'text/plain';
	client_max_body_size 0;

	init_by_lua_block {
		local template = require "resty.template"

		tmpl = template.compile([[

Hostname: {*os.getenv("HOSTNAME") or "N/A"*}

Pod Information:
{% if os.getenv("POD_NAME") then %}
node name: {*os.getenv("NODE_NAME") or "N/A"*}
pod name: {*os.getenv("POD_NAME") or "N/A"*}
pod namespace: {*os.getenv("POD_NAMESPACE") or "N/A"*}
pod IP: {*os.getenv("POD_IP") or "N/A"*}
{% else %}
-no pod information available-
{% end %}

Server values:
server_version=nginx: {*ngx.var.nginx_version*} - lua: {*ngx.config.ngx_lua_version*}

Request Information:
client_address={*ngx.var.remote_addr*}
method={*ngx.req.get_method()*}
real path={*ngx.var.request_uri*}
query={*ngx.var.query_string or ""*}
request_version={*ngx.req.http_version()*}
request_scheme={*ngx.var.scheme*}
request_uri={*ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri*}

Request Headers:
{% for i, key in ipairs(keys) do %}
{% local val = headers[key] %}
{% if type(val) == "table" then %}
{% for i = 1,#val do %}
{*key*}={*val[i]*}
{% end %}
{% else %}
{*key*}={*val*}
{% end %}
{% end %}

Request Body:
{*ngx.var.request_body or " -no body in request-"*}
]])
	}

	server {
		listen 80 default_server reuseport;

		server_name _;

		keepalive_timeout 620s;

		location / {
			lua_need_request_body on;

			header_filter_by_lua_block {
				if ngx.var.arg_hsts == "true" then
					ngx.header["Strict-Transport-Security"] = "max-age=3600; preload"
				end
			}

			content_by_lua_block {
				ngx.header["Server"] = "echoserver"

				local headers = ngx.req.get_headers()
				local keys = {}
				for key, val in pairs(headers) do
					table.insert(keys, key)
				end
				table.sort(keys)

				ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers}))
			}
		}
	}
}
`

	_, err := f.KubeClientSet.CoreV1().ConfigMaps(f.Namespace).Create(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Namespace: f.Namespace,
		},
		Data: data,
	})
	Expect(err).NotTo(HaveOccurred(), "failed to create a deployment")

	deployment := newDeployment(name, f.Namespace, "openresty/openresty:1.15.8.2-alpine", 80, int32(replicas),
	deployment := newDeployment(name, f.Namespace, "ingress-controller/echo:dev", 80, int32(replicas),
		[]string{
			"/bin/sh",
			"-c",
			"apk add -U perl curl && opm get bungle/lua-resty-template && openresty",
		},
		[]corev1.VolumeMount{
			{
				Name: name,
				MountPath: "/usr/local/openresty/nginx/conf/nginx.conf",
				SubPath: "nginx.conf",
				ReadOnly: true,
			},
		},
		[]corev1.Volume{
			{
				Name: name,
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: name,
						},
					},
				},
			},
		},
			"openresty",
		},
		[]corev1.VolumeMount{},
		[]corev1.Volume{},
	)

	d := f.EnsureDeployment(deployment)
@@ -213,7 +85,7 @@ Request Body:
	s := f.EnsureService(service)
	Expect(s).NotTo(BeNil(), "expected a service but none returned")

	err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, replicas)
	err := WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, replicas)
	Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready")
}
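With the framework now pointing at ingress-controller/echo:dev instead of installing packages in the pod's startup command, a quick way to confirm what a test actually deployed is to inspect the generated Deployment; a sketch (the namespace variable and the echo deployment name are illustrative, since the framework derives both from the test):

    # Check which image the echo deployment created by an e2e test is running.
    kubectl -n "$TEST_NAMESPACE" get deployment echo \
      -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
    # Expected output: ingress-controller/echo:dev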
@@ -79,6 +79,7 @@ echo "
make -C ${DIR}/../../ build container
make -C ${DIR}/../../ e2e-test-image
make -C ${DIR}/../../images/fastcgi-helloserver/ build container
make -C ${DIR}/../../images/echo/ container
make -C ${DIR}/../../images/httpbin/ container
" | parallel --joblog /tmp/log {} || cat /tmp/log
@@ -95,6 +96,8 @@ kind load docker-image --name="${KIND_CLUSTER_NAME}" ${REGISTRY}/nginx-ingress-c
kind load docker-image --name="${KIND_CLUSTER_NAME}" ${REGISTRY}/fastcgi-helloserver:${TAG}
kind load docker-image --name="${KIND_CLUSTER_NAME}" openresty/openresty:1.15.8.2-alpine
kind load docker-image --name="${KIND_CLUSTER_NAME}" ${REGISTRY}/httpbin:${TAG}
kind load docker-image --name="${KIND_CLUSTER_NAME}" ${REGISTRY}/echo:${TAG}

" | parallel --joblog /tmp/log {} || cat /tmp/log

echo "[dev-env] running e2e tests..."
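When iterating on only the echo image outside this script, the two new steps can also be run by hand; a sketch assuming REGISTRY, TAG and KIND_CLUSTER_NAME are set to the same values the script uses:

    # Rebuild the echo image and load it into the kind cluster used by the e2e tests.
    make -C images/echo/ container TAG="${TAG}" REGISTRY="${REGISTRY}"
    kind load docker-image --name="${KIND_CLUSTER_NAME}" "${REGISTRY}/echo:${TAG}"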