Migrate e2e installation to Helm (#5086)

Manuel Alejandro de Brito Fontes 2020-02-16 11:58:37 -03:00 committed by GitHub
parent 4b5c39e97b
commit 37c24b0df5
46 changed files with 321 additions and 725 deletions

View file

@ -195,7 +195,7 @@ e2e-test: check-go-version ## Run e2e tests (expects access to a working Kuberne
@build/run-e2e-suite.sh
.PHONY: e2e-test-image
e2e-test-image: e2e-test-binary ## Build image for e2e tests.
e2e-test-image: ## Build image for e2e tests.
@make -C test/e2e-image
.PHONY: e2e-test-binary
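With the binary prerequisite gone, building the e2e image and running the suite are decoupled steps. A sketch of the usual sequence, assuming the current kube context points at a disposable test cluster (the target names come from this Makefile):

make e2e-test-image
make e2e-test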

View file

@ -24,7 +24,7 @@ set -o pipefail
DIR=$(cd $(dirname "${BASH_SOURCE}") && pwd -P)
export TAG=dev
export TAG=0.0.0-dev
export ARCH=amd64
export REGISTRY=${REGISTRY:-ingress-controller}
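The throwaway tag becomes semver-shaped (0.0.0-dev) rather than dev. A minimal sketch of how these exports compose an image reference, assuming the ${REGISTRY}/<image>:${TAG} pattern that the values files elsewhere in this commit follow:

export TAG=0.0.0-dev
export ARCH=amd64
export REGISTRY=${REGISTRY:-ingress-controller}
echo "${REGISTRY}/nginx-ingress-controller:${TAG}"   # ingress-controller/nginx-ingress-controller:0.0.0-dev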

View file

@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../cloud-generic
patchesStrategicMerge:
- service-l4.yaml
configMapGenerator:
- name: nginx-configuration
behavior: merge
literals:
- use-proxy-protocol=true
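This overlay, and the AWS L7, NLB, and NodePort variants deleted below, fold into chart values after the migration. A hypothetical Helm equivalent of this L4 overlay, assuming the stable/nginx-ingress chart maps controller.config onto the controller ConfigMap (as the e2e values files in this commit do):

cat << EOF | helm install nginx-ingress stable/nginx-ingress --values -
controller:
  config:
    use-proxy-protocol: "true"
EOF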

View file

@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../cloud-generic
patchesStrategicMerge:
- service-l7.yaml
configMapGenerator:
- name: nginx-configuration
behavior: merge
literals:
- use-proxy-protocol=false
- use-forwarded-headers=true
- proxy-real-ip-cidr=0.0.0.0/0 # restrict this to the IP addresses of ELB

View file

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../cloud-generic
patchesStrategicMerge:
- service-nlb.yaml

View file

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../cloud-generic
patchesStrategicMerge:
- service-nodeport.yaml

View file

@ -1,91 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
spec:
replicas: 1
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
# wait up to five minutes for the drain of connections
terminationGracePeriodSeconds: 300
serviceAccountName: nginx-ingress-serviceaccount
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.29.0
args:
- /nginx-ingress-controller
- --configmap=$(POD_NAMESPACE)/$(NGINX_CONFIGMAP_NAME)
- --tcp-services-configmap=$(POD_NAMESPACE)/$(TCP_CONFIGMAP_NAME)
- --udp-services-configmap=$(POD_NAMESPACE)/$(UDP_CONFIGMAP_NAME)
- --publish-service=$(POD_NAMESPACE)/$(SERVICE_NAME)
- --annotations-prefix=nginx.ingress.kubernetes.io
securityContext:
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 101
runAsUser: 101
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
---
apiVersion: v1
kind: LimitRange
metadata:
name: ingress-nginx
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
limits:
- min:
memory: 90Mi
cpu: 100m
type: Container

View file

@ -1,50 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ingress-nginx
commonLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
resources:
- deployment.yaml
- role-binding.yaml
- role.yaml
- service-account.yaml
- service.yaml
images:
- name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
newTag: 0.29.0
vars:
- fieldref:
fieldPath: metadata.name
name: NGINX_CONFIGMAP_NAME
objref:
apiVersion: v1
kind: ConfigMap
name: nginx-configuration
- fieldref:
fieldPath: metadata.name
name: TCP_CONFIGMAP_NAME
objref:
apiVersion: v1
kind: ConfigMap
name: tcp-services
- fieldref:
fieldPath: metadata.name
name: UDP_CONFIGMAP_NAME
objref:
apiVersion: v1
kind: ConfigMap
name: udp-services
- fieldref:
fieldPath: metadata.name
name: SERVICE_NAME
objref:
apiVersion: v1
kind: Service
name: ingress-nginx
configMapGenerator:
- name: nginx-configuration
- name: tcp-services
- name: udp-services
generatorOptions:
disableNameSuffixHash: true
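The vars block above existed only to inject ConfigMap and Service names into the controller flags; with Helm the same wiring goes through chart values. A sketch using keys that appear later in this commit (extraArgs in the values files and in wait-for-nginx.sh); the --set spelling is an assumption:

helm install nginx-ingress stable/nginx-ingress \
    --namespace "$NAMESPACE" \
    --set controller.extraArgs.tcp-services-configmap="$NAMESPACE/tcp-services" \
    --set controller.extraArgs.update-status="false"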

View file

@ -1,11 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount

View file

@ -1,39 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get

View file

@ -1,4 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount

View file

@ -1,16 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: ingress-nginx
spec:
externalTrafficPolicy: Local
type: LoadBalancer
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https

View file

@ -1,11 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount

View file

@ -1,54 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses/status
verbs:
- update

View file

@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
resources:
- cluster-role.yaml
- cluster-role-binding.yaml

View file

@ -1,28 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
spec:
template:
spec:
containers:
- name: nginx-ingress-controller
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 10
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5

View file

@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ingress-nginx
bases:
- ../baremetal
- ../cluster-wide
images:
- name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
newTag: dev
patchesStrategicMerge:
- service-hostport.yaml
- deployment.yaml

View file

@ -1,25 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
spec:
replicas: 1
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
containers:
- name: nginx-ingress-controller
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
nodeSelector:
ingress-ready: "true"
tolerations:
- key: node-role.kubernetes.io/master
operator: Equal
effect: NoSchedule

View file

@ -531,87 +531,58 @@ func New(
return name == configmap || name == tcp || name == udp
}
-	cmEventHandler := cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			cm := obj.(*corev1.ConfigMap)
-			key := k8s.MetaNamespaceKey(cm)
-
-			triggerUpdate := false
-			// updates to configuration configmaps can trigger an update
-			if changeTriggerUpdate(key) {
-				recorder.Eventf(cm, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("ConfigMap %v", key))
-				triggerUpdate = true
-				if key == configmap {
-					store.setConfig(cm)
-				}
-			}
-
-			ings := store.listers.IngressWithAnnotation.List()
-			for _, ingKey := range ings {
-				key := k8s.MetaNamespaceKey(ingKey)
-				ing, err := store.getIngress(key)
-				if err != nil {
-					klog.Errorf("could not find Ingress %v in local store: %v", key, err)
-					continue
-				}
-
-				if parser.AnnotationsReferencesConfigmap(ing) {
-					recorder.Eventf(cm, corev1.EventTypeNormal, "CREATE", fmt.Sprintf("ConfigMap %v", key))
-					store.syncIngress(ing)
-					triggerUpdate = true
-				}
-			}
-
-			if triggerUpdate {
-				updateCh.In() <- Event{
-					Type: ConfigurationEvent,
-					Obj:  obj,
-				}
-			}
+	handleCfgMapEvent := func(key string, cfgMap *corev1.ConfigMap, eventName string) {
+		// updates to configuration configmaps can trigger an update
+		triggerUpdate := false
+		if changeTriggerUpdate(key) {
+			triggerUpdate = true
+			recorder.Eventf(cfgMap, corev1.EventTypeNormal, eventName, fmt.Sprintf("ConfigMap %v", key))
+			if key == configmap {
+				store.setConfig(cfgMap)
+			}
+		}
+
+		ings := store.listers.IngressWithAnnotation.List()
+		for _, ingKey := range ings {
+			key := k8s.MetaNamespaceKey(ingKey)
+			ing, err := store.getIngress(key)
+			if err != nil {
+				klog.Errorf("could not find Ingress %v in local store: %v", key, err)
+				continue
+			}
+
+			if parser.AnnotationsReferencesConfigmap(ing) {
+				store.syncIngress(ing)
+				continue
+			}
+
+			if triggerUpdate {
+				store.syncIngress(ing)
+			}
+		}
+
+		if triggerUpdate {
+			updateCh.In() <- Event{
+				Type: ConfigurationEvent,
+				Obj:  cfgMap,
+			}
+		}
+	}
+
+	cmEventHandler := cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			cfgMap := obj.(*corev1.ConfigMap)
+			key := k8s.MetaNamespaceKey(cfgMap)
+			handleCfgMapEvent(key, cfgMap, "CREATE")
 		},
 		UpdateFunc: func(old, cur interface{}) {
 			if reflect.DeepEqual(old, cur) {
 				return
 			}
-
-			// used to limit the number of events
-			triggerUpdate := false
-			cm := cur.(*corev1.ConfigMap)
-			key := k8s.MetaNamespaceKey(cm)
-
-			// updates to configuration configmaps can trigger an update
-			if changeTriggerUpdate(key) {
-				recorder.Eventf(cm, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", key))
-				triggerUpdate = true
-			}
-
-			if key == configmap {
-				store.setConfig(cm)
-			}
-
-			ings := store.listers.IngressWithAnnotation.List()
-			for _, ingKey := range ings {
-				key := k8s.MetaNamespaceKey(ingKey)
-				ing, err := store.getIngress(key)
-				if err != nil {
-					klog.Errorf("could not find Ingress %v in local store: %v", key, err)
-					continue
-				}
-
-				if parser.AnnotationsReferencesConfigmap(ing) {
-					recorder.Eventf(cm, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", key))
-					store.syncIngress(ing)
-					triggerUpdate = true
-				}
-			}
-
-			if triggerUpdate {
-				updateCh.In() <- Event{
-					Type: ConfigurationEvent,
-					Obj:  cur,
-				}
-			}
+
+			cfgMap := cur.(*corev1.ConfigMap)
+			key := k8s.MetaNamespaceKey(cfgMap)
+			handleCfgMapEvent(key, cfgMap, "UPDATE")
 		},
 	}

View file

@ -10,15 +10,16 @@ RUN apk add -U --no-cache \
libc6-compat \
openssl
RUN curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash \
&& helm repo add stable https://kubernetes-charts.storage.googleapis.com \
&& helm repo update
COPY --from=BASE /go/bin/ginkgo /usr/local/bin/
COPY --from=BASE /usr/local/bin/kubectl /usr/local/bin/
COPY e2e.sh /e2e.sh
COPY cloud-generic /cloud-generic
COPY cluster-wide /cluster-wide
COPY overlay /overlay
COPY namespace-overlays /namespace-overlays
RUN sed -E -i 's|^- .*deploy/cloud-generic$|- ../cloud-generic|' /overlay/kustomization.yaml
COPY wait-for-nginx.sh /
COPY e2e.test /
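The e2e image now carries Helm 3 with the stable repo preconfigured instead of the copied kustomize trees. A hypothetical smoke check (E2E_IMAGE stands for whatever test/e2e-image produces; the name is not defined in this hunk):

docker run --rm "$E2E_IMAGE" helm repo list   # should list the stable repo added at build time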

View file

@ -22,8 +22,6 @@ endif
cp ../e2e/e2e.test .
cp ../e2e/wait-for-nginx.sh .
cp -r ../../deploy/cloud-generic .
cp -r ../../deploy/cluster-wide .
docker buildx build \
--load \

View file

@ -1,18 +0,0 @@
- op: replace
path: /spec/template/spec/containers/0/livenessProbe/httpGet/path
value: /not-healthz
- op: replace
path: /spec/template/spec/containers/0/livenessProbe/httpGet/port
value: 9090
- op: replace
path: /spec/template/spec/containers/0/readinessProbe/httpGet/path
value: /not-healthz
- op: replace
path: /spec/template/spec/containers/0/readinessProbe/httpGet/port
value: 9090
- op: add
path: /spec/template/spec/containers/0/args/-
value: --health-check-path=/not-healthz
- op: add
path: /spec/template/spec/containers/0/args/-
value: --healthz-port=9090

View file

@ -1,11 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patchesJson6902:
- target:
group: apps
version: v1
kind: Deployment
name: nginx-ingress-controller
path: deployment-patch.yaml
bases:
- ../../overlay

View file

@ -0,0 +1,33 @@
controller:
image:
repository: ingress-controller/nginx-ingress-controller
tag: 1.0.0-dev
extraArgs:
healthz-port: "9090"
# e2e tests do not require information about ingress status
update-status: "false"
scope:
enabled: true
config:
worker-processes: "1"
readinessProbe:
port: 9090
initialDelaySeconds: 1
livenessProbe:
port: 9090
initialDelaySeconds: 1
podLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
service:
type: NodePort
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
defaultBackend:
enabled: false
rbac:
create: false
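This values file replaces the JSON patches deleted above. wait-for-nginx.sh (the last file in this commit) feeds it straight to Helm; the call below is copied from that script, with NAMESPACE and NAMESPACE_OVERLAY supplied by the suite:

helm install nginx-ingress stable/nginx-ingress \
    --namespace=$NAMESPACE \
    --wait \
    --values "$DIR/namespace-overlays/$NAMESPACE_OVERLAY/values.yaml"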

View file

@ -1,12 +0,0 @@
- op: replace
path: /spec/template/spec/containers/0/ports/0/containerPort
value: 1080
- op: replace
path: /spec/template/spec/containers/0/ports/1/containerPort
value: 1443
- op: add
path: /spec/template/spec/containers/0/args/-
value: --http-port=1080
- op: add
path: /spec/template/spec/containers/0/args/-
value: --https-port=1443

View file

@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patchesJson6902:
- target:
group: apps
version: v1
kind: Deployment
name: nginx-ingress-controller
path: deployment-patch.yaml
- target:
version: v1
kind: Service
name: ingress-nginx
path: service-patch.yaml
bases:
- ../../overlay

View file

@ -1,6 +0,0 @@
- op: replace
path: /spec/ports/0/targetPort
value: 1080
- op: replace
path: /spec/ports/1/targetPort
value: 1443

View file

@ -0,0 +1,34 @@
controller:
image:
repository: ingress-controller/nginx-ingress-controller
tag: 1.0.0-dev
containerPort:
http: "1080"
https: "1443"
extraArgs:
http-port: "1080"
https-port: "1443"
# e2e tests do not require information about ingress status
update-status: "false"
scope:
enabled: true
config:
worker-processes: "1"
podLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
service:
name: ingress-nginx
type: NodePort
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
defaultBackend:
enabled: false
rbac:
create: false
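A hypothetical post-install check for these values; the label selector comes from the podLabels above, and the expected ports from containerPort/extraArgs:

kubectl -n "$NAMESPACE" get pods \
    -l app.kubernetes.io/name=ingress-nginx \
    -o jsonpath='{.items[0].spec.containers[0].ports[*].containerPort}'
# expected under these values: 1080 1443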

View file

@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
spec:
template:
spec:
terminationGracePeriodSeconds: 0
initContainers:
- name: enable-coredump
image: busybox
command:
- /bin/sh
- -c
- |
ulimit -c unlimited
echo "/tmp/core.%e.%p" > /proc/sys/kernel/core_pattern
sysctl -w fs.suid_dumpable=2
securityContext:
privileged: true
containers:
- name: nginx-ingress-controller
livenessProbe:
timeoutSeconds: 1
initialDelaySeconds: 1
periodSeconds: 2
readinessProbe:
timeoutSeconds: 1
initialDelaySeconds: 1
periodSeconds: 2
lifecycle:
preStop:
exec:
command:
- /wait-shutdown

View file

@ -1,3 +0,0 @@
- op: add
path: /spec/template/spec/containers/0/args/-1
value: "--watch-namespace=$(POD_NAMESPACE)"

View file

@ -1,34 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../../deploy/cloud-generic
configMapGenerator:
- name: nginx-configuration
behavior: merge
literals:
- worker-processes=1
patchesStrategicMerge:
- deployment-e2e.yaml
- service-protocol-tcp.yaml
patchesJson6902:
- path: deployment-namespace-patch.yaml
target:
group: apps
kind: Deployment
name: nginx-ingress-controller
version: v1
- path: service-cluster-patch.yaml
target:
kind: Service
name: ingress-nginx
version: v1
- path: role.yaml
target:
group: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
version: v1beta1
images:
- name: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
newName: ingress-controller/nginx-ingress-controller
newTag: dev

View file

@ -1,3 +0,0 @@
- op: add
path: /rules/1/resourceNames/-1
value: "ingress-controller-leader-testclass"

View file

@ -1,4 +0,0 @@
- op: remove
path: /spec/externalTrafficPolicy
- op: remove
path: /spec/type

View file

@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx
spec:
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
- name: https
port: 443
targetPort: 443
protocol: TCP

View file

@ -178,8 +178,8 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() {
It("should build proxy next upstream", func() {
annotations := map[string]string{
"nginx.ingress.kubernetes.io/proxy-next-upstream": "error timeout http_502",
"nginx.ingress.kubernetes.io/proxy-next-upstream-timeout": "10",
"nginx.ingress.kubernetes.io/proxy-next-upstream-tries": "5",
"nginx.ingress.kubernetes.io/proxy-next-upstream-timeout": "999999",
"nginx.ingress.kubernetes.io/proxy-next-upstream-tries": "888888",
}
ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations)
@ -187,9 +187,9 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() {
f.WaitForNginxServer(host,
func(server string) bool {
return strings.Contains(server, "proxy_next_upstream error timeout http_502;") &&
strings.Contains(server, "proxy_next_upstream_timeout 10;") &&
strings.Contains(server, "proxy_next_upstream_tries 5;")
return strings.Contains(server, "error timeout http_502;") &&
strings.Contains(server, "999999;") &&
strings.Contains(server, "888888;")
})
})
@ -200,15 +200,15 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() {
f.SetNginxConfigMapData(map[string]string{
"proxy-next-upstream": "timeout http_502",
"proxy-next-upstream-timeout": "53",
"proxy-next-upstream-tries": "44",
"proxy-next-upstream-timeout": "999999",
"proxy-next-upstream-tries": "888888",
})
f.WaitForNginxServer(host,
func(server string) bool {
return strings.Contains(server, "proxy_next_upstream timeout http_502;") &&
strings.Contains(server, "proxy_next_upstream_timeout 53;") &&
strings.Contains(server, "proxy_next_upstream_tries 44;")
return strings.Contains(server, "timeout http_502;") &&
strings.Contains(server, "999999;") &&
strings.Contains(server, "888888;")
})
})

View file

@ -51,7 +51,7 @@ func (f *Framework) NewEchoDeploymentWithReplicas(replicas int) {
// replicas is configurable and
// name is configurable
func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas int) {
deployment := newDeployment(name, f.Namespace, "ingress-controller/echo:dev", 80, int32(replicas),
deployment := newDeployment(name, f.Namespace, "ingress-controller/echo:1.0.0-dev", 80, int32(replicas),
[]string{
"openresty",
},
@ -329,7 +329,7 @@ func newDeployment(name, namespace, image string, port int32, replicas int32, co
// NewHttpbinDeployment creates a new single replica deployment of the httpbin image in a particular namespace.
func (f *Framework) NewHttpbinDeployment() {
f.NewDeployment(HTTPBinService, "ingress-controller/httpbin:dev", 80, 1)
f.NewDeployment(HTTPBinService, "ingress-controller/httpbin:1.0.0-dev", 80, 1)
}
// NewDeployment creates a new deployment in a particular namespace.

View file

@ -58,7 +58,7 @@ func (f *Framework) NewNewFastCGIHelloServerDeploymentWithReplicas(replicas int3
Containers: []corev1.Container{
{
Name: "fastcgi-helloserver",
Image: "ingress-controller/fastcgi-helloserver:dev",
Image: "ingress-controller/fastcgi-helloserver:1.0.0-dev",
Env: []corev1.EnvVar{},
Ports: []corev1.ContainerPort{
{

View file

@ -145,7 +145,7 @@ func (f *Framework) AfterEach() {
// IngressNginxDescribe wrapper function for ginkgo describe. Adds namespacing.
func IngressNginxDescribe(text string, body func()) bool {
return ginkgo.Describe("[ingress-nginx] "+text, body)
return ginkgo.Describe(text, body)
}
// MemoryLeakIt is wrapper function for ginkgo It. Adds "[MemoryLeak]" tag and makes static analysis easier.
@ -158,7 +158,7 @@ func (f *Framework) GetNginxIP() string {
s, err := f.KubeClientSet.
CoreV1().
Services(f.Namespace).
Get("ingress-nginx", metav1.GetOptions{})
Get("nginx-ingress-controller", metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error obtaining NGINX IP address")
return s.Spec.ClusterIP
}
@ -168,7 +168,7 @@ func (f *Framework) GetNginxPodIP() []string {
e, err := f.KubeClientSet.
CoreV1().
Endpoints(f.Namespace).
Get("ingress-nginx", metav1.GetOptions{})
Get("nginx-ingress-controller", metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error obtaining NGINX IP address")
eips := make([]string, 0)
for _, s := range e.Subsets {
@ -262,7 +262,7 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b
}
func (f *Framework) getNginxConfigMap() (*v1.ConfigMap, error) {
return f.getConfigMap("nginx-configuration")
return f.getConfigMap("nginx-ingress-controller")
}
func (f *Framework) getConfigMap(name string) (*v1.ConfigMap, error) {
@ -281,36 +281,19 @@ func (f *Framework) getConfigMap(name string) (*v1.ConfigMap, error) {
return config, err
}
// GetNginxConfigMapData gets ingress-nginx's nginx-configuration map's data
func (f *Framework) GetNginxConfigMapData() (map[string]string, error) {
config, err := f.getNginxConfigMap()
if err != nil {
return nil, err
}
if config.Data == nil {
config.Data = map[string]string{}
}
return config.Data, err
}
// SetNginxConfigMapData sets ingress-nginx's nginx-configuration configMap data
// SetNginxConfigMapData sets ingress-nginx's nginx-ingress-controller configMap data
func (f *Framework) SetNginxConfigMapData(cmData map[string]string) {
f.SetConfigMapData("nginx-configuration", cmData)
}
func (f *Framework) SetConfigMapData(name string, cmData map[string]string) {
config, err := f.getConfigMap(name)
cfgMap, err := f.getConfigMap("nginx-ingress-controller")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(config).NotTo(gomega.BeNil(), "expected a configmap but none returned")
gomega.Expect(cfgMap).NotTo(gomega.BeNil(), "expected a configmap but none returned")
config.Data = cmData
cfgMap.Data = cmData
_, err = f.KubeClientSet.
CoreV1().
ConfigMaps(f.Namespace).
Update(config)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
Update(cfgMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error updating configuration configmap")
time.Sleep(5 * time.Second)
}
@ -326,15 +309,20 @@ func (f *Framework) CreateConfigMap(name string, data map[string]string) {
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configMap")
}
// UpdateNginxConfigMapData updates single field in ingress-nginx's nginx-configuration map data
// UpdateNginxConfigMapData updates single field in ingress-nginx's nginx-ingress-controller map data
func (f *Framework) UpdateNginxConfigMapData(key string, value string) {
config, err := f.GetNginxConfigMapData()
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error reading configmap")
config, err := f.getConfigMap("nginx-ingress-controller")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(config).NotTo(gomega.BeNil(), "expected a configmap but none returned")
config[key] = value
config.Data[key] = value
f.SetNginxConfigMapData(config)
time.Sleep(1 * time.Second)
_, err = f.KubeClientSet.
CoreV1().
ConfigMaps(f.Namespace).
Update(config)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error updating configuration configmap")
time.Sleep(5 * time.Second)
}
// DeleteNGINXPod deletes the currently running pod. It waits for the replacement pod to be up.
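The framework now reads the chart-managed ConfigMap (named after the nginx-ingress-controller deployment) instead of the old nginx-configuration one. A manual equivalent of getNginxConfigMap, for debugging a test namespace:

kubectl -n "$NAMESPACE" get configmap nginx-ingress-controller -o yaml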

View file

@ -36,10 +36,10 @@ import (
const (
// Poll how often to poll for conditions
Poll = 3 * time.Second
Poll = 2 * time.Second
// DefaultTimeout time to wait for operations to complete
DefaultTimeout = 3 * time.Minute
DefaultTimeout = 2 * time.Minute
)
func nowStamp() string {

View file

@ -54,7 +54,8 @@ fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export TAG=dev
# Use 1.0.0-dev to make sure we use the latest configuration in the helm template
export TAG=1.0.0-dev
export ARCH=amd64
export REGISTRY=ingress-controller
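A sketch of how the tag drives the rendered chart, assuming the stable repo is reachable (helm template renders locally without a cluster):

helm template nginx-ingress stable/nginx-ingress \
    --set controller.image.repository=ingress-controller/nginx-ingress-controller \
    --set controller.image.tag=1.0.0-dev \
    | grep "image:"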

View file

@ -30,10 +30,10 @@ var _ = framework.IngressNginxDescribe("Customize health check path", func() {
f := framework.NewDefaultFramework("custom-health-check-path")
Context("with a plain HTTP ingress", func() {
It("should return HTTP/1.1 200 OK on custom health check path and port", func() {
It("should return HTTP/1.1 200 OK on custom health port", func() {
f.WaitForNginxConfiguration(func(server string) bool {
return strings.Contains(server, "location /not-healthz")
return strings.Contains(server, "location /healthz")
})
err := framework.WaitForPodsReady(f.KubeClientSet, framework.DefaultTimeout, 1, f.Namespace, metav1.ListOptions{

View file

@ -17,7 +17,6 @@ limitations under the License.
package settings
import (
"fmt"
"net/http"
"strings"
@ -49,7 +48,7 @@ var _ = framework.IngressNginxDescribe("Pod Security Policies", func() {
Expect(err).NotTo(HaveOccurred(), "creating Pod Security Policy")
}
role, err := f.KubeClientSet.RbacV1().ClusterRoles().Get(fmt.Sprintf("nginx-ingress-clusterrole-%v", f.Namespace), metav1.GetOptions{})
role, err := f.KubeClientSet.RbacV1().Roles(f.Namespace).Get("nginx-ingress-controller", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "getting ingress controller cluster role")
Expect(role).NotTo(BeNil())
@ -60,7 +59,7 @@ var _ = framework.IngressNginxDescribe("Pod Security Policies", func() {
Verbs: []string{"use"},
})
_, err = f.KubeClientSet.RbacV1().ClusterRoles().Update(role)
_, err = f.KubeClientSet.RbacV1().Roles(f.Namespace).Update(role)
Expect(err).NotTo(HaveOccurred(), "updating ingress controller cluster role to use a pod security policy")
// update the deployment just to trigger a rolling update and the use of the security policy

View file

@ -17,7 +17,6 @@ limitations under the License.
package settings
import (
"fmt"
"net/http"
"strings"
@ -45,7 +44,7 @@ var _ = framework.IngressNginxDescribe("Pod Security Policies with volumes", fun
Expect(err).NotTo(HaveOccurred(), "creating Pod Security Policy")
}
role, err := f.KubeClientSet.RbacV1().ClusterRoles().Get(fmt.Sprintf("nginx-ingress-clusterrole-%v", f.Namespace), metav1.GetOptions{})
role, err := f.KubeClientSet.RbacV1().Roles(f.Namespace).Get("nginx-ingress-controller", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "getting ingress controller cluster role")
Expect(role).NotTo(BeNil())
@ -56,7 +55,7 @@ var _ = framework.IngressNginxDescribe("Pod Security Policies with volumes", fun
Verbs: []string{"use"},
})
_, err = f.KubeClientSet.RbacV1().ClusterRoles().Update(role)
_, err = f.KubeClientSet.RbacV1().Roles(f.Namespace).Update(role)
Expect(err).NotTo(HaveOccurred(), "updating ingress controller cluster role to use a pod security policy")
err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,

View file

@ -45,20 +45,23 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() {
err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
func(deployment *appsv1.Deployment) error {
args := deployment.Spec.Template.Spec.Containers[0].Args
args := []string{}
// flags --publish-service and --publish-status-address are mutually exclusive
for _, v := range deployment.Spec.Template.Spec.Containers[0].Args {
if strings.Contains(v, "--publish-service") {
continue
}
if strings.Contains(v, "--update-status") {
continue
}
args = append(args, v)
}
args = append(args, fmt.Sprintf("--apiserver-host=http://%s:%d", address.String(), port))
args = append(args, "--publish-status-address=1.1.0.0")
// flags --publish-service and --publish-status-address are mutually exclusive
var index int
for k, v := range args {
if strings.Contains(v, "--publish-service") {
index = k
break
}
}
if index > -1 {
args[index] = ""
}
deployment.Spec.Template.Spec.Containers[0].Args = args
_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(deployment)

View file

@ -62,7 +62,7 @@ var _ = framework.IngressNginxDescribe("TCP Feature", func() {
svc, err := f.KubeClientSet.
CoreV1().
Services(f.Namespace).
Get("ingress-nginx", metav1.GetOptions{})
Get("nginx-ingress-controller", metav1.GetOptions{})
Expect(err).To(BeNil(), "unexpected error obtaining ingress-nginx service")
Expect(svc).NotTo(BeNil(), "expected a service but none returned")
@ -121,7 +121,7 @@ var _ = framework.IngressNginxDescribe("TCP Feature", func() {
svc, err := f.KubeClientSet.
CoreV1().
Services(f.Namespace).
Get("ingress-nginx", metav1.GetOptions{})
Get("nginx-ingress-controller", metav1.GetOptions{})
Expect(err).To(BeNil(), "unexpected error obtaining ingress-nginx service")
Expect(svc).NotTo(BeNil(), "expected a service but none returned")

View file

@ -36,50 +36,160 @@ function on_exit {
}
trap on_exit EXIT
-CLUSTER_WIDE="$DIR/cluster-wide-$NAMESPACE"
-
-mkdir "$CLUSTER_WIDE"
-
-cat << EOF > "$CLUSTER_WIDE/kustomization.yaml"
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-bases:
-- ../cluster-wide
-nameSuffix: "-$NAMESPACE"
-EOF
-
-OVERLAY="$DIR/overlay-$NAMESPACE"
-
-mkdir "$OVERLAY"
-
-cat << EOF > "$OVERLAY/kustomization.yaml"
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: $NAMESPACE
-bases:
-- ../overlay
-- ../cluster-wide-$NAMESPACE
-EOF
+cat << EOF | kubectl apply --namespace=$NAMESPACE -f -
+# Required for e2e tcp tests
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: tcp-services
+  namespace: $NAMESPACE
+  labels:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+
+---
+
+# Source: nginx-ingress/templates/controller-role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+  name: nginx-ingress-controller
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - pods
+  - secrets
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - update
+  - watch
+- apiGroups:
+  - extensions
+  - "networking.k8s.io" # k8s 1.14+
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - extensions
+  - "networking.k8s.io" # k8s 1.14+
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  resourceNames:
+  - ingress-controller-leader-nginx
+  verbs:
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  verbs:
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+
+---
+
+# Source: nginx-ingress/templates/controller-rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+  name: nginx-ingress-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: nginx-ingress-controller
+subjects:
+- kind: ServiceAccount
+  name: nginx-ingress
+  namespace: $NAMESPACE
+EOF

 # Use the namespace overlay if it was requested
 if [[ ! -z "$NAMESPACE_OVERLAY" && -d "$DIR/namespace-overlays/$NAMESPACE_OVERLAY" ]]; then
     echo "Namespace overlay $NAMESPACE_OVERLAY is being used for namespace $NAMESPACE"
-    OVERLAY="$DIR/namespace-overlays/$NAMESPACE"
-    mkdir "$OVERLAY"
-    cat << EOF > "$OVERLAY/kustomization.yaml"
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: $NAMESPACE
-bases:
-- ../../namespace-overlays/$NAMESPACE_OVERLAY
-- ../../cluster-wide-$NAMESPACE
-EOF
+    helm install nginx-ingress stable/nginx-ingress \
+        --namespace=$NAMESPACE \
+        --wait \
+        --values "$DIR/namespace-overlays/$NAMESPACE_OVERLAY/values.yaml"
+else
+    cat << EOF | helm install nginx-ingress stable/nginx-ingress --namespace=$NAMESPACE --wait --values -
+controller:
+  image:
+    repository: ingress-controller/nginx-ingress-controller
+    tag: 1.0.0-dev
+  scope:
+    enabled: true
+  config:
+    worker-processes: "1"
+  readinessProbe:
+    initialDelaySeconds: 1
+  livenessProbe:
+    initialDelaySeconds: 1
+  podLabels:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+  service:
+    type: NodePort
+    labels:
+      app.kubernetes.io/name: ingress-nginx
+      app.kubernetes.io/part-of: ingress-nginx
+  extraArgs:
+    tcp-services-configmap: $NAMESPACE/tcp-services
+    # e2e tests do not require information about ingress status
+    update-status: "false"
+  terminationGracePeriodSeconds: 1
+defaultBackend:
+  enabled: false
+rbac:
+  create: false
+EOF
 fi

-kubectl apply --kustomize "$OVERLAY"
-
 # wait for the deployment and fail if there is an error before starting the execution of any test
 kubectl rollout status \
     --request-timeout=3m \
     --namespace $NAMESPACE \
     deployment nginx-ingress-controller
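The on_exit trap at the top of this file handles teardown. A hypothetical manual equivalent when a test namespace is left behind (release name and namespace as used above):

helm uninstall nginx-ingress --namespace "$NAMESPACE"
kubectl delete namespace "$NAMESPACE" --wait=false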