Add configuration for retries in non-idempotent requests
parent c9f8a06399
commit 7abc7a77f6
8 changed files with 105 additions and 32 deletions
@@ -36,8 +36,8 @@ kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replic
 Now we expose the same application in two different services (so we can create different Ingress rules)
 ```
-kubectl expose rc echoheaders --port=80 --target-port=8080 --name=echoheaders-x
-kubectl expose rc echoheaders --port=80 --target-port=8080 --name=echoheaders-y
+kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
+kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
 ```
 
 Next we create a couple of Ingress rules
@@ -215,6 +215,13 @@ I0316 12:24:37.610073 1 command.go:69] change in configuration detected. R
 - `--v=5` configures NGINX in [debug mode](http://nginx.org/en/docs/debugging_log.html)
 
+
+### Retries in non-idempotent methods
+
+Since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH) in case of an error.
+The previous behavior can be restored using `retry-non-idempotent=true` in the configuration ConfigMap.
+
+
 ## Limitations
 
 TODO
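To make the new setting concrete, a minimal sketch of the configuration ConfigMap is shown below. Only the `retry-non-idempotent` key comes from this commit; the ConfigMap name and namespace are hypothetical placeholders, and the actual name must match whatever the controller is started with.

```yaml
# Hypothetical configuration ConfigMap for the NGINX Ingress controller.
# Only the retry-non-idempotent key is introduced by this commit; the
# metadata values are placeholders.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-ingress-configuration   # hypothetical name
  namespace: default
data:
  retry-non-idempotent: "true"
```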
@@ -198,11 +198,11 @@ func (lbc *loadBalancerController) sync(key string) {
 	}
 
 	ngxConfig := lbc.nginx.ReadConfig(cfg)
-	tcpServices := lbc.getTCPServices()
 	lbc.nginx.CheckAndReload(ngxConfig, nginx.IngressConfig{
 		Upstreams:    upstreams,
 		Servers:      servers,
-		TCPUpstreams: tcpServices,
+		TCPUpstreams: lbc.getTCPServices(),
+		UDPUpstreams: lbc.getUDPServices(),
 	})
 }
 
@@ -285,12 +285,12 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api
 	var endps []nginx.UpstreamServer
 	targetPort, err := strconv.Atoi(svcPort[1])
 	if err != nil {
-		endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1]))
+		endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1]), proto)
 	} else {
 		// we need to use the TargetPort (where the endpoints are running)
 		for _, sp := range svc.Spec.Ports {
 			if sp.Port == targetPort {
-				endps = lbc.getEndpoints(svc, sp.TargetPort)
+				endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
 				break
 			}
 		}
@@ -335,7 +335,7 @@ func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream {
 
 	svc := svcObj.(*api.Service)
 
-	endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort)
+	endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort, api.ProtocolTCP)
 	if len(endps) == 0 {
 		glog.Warningf("service %v does not have any active endpoints", svcKey)
 		upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer())
@@ -383,7 +383,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng
 
 		for _, servicePort := range svc.Spec.Ports {
 			if servicePort.Port == path.Backend.ServicePort.IntValue() {
-				endps := lbc.getEndpoints(svc, servicePort.TargetPort)
+				endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP)
 				if len(endps) == 0 {
 					glog.Warningf("service %v does not have any active endpoints", svcKey)
 				}
@@ -526,7 +526,7 @@ func (lbc *loadBalancerController) getPemsFromIngress(data []interface{}) map[st
 }
 
 // getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination.
-func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString) []nginx.UpstreamServer {
+func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol) []nginx.UpstreamServer {
 	glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String())
 	ep, err := lbc.endpLister.GetServiceEndpoints(s)
 	if err != nil {
@@ -538,6 +538,11 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints
 
 	for _, ss := range ep.Subsets {
 		for _, epPort := range ss.Ports {
+
+			if !reflect.DeepEqual(epPort.Protocol, proto) {
+				continue
+			}
+
 			var targetPort int
 			switch servicePort.Type {
 			case intstr.Int:
13 controllers/nginx/examples/udp/README.md Normal file
@@ -0,0 +1,13 @@
+
+To configure which services and ports will be exposed:
+```
+kubectl create -f udp-configmap-example.yaml
+```
+
+The file `udp-configmap-example.yaml` uses a ConfigMap where the key is the external port to use and the value is
+`<namespace/service name>:<service port>`.
+It is possible to use a number or the name of the port.
+
+```
+kubectl create -f rc-udp.yaml
+```
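As a sketch of the key/value format described in this README, the ConfigMap could look like the following. The `53` entry mirrors the `udp-configmap-example.yaml` added in this commit; the `5353` entry with a named service port is a hypothetical illustration of using a port name instead of a number.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: udp-configmap-example
data:
  # external port 53 -> service kube-dns in kube-system, service port 53 (from this commit)
  53: "kube-system/kube-dns:53"
  # hypothetical: external port 5353 -> service my-dns in default, using the port name "dns"
  5353: "default/my-dns:dns"
```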
56 controllers/nginx/examples/udp/rc-udp.yaml Normal file
@@ -0,0 +1,56 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginx-ingress-controller
+  labels:
+    k8s-app: nginx-ingress-lb
+spec:
+  replicas: 1
+  selector:
+    k8s-app: nginx-ingress-lb
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-ingress-lb
+        name: nginx-ingress-lb
+    spec:
+      containers:
+      - image: aledbf/nginx-third-party:0.9
+        name: nginx-ingress-lb
+        imagePullPolicy: Always
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 10249
+            scheme: HTTP
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        # use downward API
+        env:
+          - name: POD_IP
+            valueFrom:
+              fieldRef:
+                fieldPath: status.podIP
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+        ports:
+        - containerPort: 80
+          hostPort: 80
+        - containerPort: 443
+          hostPort: 4444
+        # we expose 8080 to access nginx stats in url /nginx-status
+        # this is optional
+        - containerPort: 8080
+          hostPort: 8081
+        - containerPort: 53
+          hostPort: 53
+        args:
+        - /nginx-ingress-controller
+        - --default-backend-service=default/default-http-backend
+        - --udp-services-configmap=default/udp-configmap-example
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: udp-configmap-example
+data:
+  53: "kube-system/kube-dns:53"
@@ -1,8 +0,0 @@
-SecRulesEnabled;
-DeniedUrl "/RequestDenied";
-## check rules
-CheckRule "$SQL >= 8" BLOCK;
-CheckRule "$RFI >= 8" BLOCK;
-CheckRule "$TRAVERSAL >= 4" BLOCK;
-CheckRule "$EVADE >= 4" BLOCK;
-CheckRule "$XSS >= 8" BLOCK;
@@ -24,11 +24,6 @@ http {
         require("error_page")
     }
 
-    {{ if $cfg.enableWaf}}
-    # https://github.com/nbs-system/naxsi/wiki/basicsetup
-    include /etc/nginx/naxsi/*.rules;
-    {{ end }}
-
     sendfile on;
     aio threads;
     tcp_nopush on;
@@ -146,7 +141,7 @@ http {
     error_page 504 = @custom_504;
 
     # In case of errors try the next upstream server before returning an error
-    proxy_next_upstream error timeout invalid_header http_502 http_503 http_504;
+    proxy_next_upstream error timeout invalid_header http_502 http_503 http_504 {{ if $cfg.retryNonIdempotent }}non_idempotent{{ end }};
 
     server {
         listen 80 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }};
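For reference, with `retry-non-idempotent` set to `"true"` in the configuration ConfigMap, the template line above should render roughly the directive sketched below; this is the expected output, not a capture from a running controller.

```nginx
# Expected rendering when $cfg.retryNonIdempotent is true: NGINX may also
# retry POST/LOCK/PATCH requests on the listed error conditions.
proxy_next_upstream error timeout invalid_header http_502 http_503 http_504 non_idempotent;
```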
@@ -219,11 +214,6 @@ http {
             proxy_pass http://{{ $location.Upstream.Name }};
         }
         {{ end }}
-        {{ if $cfg.enableWaf}}
-        location /RequestDenied {
-            return 418;
-        }
-        {{ end }}
         {{ template "CUSTOM_ERRORS" $cfg }}
     }
     {{ end }}
@@ -291,15 +281,15 @@ stream {
     # UDP services
     {{ range $i, $udpServer := .udpUpstreams }}
     upstream udp-{{ $udpServer.Upstream.Name }} {
-    {{ range $server := $tcpServer.Upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }};
+    {{ range $server := $udpServer.Upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }};
     {{ end }}
     }
 
     server {
-        listen {{ $tcpServer.Path }} udp;
-        proxy_timeout 1s;
+        listen {{ $udpServer.Path }} udp;
+        proxy_timeout 10s;
         proxy_responses 1;
-        proxy_pass udp-{{ $tcpServer.Upstream.Name }};
+        proxy_pass udp-{{ $udpServer.Upstream.Name }};
     }
     {{ end }}
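As a sketch of what this corrected template section should emit for the `53: "kube-system/kube-dns:53"` example above; the generated upstream name and the endpoint address are hypothetical placeholders, not output captured from a real cluster.

```nginx
# Hypothetical rendering for one UDP service entry.
upstream udp-kube-system-kube-dns-53 {
    server 10.2.0.3:53;   # placeholder endpoint address
}

server {
    listen 53 udp;
    proxy_timeout 10s;
    proxy_responses 1;
    proxy_pass udp-kube-system-kube-dns-53;
}
```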
@@ -94,6 +94,10 @@ type nginxConfiguration struct {
 
 	VtsStatusZoneSize string `structs:"vts-status-zone-size,omitempty"`
 
+	// RetryNonIdempotent since 1.9.13 NGINX will not retry non-idempotent requests (POST, LOCK, PATCH)
+	// in case of an error. The previous behavior can be restored using the value true
+	RetryNonIdempotent bool `structs:"retry-non-idempotent"`
+
 	// http://nginx.org/en/docs/ngx_core_module.html#error_log
 	// Configures logging level [debug | info | notice | warn | error | crit | alert | emerg]
 	// Log levels above are listed in the order of increasing severity