Manuel de Brito Fontes 2016-04-16 19:36:45 -03:00
parent 102c2eeaa4
commit a86a682429
9 changed files with 126 additions and 110 deletions

View file

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/nginx-slim:0.5
FROM gcr.io/google_containers/nginx-slim:0.7
RUN apt-get update && apt-get install -y \
diffutils \

View file

@@ -11,7 +11,6 @@ This is a nginx Ingress controller that uses [ConfigMap](https://github.com/kube
- custom ssl_dhparam (optional). Just mount a secret with a file named `dhparam.pem`.
- support for TCP services (flag `--tcp-services-configmap`); see the ConfigMap sketch after this list
- custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md)
- custom error pages. Using the flag `--custom-error-service` it is possible to use a custom image compatible with [404-server](https://github.com/kubernetes/contrib/tree/master/404-server)
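For a concrete picture of the `--tcp-services-configmap` flag mentioned above, here is a minimal ConfigMap sketch: each key is the port NGINX will expose and each value has the form `<namespace>/<service name>:<service port>`. The ConfigMap name and the `echoheaders` service port are assumptions for illustration only.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  # illustrative name; reference it with the --tcp-services-configmap flag
  name: tcp-configmap-example
data:
  # expose the service default/echoheaders (port 8080 assumed) on port 9000
  # ports 80, 443 and 8181 cannot be used here; they are reserved for NGINX
  "9000": "default/echoheaders:8080"
```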
## Requirements
@@ -120,10 +119,13 @@ Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/exampl
Check the [example](examples/tls/README.md)
### Force HTTPS
### HTTP Strict Transport Security
By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule. To disable this behavior use `use-hts=false` in the NGINX ConfigMap.
HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header, that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule.
To disable this behavior use `hsts=false` in the NGINX ConfigMap.
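For illustration, a minimal ConfigMap sketch that applies this setting; the ConfigMap name is an assumption and must match the ConfigMap the controller already reads its custom NGINX configuration from:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  # assumed name; use the ConfigMap the controller is configured to watch
  name: nginx-load-balancer-conf
data:
  # disables the 301 redirect and the Strict-Transport-Security header
  hsts: "false"
```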
#### Optimizing TLS Time To First Byte (TTTFB)
@@ -190,25 +192,22 @@ Please check the example `example/rc-default.yaml`
To extract the information in JSON format, the module provides a custom URL: `/nginx_status/format/json`
### Custom errors
In case of an error in a request, the body of the response is obtained from the `default backend`. Each request to the default backend includes two headers:
- `X-Code` indicates the HTTP code
- `X-Format` contains the value of the `Accept` header
Using these two headers it is possible to use a custom backend service like [this one](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that inspects each request and returns a custom error page in the format expected by the client. This image handles `html` and `json` responses.
## Troubleshooting
Problems encountered during [1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md):
* The `setup-files.sh` script in hyperkube does not provide the 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather than 10.0.0.1. This causes nginx-third-party-lb to appear stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong IP). To verify this, add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC; docker will log this error before Kubernetes kills the container.
* To fix the above, setup-files.sh must be patched before the cluster is initialized (refer to https://github.com/kubernetes/kubernetes/pull/21504)
### Custom errors
The default backend provides a way to customize the default 404 page. This helps, but sometimes it is not enough.
Using the flag `--custom-error-service` it is possible to use an image that must be 404-server compatible and provide the route `/error`.
[Here](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) is an example of the image.
The route `/error` expects two arguments: code and format.
* code defines which error code is expected to be returned (502, 503, etc.)
* format defines the format that should be returned, for instance `/error?code=504&format=json` or `/error?code=502&format=html`
Using a volume pointing to the `/var/www/html` directory it is possible to use custom error pages.
### Debug
Using the flag `--v=XX` it is possible to increase the level of logging.
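For illustration, a sketch of the relevant fragment of the controller container spec; the verbosity value `3` is only an example:

```yaml
# fragment of the nginx-ingress-controller container definition
args:
- /nginx-ingress-controller
- --default-backend-service=default/default-http-backend
- --v=3   # higher values produce more detailed logs
```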

View file

@@ -44,9 +44,10 @@ import (
)
const (
defUpstreamName = "upstream-default-backend"
defServerName = "_"
namedPortAnnotation = "kubernetes.io/ingress-named-ports"
defUpstreamName = "upstream-default-backend"
defServerName = "_"
namedPortAnnotation = "kubernetes.io/ingress-named-ports"
podStoreSyncedPollPeriod = 1 * time.Second
)
var (
@@ -127,7 +128,9 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura
tcpConfigMap: tcpConfigMapName,
udpConfigMap: udpConfigMapName,
defaultSvc: defaultSvc,
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "loadbalancer-controller"}),
recorder: eventBroadcaster.NewRecorder(api.EventSource{
Component: "nginx-ingress-controller",
}),
}
lbc.syncQueue = NewTaskQueue(lbc.sync)
@@ -250,6 +253,7 @@ func (lbc *loadBalancerController) getUDPConfigMap(ns, name string) (*api.Config
func (lbc *loadBalancerController) updateEpNamedPorts(key string) {
if !lbc.controllersInSync() {
time.Sleep(podStoreSyncedPollPeriod)
lbc.svcEpQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced"))
return
}
@@ -291,6 +295,12 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) {
return
}
// skip the call to checkSvcForUpdate if the port is numeric and not a named port
_, err = strconv.Atoi(path.Backend.ServicePort.StrVal)
if err == nil {
continue
}
err = lbc.checkSvcForUpdate(svc)
if err != nil {
lbc.svcEpQueue.requeue(key, err)
@@ -306,6 +316,7 @@ func (lbc *loadBalancerController) updateEpNamedPorts(key string) {
// the current state
func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error {
// get the pods associated with the service
// TODO: switch this to a watch
pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{
LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(),
})
@@ -345,7 +356,7 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error {
}
curNamedPort := svc.ObjectMeta.Annotations[namedPortAnnotation]
if !reflect.DeepEqual(curNamedPort, namedPorts) {
if len(namedPorts) > 0 && !reflect.DeepEqual(curNamedPort, namedPorts) {
data, _ := json.Marshal(namedPorts)
newSvc, err := lbc.client.Services(svc.Namespace).Get(svc.Name)
@@ -353,8 +364,8 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error {
return fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err)
}
if svc.ObjectMeta.Annotations == nil {
svc.ObjectMeta.Annotations = map[string]string{}
if newSvc.ObjectMeta.Annotations == nil {
newSvc.ObjectMeta.Annotations = map[string]string{}
}
newSvc.ObjectMeta.Annotations[namedPortAnnotation] = string(data)
@@ -370,6 +381,7 @@ func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) error {
func (lbc *loadBalancerController) sync(key string) {
if !lbc.controllersInSync() {
time.Sleep(podStoreSyncedPollPeriod)
lbc.syncQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced"))
return
}
@@ -396,6 +408,7 @@ func (lbc *loadBalancerController) sync(key string) {
func (lbc *loadBalancerController) updateIngressStatus(key string) {
if !lbc.controllersInSync() {
time.Sleep(podStoreSyncedPollPeriod)
lbc.ingQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced"))
return
}
@@ -462,7 +475,7 @@ func (lbc *loadBalancerController) getTCPServices() []*nginx.Location {
return []*nginx.Location{}
}
return lbc.getServices(tcpMap.Data, api.ProtocolTCP)
return lbc.getStreamServices(tcpMap.Data, api.ProtocolTCP)
}
func (lbc *loadBalancerController) getUDPServices() []*nginx.Location {
@@ -482,10 +495,10 @@ func (lbc *loadBalancerController) getUDPServices() []*nginx.Location {
return []*nginx.Location{}
}
return lbc.getServices(tcpMap.Data, api.ProtocolUDP)
return lbc.getStreamServices(tcpMap.Data, api.ProtocolUDP)
}
func (lbc *loadBalancerController) getServices(data map[string]string, proto api.Protocol) []*nginx.Location {
func (lbc *loadBalancerController) getStreamServices(data map[string]string, proto api.Protocol) []*nginx.Location {
var svcs []*nginx.Location
// k -> port to expose in nginx
// v -> <namespace>/<service name>:<port from service to be used>
@@ -496,36 +509,45 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api
continue
}
svcPort := strings.Split(v, ":")
if len(svcPort) != 2 {
// these ports are reserved for NGINX
if k == "80" || k == "443" || k == "8181" {
glog.Warningf("port %v cannot be used for TCP or UDP services. Is reserved for NGINX", k)
continue
}
nsSvcPort := strings.Split(v, ":")
if len(nsSvcPort) != 2 {
glog.Warningf("invalid format (namespace/name:port) '%v'", k)
continue
}
svcNs, svcName, err := parseNsName(svcPort[0])
nsName := nsSvcPort[0]
svcPort := nsSvcPort[1]
svcNs, svcName, err := parseNsName(nsName)
if err != nil {
glog.Warningf("%v", err)
continue
}
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcPort[0])
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(nsName)
if err != nil {
glog.Warningf("error getting service %v: %v", svcPort[0], err)
glog.Warningf("error getting service %v: %v", nsName, err)
continue
}
if !svcExists {
glog.Warningf("service %v was not found", svcPort[0])
glog.Warningf("service %v was not found", nsName)
continue
}
svc := svcObj.(*api.Service)
var endps []nginx.UpstreamServer
targetPort, err := strconv.Atoi(svcPort[1])
targetPort, err := strconv.Atoi(svcPort)
if err != nil {
for _, sp := range svc.Spec.Ports {
if sp.Name == svcPort[1] {
if sp.Name == svcPort {
endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
break
}
@@ -616,55 +638,32 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng
server := servers[rule.Host]
for _, path := range rule.HTTP.Paths {
upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.StrVal)
upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String())
ups := upstreams[upsName]
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
if err != nil {
glog.Infof("error getting service %v from the cache: %v", svcKey, err)
continue
}
if !svcExists {
glog.Warningf("service %v does no exists", svcKey)
continue
}
svc := svcObj.(*api.Service)
for _, servicePort := range svc.Spec.Ports {
port := servicePort.TargetPort
if servicePort.Name != "" {
port = intstr.FromString(servicePort.Name)
}
if port == path.Backend.ServicePort {
endps := lbc.getEndpoints(svc, port, api.ProtocolTCP)
if len(endps) == 0 {
glog.Warningf("service %v does no have any active endpoints", svcKey)
}
ups.Backends = append(ups.Backends, endps...)
break
}
nginxPath := path.Path
// if there's no path defined we assume /
if nginxPath == "" {
lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING",
"Ingress rule '%v/%v' contains no path definition. Assuming /", ing.GetNamespace(), ing.GetName())
nginxPath = "/"
}
// Validate that there is no other previous rule
// for the same host and path.
skipLoc := false
addLoc := true
for _, loc := range server.Locations {
if loc.Path == path.Path {
lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING", "Path '%v' already defined in another Ingress rule", path)
skipLoc = true
if loc.Path == nginxPath {
lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING",
"Path '%v' already defined in another Ingress rule", nginxPath)
addLoc = false
break
}
}
if skipLoc == false {
if addLoc {
server.Locations = append(server.Locations, &nginx.Location{
Path: path.Path,
Path: nginxPath,
Upstream: *ups,
})
}
@@ -707,9 +706,39 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin
}
for _, path := range rule.HTTP.Paths {
name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.StrVal)
name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String())
if _, ok := upstreams[name]; !ok {
upstreams[name] = nginx.NewUpstream(name)
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
if err != nil {
glog.Infof("error getting service %v from the cache: %v", svcKey, err)
continue
}
if !svcExists {
glog.Warningf("service %v does no exists", svcKey)
continue
}
svc := svcObj.(*api.Service)
for _, servicePort := range svc.Spec.Ports {
port := servicePort.TargetPort
if servicePort.Name != "" {
port = intstr.FromString(servicePort.Name)
}
if port == path.Backend.ServicePort {
endps := lbc.getEndpoints(svc, port, api.ProtocolTCP)
if len(endps) == 0 {
glog.Warningf("service %v does no have any active endpoints", svcKey)
}
upstreams[name].Backends = append(upstreams[name].Backends, endps...)
break
}
}
}
}
}
@@ -877,6 +906,9 @@ func (lbc *loadBalancerController) Stop() error {
return fmt.Errorf("shutdown already in progress")
}
// removeFromIngress removes the IP address of the node where the Ingress
// controller is running before shutdown to avoid incorrect status
// information in Ingress rules
func (lbc *loadBalancerController) removeFromIngress() {
ings := lbc.ingLister.Store.List()
glog.Infof("updating %v Ingress rule/s", len(ings))
@@ -921,8 +953,6 @@ func (lbc *loadBalancerController) Run() {
go lbc.endpController.Run(lbc.stopCh)
go lbc.svcController.Run(lbc.stopCh)
time.Sleep(1 * time.Second)
go lbc.syncQueue.run(time.Second, lbc.stopCh)
go lbc.ingQueue.run(time.Second, lbc.stopCh)
go lbc.svcEpQueue.run(time.Second, lbc.stopCh)

View file

@@ -41,10 +41,6 @@ spec:
hostPort: 80
- containerPort: 443
hostPort: 443
# we expose 8080 to access nginx stats in url /nginx-status
# this is optional
- containerPort: 8080
hostPort: 8081
# service echoheaders as TCP service default/echoheaders:9000
# 9000 indicates the port used to expose the service
- containerPort: 9000

View file

@@ -41,8 +41,6 @@ spec:
hostPort: 80
- containerPort: 443
hostPort: 443
- containerPort: 8080
hostPort: 9000
args:
- /nginx-ingress-controller
- --default-backend-service=default/default-http-backend

View file

@@ -41,10 +41,6 @@ spec:
hostPort: 80
- containerPort: 443
hostPort: 443
# we expose 8080 to access nginx stats in url /nginx-status
# this is optional
- containerPort: 8080
hostPort: 8081
- containerPort: 53
hostPort: 53
args:

View file

@@ -149,8 +149,8 @@ http {
{{ range $server := .servers }}
server {
listen 80;
{{ if $server.SSL }}listen 443 ssl http2;
listen 80{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }};
{{ if $server.SSL }}listen 443{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} ssl http2;
ssl_certificate {{ $server.SSLCertificate }};
ssl_certificate_key {{ $server.SSLCertificateKey }};{{ end }}
{{ if $cfg.enableVtsStatus }}
@@ -159,12 +159,12 @@ http {
server_name {{ $server.Name }};
{{ if (and $server.SSL $cfg.UseHTS) }}
{{ if (and $server.SSL $cfg.hsts) }}
if ($scheme = http) {
return 301 https://$host$request_uri;
}
more_set_headers "Strict-Transport-Security: max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload";
more_set_headers "Strict-Transport-Security: max-age={{ $cfg.hstsMaxAge }}{{ if $cfg.hstsIncludeSubdomains }}; includeSubDomains{{ end }}; preload";
{{ end }}
{{ range $location := $server.Locations }}
@@ -180,7 +180,7 @@ http {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded­-Port $server_port;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s;
@@ -213,18 +213,13 @@ http {
# default server, including healthcheck
server {
listen 8080 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} reuseport;
listen 8080 default_server reuseport;
location /healthz {
access_log off;
return 200;
}
location /health-check {
access_log off;
proxy_pass http://127.0.0.1:10249/healthz;
}
location /nginx_status {
{{ if $cfg.enableVtsStatus }}
vhost_traffic_status_display;
@@ -253,9 +248,7 @@ http {
}
}
stream {
# TCP services
{{ range $i, $tcpServer := .tcpUpstreams }}
upstream tcp-{{ $tcpServer.Upstream.Name }} {
@@ -285,7 +278,6 @@ stream {
proxy_pass udp-{{ $udpServer.Upstream.Name }};
}
{{ end }}
}
{{/* definition of templates to avoid repetitions */}}

View file

@@ -49,7 +49,7 @@ const (
// that tells browsers that it should only be communicated with using HTTPS, instead of using HTTP.
// https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
// max-age is the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.
htsMaxAge = "15724800"
hstsMaxAge = "15724800"
// If UseProxyProtocol is enabled, defIPCIDR defines the default IP/network address of your external load balancer
defIPCIDR = "0.0.0.0/0"
@@ -105,18 +105,19 @@ type nginxConfiguration struct {
// Log levels above are listed in the order of increasing severity
ErrorLogLevel string `structs:"error-log-level,omitempty"`
// Enables or disables the header HTS in servers running SSL
UseHTS bool `structs:"use-hts,omitempty"`
// Enables or disables the header HSTS in servers running SSL
HSTS bool `structs:"hsts,omitempty"`
// Enables or disables the use of HTS in all the subdomains of the servername
HTSIncludeSubdomains bool `structs:"hts-include-subdomains,omitempty"`
// Enables or disables the use of HSTS in all the subdomains of the servername
// Default: true
HSTSIncludeSubdomains bool `structs:"hsts-include-subdomains,omitempty"`
// HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header)
// that tells browsers that it should only be communicated with using HTTPS, instead of using HTTP.
// https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
// max-age is the time, in seconds, that the browser should remember that this site is only to be
// accessed using HTTPS.
HTSMaxAge string `structs:"hts-max-age,omitempty"`
HSTSMaxAge string `structs:"hsts-max-age,omitempty"`
// Time during which a keep-alive client connection will stay open on the server side.
// The zero value disables keep-alive client connections
@@ -239,11 +240,11 @@ type Manager struct {
// in the file default-conf.json
func newDefaultNginxCfg() nginxConfiguration {
cfg := nginxConfiguration{
BodySize: bodySize,
ErrorLogLevel: errorLevel,
UseHTS: true,
HTSIncludeSubdomains: true,
HTSMaxAge: htsMaxAge,
BodySize: bodySize,
ErrorLogLevel: errorLevel,
HSTS: true,
HSTSIncludeSubdomains: true,
HSTSMaxAge: hstsMaxAge,
GzipTypes: gzipTypes,
KeepAlive: 75,
MaxWorkerConnections: 16384,

View file

@@ -93,6 +93,10 @@ spec:
hostPort: 80
- containerPort: 443
hostPort: 443
# we expose 8080 to access nginx stats in url /nginx-status
# this is optional
- containerPort: 8080
hostPort: 8080
args:
- /nginx-ingress-controller
- --default-backend-service=default/default-http-backend