Merge pull request #766 from aledbf/nginx-tls

Add support for named port, better docs for TLS nginx Ingress
Prashanth B 2016-05-02 09:01:55 -07:00
commit 496ace4d3a
22 changed files with 523 additions and 155 deletions

View file

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM gcr.io/google_containers/nginx-slim:0.5
+FROM gcr.io/google_containers/nginx-slim:0.6

 RUN apt-get update && apt-get install -y \
   diffutils \
@@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y \
   && rm -rf /var/lib/apt/lists/*

 COPY nginx-ingress-controller /
-COPY nginx.tmpl /
+COPY nginx.tmpl /etc/nginx/template/nginx.tmpl
 COPY default.conf /etc/nginx/nginx.conf
 COPY lua /etc/nginx/lua/

View file

@@ -1,7 +1,7 @@
 all: push

 # 0.0 shouldn't clobber any release builds
-TAG = 0.5
+TAG = 0.6
 PREFIX = gcr.io/google_containers/nginx-ingress-controller
 REPO_INFO=$(shell git config --get remote.origin.url)

View file

@@ -11,7 +11,6 @@ This is a nginx Ingress controller that uses [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md)
 - custom ssl_dhparam (optional). Just mount a secret with a file named `dhparam.pem`.
 - support for TCP services (flag `--tcp-services-configmap`)
 - custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md)
-- custom error pages. Using the flag `--custom-error-service` is possible to use a custom compatible [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) image

 ## Requirements
@@ -120,7 +119,13 @@ Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/exampl
 Check the [example](examples/tls/README.md)

+### HTTP Strict Transport Security
+
+HTTP Strict Transport Security (HSTS) is an opt-in security enhancement specified through the use of a special response header. Once a supported browser receives this header, that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS.
+
+By default the controller redirects (301) to HTTPS if there is a TLS Ingress rule.
+To disable this behavior use `hsts=false` in the NGINX ConfigMap.
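As a sketch of what that ConfigMap could look like — the map name is illustrative and the wiring follows the custom-configuration example referenced in this README; the `hsts`, `hsts-include-subdomains` and `hsts-max-age` keys match the struct tags added later in this commit:

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
data:
  # opt out of the 301 redirect and the Strict-Transport-Security header
  hsts: "false"
  # or keep HSTS enabled and tune it instead
  hsts-include-subdomains: "false"
  hsts-max-age: "15724800"
```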
 #### Optimizing TLS Time To First Byte (TTTFB)
@@ -175,6 +180,15 @@ Using a ConfigMap it is possible to customize the defaults in nginx.
 Please check the [tcp services](examples/custom-configuration/README.md) example

+## Custom NGINX template
+
+The NGINX template is located in the file `/etc/nginx/template/nginx.tmpl`. By mounting a volume over this path it is possible to use a custom version.
+Use the [custom-template](examples/custom-template/README.md) example as a guide; a short excerpt follows below.
+
+**Please note the template is tied to the Go code. Be sure not to change the names in the variable `$cfg`.**
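For reference, this excerpt from the `nginx.tmpl` shipped in this image (taken from the template diff later in this commit) shows how the `$cfg` fields are consumed, which is why renaming them breaks rendering:

```
server {
    listen 80{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }};

    {{ if (and $server.SSL $cfg.hsts) }}
    if ($scheme = http) {
        return 301 https://$host$request_uri;
    }

    more_set_headers "Strict-Transport-Security: max-age={{ $cfg.hstsMaxAge }}{{ if $cfg.hstsIncludeSubdomains }}; includeSubDomains{{ end }}; preload";
    {{ end }}
}
```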
 ### NGINX status page

 The ngx_http_stub_status_module module provides access to basic status information. This is the default module active in the url `/nginx_status`.
@@ -187,25 +201,22 @@ Please check the example `example/rc-default.yaml`
 To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`
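As a usage sketch — assuming the optional stats port 8080 is exposed on the node as in the example manifests in this commit:

```
# plain text status from ngx_http_stub_status_module
curl http://<node-ip>:8080/nginx_status

# JSON output when the vts status module is enabled
curl http://<node-ip>:8080/nginx_status/format/json
```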
+### Custom errors
+
+In case of an error in a request the body of the response is obtained from the `default backend`. Each request to the default backend includes two headers:
+
+- `X-Code` indicates the HTTP code
+- `X-Format` the value of the `Accept` header
+
+Using these two headers it is possible to use a custom backend service like [this one](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that inspects each request and returns a custom error page with the format expected by the client. This image handles `html` and `json` responses.
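To illustrate the contract those two headers define — this is not the nginx-error-server source; the handler and port are hypothetical — a minimal Go backend could look like:

```
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		code := r.Header.Get("X-Code")     // HTTP error code set by the controller
		format := r.Header.Get("X-Format") // Accept header of the original client

		if strings.Contains(format, "json") {
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintf(w, `{"code": %q, "message": "error page"}`, code)
			return
		}
		w.Header().Set("Content-Type", "text/html")
		fmt.Fprintf(w, "<html><body><h1>Error %s</h1></body></html>", code)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```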
 ## Troubleshooting

 Problems encountered during [1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md):

 * make setup-files.sh file in hypercube does not provide 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather then 10.0.0.1 -> this results in nginx-third-party-lb appearing to get stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong ip), to verify this add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC, and docker will log this error before kubernetes kills the container.
 * To fix the above, setup-files.sh must be patched before the cluster is inited (refer to https://github.com/kubernetes/kubernetes/pull/21504)

-### Custom errors
-
-The default backend provides a way to customize the default 404 page. This helps but sometimes is not enough.
-Using the flag `--custom-error-service` is possible to use an image that must be 404 compatible and provide the route /error
-[Here](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) there is an example of the the image
-The route `/error` expects two arguments: code and format
-* code defines the wich error code is expected to be returned (502,503,etc.)
-* format the format that should be returned For instance /error?code=504&format=json or /error?code=502&format=html
-Using a volume pointing to `/var/www/html` directory is possible to use a custom error

 ### Debug

 Using the flag `--v=XX` it is possible to increase the level of logging.
@@ -241,3 +252,5 @@ The previous behavior can be restored using `retry-non-idempotent=true` in the c
 ## Limitations

 - Ingress rules for TLS require the definition of the field `host`
+- The IP address in the status of loadBalancer could contain old values

View file

@@ -17,6 +17,7 @@ limitations under the License.
 package main

 import (
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"sort"
@@ -28,11 +29,13 @@ import (
 	"github.com/golang/glog"

 	"k8s.io/kubernetes/pkg/api"
+	podutil "k8s.io/kubernetes/pkg/api/pod"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller/framework"
+	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/watch"
@@ -41,14 +44,40 @@ import (
 )

 const (
-	defUpstreamName = "upstream-default-backend"
-	defServerName   = "_"
+	defUpstreamName          = "upstream-default-backend"
+	defServerName            = "_"
+	namedPortAnnotation      = "kubernetes.io/ingress-named-ports"
+	podStoreSyncedPollPeriod = 1 * time.Second
+	rootLocation             = "/"
 )

 var (
 	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
 )
+type namedPortMapping map[string]string
+
+// getPort returns the port defined in a named port
+func (npm namedPortMapping) getPort(name string) (string, bool) {
+	val, ok := npm.getPortMappings()[name]
+	return val, ok
+}
+
+// getPortMappings returns the map containing the
+// mapping of named port names and the port number
+func (npm namedPortMapping) getPortMappings() map[string]string {
+	data := npm[namedPortAnnotation]
+	var mapping map[string]string
+	if data == "" {
+		return mapping
+	}
+	if err := json.Unmarshal([]byte(data), &mapping); err != nil {
+		glog.Errorf("unexpected error reading annotations: %v", err)
+	}
+
+	return mapping
+}
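For context, the annotation this type parses is written onto the Service by `checkSvcForUpdate` below; it is a JSON object mapping a named `targetPort` to the container port number it resolved to. The service name and port values here are illustrative:

```
apiVersion: v1
kind: Service
metadata:
  name: echoheaders
  annotations:
    kubernetes.io/ingress-named-ports: '{"http":"8080"}'
```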
 // loadBalancerController watches the kubernetes api and adds/removes services
 // from the loadbalancer
 type loadBalancerController struct {
@@ -88,7 +117,7 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+	eventBroadcaster.StartRecordingToSink(kubeClient.Events(namespace))

 	lbc := loadBalancerController{
 		client: kubeClient,
@@ -99,7 +128,9 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura
 		tcpConfigMap: tcpConfigMapName,
 		udpConfigMap: udpConfigMapName,
 		defaultSvc:   defaultSvc,
-		recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "loadbalancer-controller"}),
+		recorder: eventBroadcaster.NewRecorder(api.EventSource{
+			Component: "nginx-ingress-controller",
+		}),
 	}

 	lbc.syncQueue = NewTaskQueue(lbc.sync)
@@ -217,8 +248,81 @@ func (lbc *loadBalancerController) getUDPConfigMap(ns, name string) (*api.ConfigMap, error) {
 	return lbc.client.ConfigMaps(ns).Get(name)
 }

+// checkSvcForUpdate verifies if one of the running pods for a service contains
+// a named port. If the annotation in the service does not exist or is not equal
+// to the port mapping obtained from the pod, the service must be updated to reflect
+// the current state
+func (lbc *loadBalancerController) checkSvcForUpdate(svc *api.Service) (map[string]string, error) {
+	// get the pods associated with the service
+	// TODO: switch this to a watch
+	pods, err := lbc.client.Pods(svc.Namespace).List(api.ListOptions{
+		LabelSelector: labels.Set(svc.Spec.Selector).AsSelector(),
+	})
+
+	namedPorts := map[string]string{}
+	if err != nil {
+		return namedPorts, fmt.Errorf("error searching service pods %v/%v: %v", svc.Namespace, svc.Name, err)
+	}
+
+	if len(pods.Items) == 0 {
+		return namedPorts, nil
+	}
+
+	// we need to check only one pod searching for named ports
+	pod := &pods.Items[0]
+	glog.V(4).Infof("checking pod %v/%v for named port information", pod.Namespace, pod.Name)
+	for i := range svc.Spec.Ports {
+		servicePort := &svc.Spec.Ports[i]
+		_, err := strconv.Atoi(servicePort.TargetPort.StrVal)
+		if err != nil {
+			portNum, err := podutil.FindPort(pod, servicePort)
+			if err != nil {
+				glog.V(4).Infof("failed to find port for service %s/%s: %v", svc.Namespace, svc.Name, err)
+				continue
+			}
+
+			if servicePort.TargetPort.StrVal == "" {
+				continue
+			}
+
+			namedPorts[servicePort.TargetPort.StrVal] = fmt.Sprintf("%v", portNum)
+		}
+	}
+
+	if svc.ObjectMeta.Annotations == nil {
+		svc.ObjectMeta.Annotations = map[string]string{}
+	}
+
+	curNamedPort := svc.ObjectMeta.Annotations[namedPortAnnotation]
+	if len(namedPorts) > 0 && !reflect.DeepEqual(curNamedPort, namedPorts) {
+		data, _ := json.Marshal(namedPorts)
+
+		newSvc, err := lbc.client.Services(svc.Namespace).Get(svc.Name)
+		if err != nil {
+			return namedPorts, fmt.Errorf("error getting service %v/%v: %v", svc.Namespace, svc.Name, err)
+		}
+
+		if newSvc.ObjectMeta.Annotations == nil {
+			newSvc.ObjectMeta.Annotations = map[string]string{}
+		}
+
+		newSvc.ObjectMeta.Annotations[namedPortAnnotation] = string(data)
+		glog.Infof("updating service %v with new named port mappings", svc.Name)
+		_, err = lbc.client.Services(svc.Namespace).Update(newSvc)
+		if err != nil {
+			return namedPorts, fmt.Errorf("error syncing service %v/%v: %v", svc.Namespace, svc.Name, err)
+		}
+
+		return newSvc.ObjectMeta.Annotations, nil
+	}
+
+	return namedPorts, nil
+}
 func (lbc *loadBalancerController) sync(key string) {
 	if !lbc.controllersInSync() {
+		time.Sleep(podStoreSyncedPollPeriod)
 		lbc.syncQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced"))
 		return
 	}
@@ -245,6 +349,7 @@ func (lbc *loadBalancerController) sync(key string) {

 func (lbc *loadBalancerController) updateIngressStatus(key string) {
 	if !lbc.controllersInSync() {
+		time.Sleep(podStoreSyncedPollPeriod)
 		lbc.ingQueue.requeue(key, fmt.Errorf("deferring sync till endpoints controller has synced"))
 		return
 	}
@@ -311,7 +416,7 @@ func (lbc *loadBalancerController) getTCPServices() []*nginx.Location {
 		return []*nginx.Location{}
 	}

-	return lbc.getServices(tcpMap.Data, api.ProtocolTCP)
+	return lbc.getStreamServices(tcpMap.Data, api.ProtocolTCP)
 }

 func (lbc *loadBalancerController) getUDPServices() []*nginx.Location {
@@ -331,10 +436,10 @@ func (lbc *loadBalancerController) getUDPServices() []*nginx.Location {
 		return []*nginx.Location{}
 	}

-	return lbc.getServices(tcpMap.Data, api.ProtocolUDP)
+	return lbc.getStreamServices(tcpMap.Data, api.ProtocolUDP)
 }

-func (lbc *loadBalancerController) getServices(data map[string]string, proto api.Protocol) []*nginx.Location {
+func (lbc *loadBalancerController) getStreamServices(data map[string]string, proto api.Protocol) []*nginx.Location {
 	var svcs []*nginx.Location
 	// k -> port to expose in nginx
 	// v -> <namespace>/<service name>:<port from service to be used>
@@ -345,35 +450,49 @@ func (lbc *loadBalancerController) getServices(data map[string]string, proto api.Protocol) []*nginx.Location {
 			continue
 		}

+		// these ports are required for NGINX
+		if k == "80" || k == "443" || k == "8181" {
+			glog.Warningf("port %v cannot be used for TCP or UDP services. It is reserved for NGINX", k)
+			continue
+		}
+
-		svcPort := strings.Split(v, ":")
-		if len(svcPort) != 2 {
+		nsSvcPort := strings.Split(v, ":")
+		if len(nsSvcPort) != 2 {
 			glog.Warningf("invalid format (namespace/name:port) '%v'", k)
 			continue
 		}

-		svcNs, svcName, err := parseNsName(svcPort[0])
+		nsName := nsSvcPort[0]
+		svcPort := nsSvcPort[1]
+
+		svcNs, svcName, err := parseNsName(nsName)
 		if err != nil {
 			glog.Warningf("%v", err)
 			continue
 		}

-		svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcPort[0])
+		svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(nsName)
 		if err != nil {
-			glog.Warningf("error getting service %v: %v", svcPort[0], err)
+			glog.Warningf("error getting service %v: %v", nsName, err)
 			continue
 		}

 		if !svcExists {
-			glog.Warningf("service %v was not found", svcPort[0])
+			glog.Warningf("service %v was not found", nsName)
 			continue
 		}

 		svc := svcObj.(*api.Service)

 		var endps []nginx.UpstreamServer
-		targetPort, err := strconv.Atoi(svcPort[1])
+		targetPort, err := strconv.Atoi(svcPort)
 		if err != nil {
-			endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1]), proto)
+			for _, sp := range svc.Spec.Ports {
+				if sp.Name == svcPort {
+					endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
+					break
+				}
+			}
 		} else {
 			// we need to use the TargetPort (where the endpoints are running)
 			for _, sp := range svc.Spec.Ports {
@@ -439,14 +558,18 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
 	upstreams[defUpstreamName] = lbc.getDefaultUpstream()
 	servers := lbc.createServers(data)

-	// default server - no servername.
-	servers[defServerName] = &nginx.Server{
-		Name: defServerName,
-		Locations: []*nginx.Location{{
-			Path:     "/",
-			Upstream: *lbc.getDefaultUpstream(),
-		},
-		},
-	}
+	if _, ok := servers[defServerName]; !ok {
+		// default server - no servername.
+		// there is no rule with default backend
+		servers[defServerName] = &nginx.Server{
+			Name: defServerName,
+			Locations: []*nginx.Location{{
+				Path:         rootLocation,
+				IsDefBackend: true,
+				Upstream:     *lbc.getDefaultUpstream(),
+			},
+			},
+		}
+	}

 	for _, ingIf := range data {
@@ -457,51 +580,51 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
 			continue
 		}

-		server := servers[rule.Host]
-		locations := []*nginx.Location{}
-
-		for _, path := range rule.HTTP.Paths {
-			upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue())
-			ups := upstreams[upsName]
-
-			svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
-			svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
-			if err != nil {
-				glog.Infof("error getting service %v from the cache: %v", svcKey, err)
-				continue
-			}
-
-			if !svcExists {
-				glog.Warningf("service %v does no exists", svcKey)
-				continue
-			}
-
-			svc := svcObj.(*api.Service)
-
-			for _, servicePort := range svc.Spec.Ports {
-				if servicePort.Port == path.Backend.ServicePort.IntValue() {
-					endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP)
-					if len(endps) == 0 {
-						glog.Warningf("service %v does no have any active endpoints", svcKey)
-					}
-
-					ups.Backends = append(ups.Backends, endps...)
-					break
-				}
-			}
-
-			for _, ups := range upstreams {
-				if upsName == ups.Name {
-					loc := &nginx.Location{Path: path.Path}
-					loc.Upstream = *ups
-					locations = append(locations, loc)
-					break
-				}
-			}
-		}
-
-		for _, loc := range locations {
-			server.Locations = append(server.Locations, loc)
-		}
+		host := rule.Host
+		if host == "" {
+			host = defServerName
+		}
+
+		server := servers[host]
+		if server == nil {
+			server = servers["_"]
+		}
+
+		for _, path := range rule.HTTP.Paths {
+			upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String())
+			ups := upstreams[upsName]
+
+			nginxPath := path.Path
+			// if there's no path defined we assume /
+			if nginxPath == "" {
+				lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING",
+					"Ingress rule '%v/%v' contains no path definition. Assuming /", ing.GetNamespace(), ing.GetName())
+				nginxPath = rootLocation
+			}
+
+			// Validate that there is no other previous
+			// rule for the same host and path.
+			addLoc := true
+			for _, loc := range server.Locations {
+				if loc.Path == rootLocation && nginxPath == rootLocation && loc.IsDefBackend {
+					loc.Upstream = *ups
+					addLoc = false
+					continue
+				}
+
+				if loc.Path == nginxPath {
+					lbc.recorder.Eventf(ing, api.EventTypeWarning, "MAPPING",
+						"Path '%v' already defined in another Ingress rule", nginxPath)
+					addLoc = false
+					break
+				}
+			}
+
+			if addLoc {
+				server.Locations = append(server.Locations, &nginx.Location{
+					Path:     nginxPath,
+					Upstream: *ups,
+				})
+			}
 		}
 	}
 }
@@ -512,6 +635,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
 	aUpstreams := make([]*nginx.Upstream, 0, len(upstreams))
 	for _, value := range upstreams {
 		if len(value.Backends) == 0 {
+			glog.Warningf("upstream %v does not have any active endpoints. Using default backend", value.Name)
 			value.Backends = append(value.Backends, nginx.NewDefaultServer())
 		}
 		sort.Sort(nginx.UpstreamServerByAddrPort(value.Backends))
@@ -529,6 +653,8 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
 	return aUpstreams, aServers
 }

+// createUpstreams creates the NGINX upstreams for each service referenced in
+// Ingress rules. The servers inside the upstream are endpoints.
 func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[string]*nginx.Upstream {
 	upstreams := make(map[string]*nginx.Upstream)
@@ -541,9 +667,40 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[string]*nginx.Upstream {
 		}

 		for _, path := range rule.HTTP.Paths {
-			name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue())
-			if _, ok := upstreams[name]; !ok {
-				upstreams[name] = nginx.NewUpstream(name)
+			name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.String())
+			if _, ok := upstreams[name]; ok {
+				continue
+			}
+
+			glog.V(3).Infof("creating upstream %v", name)
+			upstreams[name] = nginx.NewUpstream(name)
+
+			svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
+			svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
+			if err != nil {
+				glog.Infof("error getting service %v from the cache: %v", svcKey, err)
+				continue
+			}
+
+			if !svcExists {
+				glog.Warningf("service %v does not exist", svcKey)
+				continue
+			}
+
+			svc := svcObj.(*api.Service)
+			glog.V(3).Infof("obtaining port information for service %v", svcKey)
+			bp := path.Backend.ServicePort.String()
+			for _, servicePort := range svc.Spec.Ports {
+				// targetPort could be a string, use the name or the port (int)
+				if strconv.Itoa(servicePort.Port) == bp || servicePort.TargetPort.String() == bp || servicePort.Name == bp {
+					endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP)
+					if len(endps) == 0 {
+						glog.Warningf("service %v does not have any active endpoints", svcKey)
+					}
+
+					upstreams[name].Backends = append(upstreams[name].Backends, endps...)
+					break
+				}
 			}
 		}
 	}
@@ -561,12 +718,23 @@ func (lbc *loadBalancerController) createServers(data []interface{}) map[string]*nginx.Server {
 	ing := ingIf.(*extensions.Ingress)

 	for _, rule := range ing.Spec.Rules {
-		if _, ok := servers[rule.Host]; !ok {
-			servers[rule.Host] = &nginx.Server{Name: rule.Host, Locations: []*nginx.Location{}}
+		host := rule.Host
+		if host == "" {
+			host = defServerName
 		}

-		if pemFile, ok := pems[rule.Host]; ok {
-			server := servers[rule.Host]
+		if _, ok := servers[host]; !ok {
+			locs := []*nginx.Location{}
+			locs = append(locs, &nginx.Location{
+				Path:         rootLocation,
+				IsDefBackend: true,
+				Upstream:     *lbc.getDefaultUpstream(),
+			})
+			servers[host] = &nginx.Server{Name: host, Locations: locs}
+		}
+
+		if pemFile, ok := pems[host]; ok {
+			server := servers[host]
 			server.SSL = true
 			server.SSLCertificate = pemFile
 			server.SSLCertificateKey = pemFile
@@ -661,8 +829,32 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol) []nginx.UpstreamServer {
 				targetPort = epPort.Port
 			}
 		case intstr.String:
-			if epPort.Name == servicePort.StrVal {
-				targetPort = epPort.Port
+			namedPorts := s.ObjectMeta.Annotations
+			val, ok := namedPortMapping(namedPorts).getPort(servicePort.StrVal)
+			if ok {
+				port, err := strconv.Atoi(val)
+				if err != nil {
+					glog.Warningf("%v is not valid as a port", val)
+					continue
+				}
+
+				targetPort = port
+			} else {
+				newnp, err := lbc.checkSvcForUpdate(s)
+				if err != nil {
+					glog.Warningf("error mapping service ports: %v", err)
+					continue
+				}
+
+				val, ok := namedPortMapping(newnp).getPort(servicePort.StrVal)
+				if ok {
+					port, err := strconv.Atoi(val)
+					if err != nil {
+						glog.Warningf("%v is not valid as a port", val)
+						continue
+					}
+
+					targetPort = port
+				}
 			}
 		}
@@ -703,6 +895,9 @@ func (lbc *loadBalancerController) Stop() error {
 		return fmt.Errorf("shutdown already in progress")
 	}

+// removeFromIngress removes the IP address of the node where the Ingress
+// controller is running before shutdown to avoid incorrect status
+// information in Ingress rules
 func (lbc *loadBalancerController) removeFromIngress() {
 	ings := lbc.ingLister.Store.List()
 	glog.Infof("updating %v Ingress rule/s", len(ings))

View file

@@ -0,0 +1,8 @@
All the examples reference the services `echoheaders-x` and `echoheaders-y`

```
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.3 --replicas=1 --port=8080
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
```

View file

@@ -16,7 +16,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -40,7 +40,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
+          hostPort: 443
         args:
         - /nginx-ingress-controller
        - --default-backend-service=default/default-http-backend

View file

@@ -0,0 +1,9 @@
This example shows how it is possible to use a custom NGINX template.

First create a configmap with the template inside by running:

```
kubectl create configmap nginx-template --from-file=nginx.tmpl=../../nginx.tmpl
```

Next create the rc: `kubectl create -f custom-template.yaml`

View file

@@ -0,0 +1,57 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-lb
spec:
  replicas: 1
  selector:
    k8s-app: nginx-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-lb
        name: nginx-ingress-lb
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
        name: nginx-ingress-lb
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10249
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        # use downward API
        env:
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        args:
        - /nginx-ingress-controller
        - --default-backend-service=default/default-http-backend
        volumeMounts:
        - mountPath: /etc/nginx/template
          name: nginx-template-volume
          readOnly: true
      volumes:
      - name: nginx-template-volume
        configMap:
          name: nginx-template
          items:
          - key: nginx.tmpl
            path: nginx.tmpl

View file

@@ -10,7 +10,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -34,7 +34,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
+          hostPort: 443
         args:
         - /nginx-ingress-controller
         - --default-backend-service=default/default-http-backend

View file

@@ -16,7 +16,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -40,7 +40,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
+          hostPort: 443
         args:
         - /nginx-ingress-controller
         - --default-backend-service=default/default-http-backend

View file

@@ -21,7 +21,7 @@ spec:
         secret:
           secretName: dhparam-example
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -45,7 +45,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
+          hostPort: 443
         - containerPort: 8080
           hostPort: 9000
         volumeMounts:

View file

@@ -16,7 +16,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -40,11 +40,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
-        # we expose 8080 to access nginx stats in url /nginx-status
-        # this is optional
-        - containerPort: 8080
-          hostPort: 8081
+          hostPort: 443
         # service echoheaders as TCP service default/echoheaders:9000
         # 9000 indicates the port used to expose the service
         - containerPort: 9000

View file

@ -0,0 +1,90 @@
This is an example to use a TLS Ingress rule to use SSL in NGINX
# TLS certificate termination
This examples uses 2 different certificates to terminate SSL for 2 hostnames.
1. Deploy the controller by creating the rc in the parent dir
2. Create tls secret for foo.bar.com
3. Create rc-ssl.yaml
*Next create a SSL certificate for `foo.bar.com` host:*
```
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=foo.bar.com"
```
*Now store the SSL certificate in a secret:*
```
echo "
apiVersion: v1
kind: Secret
metadata:
name: foo-secret
data:
tls.crt: `base64 /tmp/tls.crt`
tls.key: `base64 /tmp/tls.key`
" | kubectl create -f -
```
*Finally create a tls Ingress rule:*
```
echo "
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: foo
namespace: default
spec:
tls:
- hosts:
- foo.bar.com
secretName: foo-secret
rules:
- host: foo.bar.com
http:
paths:
- backend:
serviceName: echoheaders-x
servicePort: 80
path: /
" | kubectl create -f -
```
You should be able to reach your nginx service or echoheaders service using a hostname:
```
$ kubectl get ing
NAME RULE BACKEND ADDRESS
foo - 10.4.0.3
foo.bar.com
/ echoheaders-x:80
```
```
$ curl https://10.4.0.3 -H 'Host:foo.bar.com' -k
old-mbp:contrib aledbf$ curl https://10.4.0.3 -H 'Host:foo.bar.com' -k
CLIENT VALUES:
client_address=10.2.48.4
command=GET
real path=/
query=nil
request_version=1.1
request_uri=http://foo.bar.com:8080/
SERVER VALUES:
server_version=nginx: 1.9.7 - lua: 9019
HEADERS RECEIVED:
accept=*/*
connection=close
host=foo.bar.com
user-agent=curl/7.43.0
x-forwarded-for=10.2.48.1
x-forwarded-host=foo.bar.com
x-forwarded-proto=https
x-real-ip=10.2.48.1
BODY:
-no body in request-
```
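To also verify the HTTP-to-HTTPS redirect the controller configures for TLS rules (see the HSTS section of the README; the address below is the illustrative one from the output above):

```
# expect a 301 redirect to https:// because the Ingress rule has a tls section
curl -I http://10.4.0.3 -H 'Host: foo.bar.com'
```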

View file

@@ -16,7 +16,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -40,9 +40,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
-        - containerPort: 8080
-          hostPort: 9000
+          hostPort: 443
         args:
         - /nginx-ingress-controller
         - --default-backend-service=default/default-http-backend

View file

@@ -16,7 +16,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -40,11 +40,7 @@ spec:
         - containerPort: 80
           hostPort: 80
         - containerPort: 443
-          hostPort: 4444
-        # we expose 8080 to access nginx stats in url /nginx-status
-        # this is optional
-        - containerPort: 8080
-          hostPort: 8081
+          hostPort: 443
         - containerPort: 53
           hostPort: 53
         args:

View file

@@ -43,7 +43,7 @@ const (
 var (
 	// value overwritten during build. This can be used to resolve issues.
-	version = "0.5"
+	version = "0.6"
 	gitRepo = "https://github.com/kubernetes/contrib"
 	flags   = pflag.NewFlagSet("", pflag.ExitOnError)

View file

@@ -80,10 +80,6 @@ http {
         '' $scheme;
     }

-    map $pass_access_scheme $sts {
-        'https' 'max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload';
-    }

     # Map a response error watching the header Content-Type
     map $http_accept $httpAccept {
         default html;
@@ -145,28 +141,31 @@ http {
     {{range $name, $upstream := .upstreams}}
     upstream {{$upstream.Name}} {
+        {{ if $cfg.enableStickySessions }}
+        sticky hash=sha1 httponly;
+        {{ else }}
         least_conn;
-        {{range $server := $upstream.Backends}}server {{$server.Address}}:{{$server.Port}};
-        {{end}}
+        {{ end }}
+
+        {{ range $server := $upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }};
+        {{ end }}
     }
     {{end}}

     {{ range $server := .servers }}
     server {
-        listen 80;
-        {{ if $server.SSL }}listen 443 ssl http2;
+        listen 80{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }};
+        {{ if $server.SSL }}listen 443{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} ssl http2;
         ssl_certificate {{ $server.SSLCertificate }};
         ssl_certificate_key {{ $server.SSLCertificateKey }};{{ end }}

+        {{ if $cfg.enableVtsStatus }}
+        vhost_traffic_status_filter_by_set_key {{ $server.Name }} application::*;
+        {{ end }}
+
         server_name {{ $server.Name }};

-        {{ if $server.SSL }}
+        {{ if (and $server.SSL $cfg.hsts) }}
         if ($scheme = http) {
             return 301 https://$host$request_uri;
         }
+
+        more_set_headers "Strict-Transport-Security: max-age={{ $cfg.hstsMaxAge }}{{ if $cfg.hstsIncludeSubdomains }}; includeSubDomains{{ end }}; preload";
         {{ end }}

         {{ range $location := $server.Locations }}
@@ -182,6 +181,7 @@ http {
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Port $server_port;
             proxy_set_header X-Forwarded-Proto $pass_access_scheme;

             proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s;
@@ -214,18 +214,13 @@ http {
     # default server, including healthcheck
     server {
-        listen 8080 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} reuseport;
+        listen 8080 default_server reuseport;

         location /healthz {
             access_log off;
             return 200;
         }

-        location /health-check {
-            access_log off;
-            proxy_pass http://127.0.0.1:10249/healthz;
-        }

         location /nginx_status {
             {{ if $cfg.enableVtsStatus }}
             vhost_traffic_status_display;
@@ -254,9 +249,7 @@
     }
 }

 stream {
-
 # TCP services
 {{ range $i, $tcpServer := .tcpUpstreams }}
 upstream tcp-{{ $tcpServer.Upstream.Name }} {
@@ -286,7 +279,6 @@
         proxy_pass udp-{{ $udpServer.Upstream.Name }};
     }
 {{ end }}
-
 }

 {{/* definition of templates to avoid repetitions */}}

View file

@@ -49,7 +49,7 @@ const (
 	// that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP.
 	// https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
 	// max-age is the time, in seconds, that the browser should remember that this site is only to be accessed using HTTPS.
-	htsMaxAge = "15724800"
+	hstsMaxAge = "15724800"

 	// If UseProxyProtocol is enabled defIPCIDR defines the default the IP/network address of your external load balancer
 	defIPCIDR = "0.0.0.0/0"
@@ -89,6 +89,11 @@ type nginxConfiguration struct {
 	// Sets the maximum allowed size of the client request body
 	BodySize string `structs:"body-size,omitempty"`

+	// EnableStickySessions enables sticky sessions using cookies
+	// https://bitbucket.org/nginx-goodies/nginx-sticky-module-ng
+	// By default this is disabled
+	EnableStickySessions bool `structs:"enable-sticky-sessions,omitempty"`
+
 	// EnableVtsStatus allows the replacement of the default status page with a third party module named
 	// nginx-module-vts - https://github.com/vozlt/nginx-module-vts
 	// By default this is disabled
@@ -105,18 +110,19 @@ type nginxConfiguration struct {
 	// Log levels above are listed in the order of increasing severity
 	ErrorLogLevel string `structs:"error-log-level,omitempty"`

-	// Enables or disables the header HTS in servers running SSL
-	UseHTS bool `structs:"use-hts,omitempty"`
+	// Enables or disables the header HSTS in servers running SSL
+	HSTS bool `structs:"hsts,omitempty"`

-	// Enables or disables the use of HTS in all the subdomains of the servername
-	HTSIncludeSubdomains bool `structs:"hts-include-subdomains,omitempty"`
+	// Enables or disables the use of HSTS in all the subdomains of the servername
+	// Default: true
+	HSTSIncludeSubdomains bool `structs:"hsts-include-subdomains,omitempty"`

 	// HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header)
 	// that tell browsers that it should only be communicated with using HTTPS, instead of using HTTP.
 	// https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
 	// max-age is the time, in seconds, that the browser should remember that this site is only to be
 	// accessed using HTTPS.
-	HTSMaxAge string `structs:"hts-max-age,omitempty"`
+	HSTSMaxAge string `structs:"hsts-max-age,omitempty"`

 	// Time during which a keep-alive client connection will stay open on the server side.
 	// The zero value disables keep-alive client connections
@@ -239,11 +245,11 @@ type Manager struct {
 // in the file default-conf.json
 func newDefaultNginxCfg() nginxConfiguration {
 	cfg := nginxConfiguration{
-		BodySize:             bodySize,
-		ErrorLogLevel:        errorLevel,
-		UseHTS:               true,
-		HTSIncludeSubdomains: true,
-		HTSMaxAge:            htsMaxAge,
+		BodySize:              bodySize,
+		ErrorLogLevel:         errorLevel,
+		HSTS:                  true,
+		HSTSIncludeSubdomains: true,
+		HSTSMaxAge:            hstsMaxAge,
 		GzipTypes:             gzipTypes,
 		KeepAlive:             75,
 		MaxWorkerConnections:  16384,

View file

@@ -82,8 +82,9 @@ func (c ServerByName) Less(i, j int) bool {

 // Location describes an NGINX location
 type Location struct {
-	Path     string
-	Upstream Upstream
+	Path         string
+	IsDefBackend bool
+	Upstream     Upstream
 }

 // LocationByPath sorts location by path

View file

@@ -53,8 +53,10 @@ func (nginx *Manager) CheckSSLCertificate(pemFileName string) ([]string, error)
 		return []string{}, err
 	}

-	var block *pem.Block
-	block, _ = pem.Decode(pemCerts)
+	block, _ := pem.Decode(pemCerts)
+	if block == nil {
+		return []string{}, fmt.Errorf("No valid PEM formatted block found")
+	}

 	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
View file

@@ -29,6 +29,7 @@ import (
 var (
 	camelRegexp = regexp.MustCompile("[0-9A-Za-z]+")
+	tmplPath    = "/etc/nginx/template/nginx.tmpl"

 	funcMap = template.FuncMap{
 		"empty": func(input interface{}) bool {
@@ -43,7 +44,7 @@ var (
 )

 func (ngx *Manager) loadTemplate() {
-	tmpl, _ := template.New("nginx.tmpl").Funcs(funcMap).ParseFiles("./nginx.tmpl")
+	tmpl, _ := template.New("nginx.tmpl").Funcs(funcMap).ParseFiles(tmplPath)
 	ngx.template = tmpl
 }

View file

@@ -68,7 +68,7 @@ spec:
     spec:
       terminationGracePeriodSeconds: 60
       containers:
-      - image: gcr.io/google_containers/nginx-ingress-controller:0.5
+      - image: gcr.io/google_containers/nginx-ingress-controller:0.6
         name: nginx-ingress-lb
         imagePullPolicy: Always
         livenessProbe:
@@ -93,6 +93,10 @@ spec:
           hostPort: 80
         - containerPort: 443
           hostPort: 443
+        # we expose 8080 to access nginx stats in url /nginx-status
+        # this is optional
+        - containerPort: 8080
+          hostPort: 8080
         args:
         - /nginx-ingress-controller
         - --default-backend-service=default/default-http-backend