Configure nginx using a ConfigMap

This commit is contained in:
Manuel de Brito Fontes 2016-03-19 20:29:29 -03:00
parent 28f9cb0b2b
commit d9934ec4db
17 changed files with 378 additions and 417 deletions

View file

@ -1,7 +1,7 @@
all: push
# 0.0 shouldn't clobber any release builds
TAG = 0.3
TAG = 0.4
PREFIX = gcr.io/google_containers/nginx-third-party
controller: controller.go clean

View file

@ -144,6 +144,15 @@ First we need to remove the running
kubectl delete rc nginx-ingress-3rdpartycfg
```
To configure which services and ports will be exposed:
```
kubectl create -f examples/tcp-configmap-example.yaml
```
The file `examples/tcp-configmap-example.yaml` uses a ConfigMap where each key is the external port to expose and each value has the form `<namespace>/<service name>:<service port>`.
(The service port can be given as a number or as the port name.)
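As a rough, standalone sketch of how one such entry decomposes (the helper below is illustrative, not the controller's actual code — the real parsing lives in the controller's `getTCPServices`, shown further down in this diff):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTCPEntry splits one ConfigMap entry of the form
// key = external port, value = <namespace>/<service name>:<service port>.
func parseTCPEntry(key, value string) (extPort int, ns, svc, svcPort string, err error) {
	extPort, err = strconv.Atoi(key)
	if err != nil {
		return 0, "", "", "", fmt.Errorf("%v is not a valid TCP port", key)
	}
	parts := strings.SplitN(value, ":", 2)
	if len(parts) != 2 {
		return 0, "", "", "", fmt.Errorf("invalid format (namespace/name:port) in %q", value)
	}
	nsName := strings.Split(parts[0], "/")
	if len(nsName) != 2 {
		return 0, "", "", "", fmt.Errorf("invalid format (namespace/name) in %q", parts[0])
	}
	// The port part may be a number or the name of the service port.
	return extPort, nsName[0], nsName[1], parts[1], nil
}

func main() {
	port, ns, svc, svcPort, err := parseTCPEntry("9000", "default/example-go:8080")
	if err != nil {
		panic(err)
	}
	fmt.Printf("expose :%d -> %s/%s (port %s)\n", port, ns, svc, svcPort)
}
```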
```
kubectl create -f examples/rc-tcp.yaml
```
@ -211,8 +220,6 @@ BODY:
## SSL
Currently Ingress rules do not contain SSL definitions. In order to support SSL in nginx, this controller uses secrets mounted inside the directory `/etc/nginx-ssl` to detect whether an Ingress rule contains a host for which an SSL server can be created.
First create a secret containing the SSL certificate and key. This example creates the certificate and the secret (JSON):
`SECRET_NAME=secret-echoheaders-1 HOSTS=foo.bar.com ./examples/certs.sh`
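A minimal sketch of that detection idea, assuming each secret is mounted as its own directory under `/etc/nginx-ssl` and, purely for illustration, that the certificate file inside it is named `tls.crt` (the real file name depends on the secret's keys). The hosts covered by each certificate are collected and compared against the Ingress rule host, allowing a leading wildcard, much like the `isHostValid`/`matchHostnames` helpers in this commit:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// hostsCoveredBySecrets walks the mounted secrets under baseDir (the controller
// uses /etc/nginx-ssl) and returns, per secret directory, the DNS names found
// in its certificate. The certificate file name "tls.crt" is an assumption for
// illustration only.
func hostsCoveredBySecrets(baseDir string) map[string][]string {
	covered := map[string][]string{}
	entries, _ := os.ReadDir(baseDir)
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		raw, err := os.ReadFile(filepath.Join(baseDir, e.Name(), "tls.crt"))
		if err != nil {
			continue
		}
		block, _ := pem.Decode(raw)
		if block == nil {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			continue
		}
		covered[e.Name()] = append([]string{cert.Subject.CommonName}, cert.DNSNames...)
	}
	return covered
}

// matches reports whether an Ingress rule host is covered by one of the
// certificate names, allowing a leading wildcard label ("*.bar.com").
func matches(host string, names []string) bool {
	for _, n := range names {
		if n == host {
			return true
		}
		if strings.HasPrefix(n, "*.") &&
			strings.HasSuffix(host, n[1:]) &&
			strings.Count(host, ".") == strings.Count(n, ".") {
			return true
		}
	}
	return false
}

func main() {
	for secret, names := range hostsCoveredBySecrets("/etc/nginx-ssl") {
		fmt.Printf("secret %v covers %v (foo.bar.com matches: %v)\n",
			secret, names, matches("foo.bar.com", names))
	}
}
```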
@ -305,9 +312,3 @@ Using a volume pointing to `/var/www/html` directory is possible to use a custom
Problems encountered during [1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md):
* the setup-files.sh script in hyperkube does not provide the 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather than 10.0.0.1. This results in nginx-third-party-lb appearing to get stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs; Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong IP). To verify this, add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC; docker will then log this error before Kubernetes kills the container.
* To fix the above, setup-files.sh must be patched before the cluster is initialized (refer to https://github.com/kubernetes/kubernetes/pull/21504)
* if, once nginx-third-party-lb starts, its docker log continuously spams the message "utils.go:(line #)] Requeuing default/echomap, err Post http://127.0.0.1:8080/update-ingress: dial tcp 127.0.0.1:8080: getsockopt: connection refused", it means the container is unable to use DNS to resolve the service address; DNS autoconfiguration is broken on 1.2.0-alpha7 (refer again to https://github.com/kubernetes/kubernetes/pull/21504 for fixes)
## TODO:
- multiple SSL certificates
- custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md)

View file

@ -20,6 +20,8 @@ import (
"fmt"
"net/http"
"sort"
"strconv"
"strings"
"sync"
"time"
@ -30,7 +32,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
@ -40,21 +41,6 @@ import (
)
const (
// Name of the default config map that contains the configuration for nginx.
// Takes the form namespace/name.
// If the annotation does not exists the controller will create a new annotation with the default
// configuration.
lbConfigName = "lbconfig"
// If you have pure tcp services or https services that need L3 routing, you
// must specify them by name. Note that you are responsible for:
// 1. Making sure there is no collision between the service ports of these services.
// - You can have multiple <mysql svc name>:3306 specifications in this map, and as
// long as the service ports of your mysql service don't clash, you'll get
// loadbalancing for each one.
// 2. Exposing the service ports as node ports on a pod.
// 3. Adding firewall rules so these ports can ingress traffic.
defUpstreamName = "upstream-default-backend"
)
@ -63,16 +49,14 @@ const (
type loadBalancerController struct {
client *client.Client
ingController *framework.Controller
configController *framework.Controller
endpController *framework.Controller
svcController *framework.Controller
ingLister StoreToIngressLister
svcLister cache.StoreToServiceLister
configLister StoreToConfigMapLister
endpLister cache.StoreToEndpointsLister
stopCh chan struct{}
nginx *nginx.NginxManager
lbInfo *lbInfo
defaultSvc string
nxgConfigMap string
tcpConfigMap string
@ -81,18 +65,20 @@ type loadBalancerController struct {
// allowing concurrent stoppers leads to stack traces.
stopLock sync.Mutex
shutdown bool
stopCh chan struct{}
}
// newLoadBalancerController creates a controller for nginx loadbalancer
func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc nginx.Service,
namespace, nxgConfigMapName, tcpConfigMapName string, lbInfo *lbInfo) (*loadBalancerController, error) {
func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc,
namespace, nxgConfigMapName, tcpConfigMapName string, lbRuntimeInfo *lbInfo) (*loadBalancerController, error) {
lbc := loadBalancerController{
client: kubeClient,
stopCh: make(chan struct{}),
lbInfo: lbInfo,
nginx: nginx.NewManager(kubeClient, defaultSvc),
lbInfo: lbRuntimeInfo,
nginx: nginx.NewManager(kubeClient),
nxgConfigMap: nxgConfigMapName,
tcpConfigMap: tcpConfigMapName,
defaultSvc: defaultSvc,
}
lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
@ -102,24 +88,17 @@ func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Dura
},
&extensions.Ingress{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
lbc.configLister.Store, lbc.configController = framework.NewInformer(
&cache.ListWatch{
ListFunc: configListFunc(kubeClient, lbc.lbInfo.DeployType, namespace, lbInfo.ObjectName),
WatchFunc: configWatchFunc(kubeClient, lbc.lbInfo.DeployType, namespace, lbInfo.ObjectName),
},
&api.ReplicationController{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
lbc.endpLister.Store, lbc.endpController = framework.NewInformer(
&cache.ListWatch{
ListFunc: endpointsListFunc(kubeClient, namespace),
WatchFunc: endpointsWatchFunc(kubeClient, namespace),
ListFunc: endpointsListFunc(lbc.client, namespace),
WatchFunc: endpointsWatchFunc(lbc.client, namespace),
},
&api.Endpoints{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
&cache.ListWatch{
ListFunc: serviceListFunc(kubeClient, namespace),
WatchFunc: serviceWatchFunc(kubeClient, namespace),
ListFunc: serviceListFunc(lbc.client, namespace),
WatchFunc: serviceWatchFunc(lbc.client, namespace),
},
&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
@ -150,39 +129,6 @@ func serviceWatchFunc(c *client.Client, ns string) func(options api.ListOptions)
}
}
func configListFunc(c *client.Client, deployType runtime.Object, ns, name string) func(api.ListOptions) (runtime.Object, error) {
return func(api.ListOptions) (runtime.Object, error) {
switch deployType.(type) {
case *api.ReplicationController:
rc, err := c.ReplicationControllers(ns).Get(name)
return &api.ReplicationControllerList{
Items: []api.ReplicationController{*rc},
}, err
case *extensions.DaemonSet:
ds, err := c.Extensions().DaemonSets(ns).Get(name)
return &extensions.DaemonSetList{
Items: []extensions.DaemonSet{*ds},
}, err
default:
return nil, errInvalidKind
}
}
}
func configWatchFunc(c *client.Client, deployType runtime.Object, ns, name string) func(options api.ListOptions) (watch.Interface, error) {
return func(options api.ListOptions) (watch.Interface, error) {
switch deployType.(type) {
case *api.ReplicationController:
options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": name})
return c.ReplicationControllers(ns).Watch(options)
case *extensions.DaemonSet:
options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": name})
return c.Extensions().DaemonSets(ns).Watch(options)
default:
return nil, errInvalidKind
}
}
}
func endpointsListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
return func(opts api.ListOptions) (runtime.Object, error) {
return c.Endpoints(ns).List(opts)
@ -195,12 +141,12 @@ func endpointsWatchFunc(c *client.Client, ns string) func(options api.ListOption
}
}
func (lbc *loadBalancerController) getConfigMap(name string) (api.ConfigMap, error) {
return lbc.client.ConfigMaps(lbc.lbInfo.PodNamespace).Get(name)
func (lbc *loadBalancerController) getConfigMap(ns, name string) (*api.ConfigMap, error) {
return lbc.client.ConfigMaps(ns).Get(name)
}
func (lbc *loadBalancerController) getTCPConfigMap(name string) (api.ConfigMap, error) {
return lbc.client.ConfigMaps(lbc.lbInfo.PodNamespace).Get(name)
func (lbc *loadBalancerController) getTCPConfigMap(ns, name string) (*api.ConfigMap, error) {
return lbc.client.ConfigMaps(ns).Get(name)
}
func (lbc *loadBalancerController) registerHandlers() {
@ -226,22 +172,141 @@ func (lbc *loadBalancerController) sync() {
ings := lbc.ingLister.Store.List()
upstreams, servers := lbc.getUpstreamServers(ings)
cfg, err := lbc.getConfigMap(lbc.nxgConfigMap)
var cfg *api.ConfigMap
ngxConfig, err := lbc.nginx.ReadConfig("")
ns, name, _ := parseNsName(lbc.nxgConfigMap)
cfg, err := lbc.getConfigMap(ns, name)
if err != nil {
cfg = &api.ConfigMap{}
}
ngxConfig, err := lbc.nginx.ReadConfig(cfg)
if err != nil {
glog.Warningf("%v", err)
}
tcpServices := lbc.getTCPServices()
lbc.nginx.CheckAndReload(ngxConfig, upstreams, servers, tcpServices)
lbc.nginx.CheckAndReload(ngxConfig, nginx.IngressConfig{
Upstreams: upstreams,
Servers: servers,
TCPUpstreams: tcpServices,
})
}
func (lbc *loadBalancerController) getTCPServices() []*nginx.Location {
if lbc.tcpConfigMap == "" {
// no configmap for TCP services
return []*nginx.Location{}
}
ns, name, err := parseNsName(lbc.tcpConfigMap)
if err != nil {
glog.Warningf("%v", err)
return []*nginx.Location{}
}
tcpMap, err := lbc.getTCPConfigMap(ns, name)
if err != nil {
glog.V(3).Infof("no configured tcp services found: %v", err)
return []*nginx.Location{}
}
var tcpSvcs []*nginx.Location
// k -> port to expose in nginx
// v -> <namespace>/<service name>:<port from service to be used>
for k, v := range tcpMap.Data {
port, err := strconv.Atoi(k)
if err != nil {
glog.Warningf("%v is not valid as a TCP port", k)
continue
}
svcPort := strings.Split(v, ":")
if len(svcPort) != 2 {
glog.Warningf("invalid format (namespace/name:port) '%v'", k)
continue
}
svcNs, svcName, err := parseNsName(svcPort[0])
if err != nil {
glog.Warningf("%v", err)
continue
}
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcPort[0])
if err != nil {
glog.Warningf("error getting service %v: %v", svcPort[0], err)
continue
}
if !svcExists {
glog.Warningf("service %v was not found", svcPort[0])
continue
}
svc := svcObj.(*api.Service)
var endps []nginx.UpstreamServer
targetPort, err := strconv.Atoi(svcPort[1])
if err != nil {
endps = lbc.getEndpoints(svc, intstr.FromString(svcPort[1]))
} else {
// we need to use the TargetPort (where the endpoints are running)
for _, sp := range svc.Spec.Ports {
if sp.Port == targetPort {
endps = lbc.getEndpoints(svc, sp.TargetPort)
break
}
}
}
tcpSvcs = append(tcpSvcs, &nginx.Location{
Path: k,
Upstream: nginx.Upstream{
Name: fmt.Sprintf("%v-%v-%v", svcNs, svcName, port),
Backends: endps,
},
})
}
return tcpSvcs
}
func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream {
upstream := &nginx.Upstream{
Name: defUpstreamName,
}
svcKey := lbc.defaultSvc
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
if err != nil {
glog.Warningf("unexpected error searching the default backend %v: %v", lbc.defaultSvc, err)
upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer())
return upstream
}
if !svcExists {
glog.Warningf("service %v does no exists", svcKey)
upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer())
return upstream
}
svc := svcObj.(*api.Service)
endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort)
if len(endps) == 0 {
glog.Warningf("service %v does no have any active endpoints", svcKey)
upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer())
} else {
upstream.Backends = append(upstream.Backends, endps...)
}
return upstream
}
func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
upstreams := lbc.createUpstreams(data)
servers := lbc.createServers(data)
//TODO: add default backend upstream
upstreams[defUpstreamName] = lbc.getDefaultUpstream()
for _, ingIf := range data {
ing := ingIf.(*extensions.Ingress)
@ -252,13 +317,13 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng
}
server := servers[rule.Host]
var locations []nginx.Location
locations := []*nginx.Location{}
for _, path := range rule.HTTP.Paths {
upsName := ing.GetNamespace() + "-" + path.Backend.ServiceName
upsName := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue())
ups := upstreams[upsName]
svcKey := ing.GetNamespace() + "/" + path.Backend.ServiceName
svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
if err != nil {
glog.Infof("error getting service %v from the cache: %v", svcKey, err)
@ -286,7 +351,7 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng
for _, ups := range upstreams {
if upsName == ups.Name {
loc := nginx.Location{Path: path.Path}
loc := &nginx.Location{Path: path.Path}
loc.Upstream = *ups
locations = append(locations, loc)
break
@ -294,7 +359,9 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng
}
}
server.Locations = append(server.Locations, locations...)
for _, loc := range locations {
server.Locations = append(server.Locations, loc)
}
}
}
@ -334,7 +401,7 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin
}
for _, path := range rule.HTTP.Paths {
name := ing.GetNamespace() + "-" + path.Backend.ServiceName
name := fmt.Sprintf("%v-%v-%v", ing.GetNamespace(), path.Backend.ServiceName, path.Backend.ServicePort.IntValue())
if _, ok := upstreams[name]; !ok {
upstreams[name] = nginx.NewUpstream(name)
}
@ -355,7 +422,7 @@ func (lbc *loadBalancerController) createServers(data []interface{}) map[string]
for _, rule := range ing.Spec.Rules {
if _, ok := servers[rule.Host]; !ok {
servers[rule.Host] = &nginx.Server{Name: rule.Host}
servers[rule.Host] = &nginx.Server{Name: rule.Host, Locations: []*nginx.Location{}}
}
if pemFile, ok := pems[rule.Host]; ok {
@ -417,18 +484,18 @@ func (lbc *loadBalancerController) getPemsFromIngress(data []interface{}) map[st
// getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination.
func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString) []nginx.UpstreamServer {
glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String())
ep, err := lbc.endpLister.GetServiceEndpoints(s)
if err != nil {
glog.Warningf("unexpected error obtaining service endpoints: %v", err)
return []nginx.UpstreamServer{}
}
var upsServers []nginx.UpstreamServer
upsServers := []nginx.UpstreamServer{}
for _, ss := range ep.Subsets {
for _, epPort := range ss.Ports {
var targetPort int
switch servicePort.Type {
case intstr.Int:
if epPort.Port == servicePort.IntValue() {
@ -451,6 +518,7 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints
}
}
glog.V(3).Infof("endpoints found: %v", upsServers)
return upsServers
}
@ -474,7 +542,6 @@ func (lbc *loadBalancerController) Run() {
go lbc.nginx.Start()
go lbc.registerHandlers()
go lbc.configController.Run(lbc.stopCh)
go lbc.ingController.Run(lbc.stopCh)
go lbc.endpController.Run(lbc.stopCh)
go lbc.svcController.Run(lbc.stopCh)

View file

@ -21,9 +21,6 @@ spec:
# Follow this https://github.com/bprashanth/Ingress/blob/master/examples/sni/nginx/test.sh
# as a guide on how to generate secrets containing SSL certificates.
volumes:
- name: secret-echoheaders-1
secret:
secretName: echoheaders
- name: dhparam-example
secret:
secretName: dhparam-example
@ -60,8 +57,6 @@ spec:
- containerPort: 8080
hostPort: 9000
volumeMounts:
- mountPath: /etc/nginx-ssl/secret-echoheaders-1
name: secret-echoheaders-1
- mountPath: /etc/nginx-ssl/dhparam
name: dhparam-example
# the flags tcp-services is required because Ingress do not support TCP rules
@ -69,7 +64,5 @@ spec:
# containerPort 8080 is mapped to 9000 in the node.
args:
- /nginx-third-party-lb
- --tcp-services=default/example-go:8080
- --tcp-services-configmap=default/tcp-configmap-example
- --default-backend-service=default/default-http-backend
- --custom-error-service=default/default-error-backend

View file

@ -14,14 +14,6 @@ spec:
k8s-app: nginx-ingress-lb
name: nginx-ingress-lb
spec:
# A secret for each nginx host that requires SSL. These secrets need to
# exist before hand, see README.
# Follow this https://github.com/kubernetes/contrib/Ingress/controllers/nginx-third-party/examples/certs.sh
# as a guide on how to generate secrets containing SSL certificates.
volumes:
- name: secret-echoheaders-1
secret:
secretName: secret-echoheaders-1
containers:
- image: gcr.io/google_containers/nginx-third-party:0.4
name: nginx-ingress-lb
@ -54,10 +46,6 @@ spec:
hostPort: 4444
- containerPort: 8080
hostPort: 9000
# the mountpoints for the SSL secrets must be a /etc/nginx-ssl subdirectory
volumeMounts:
- mountPath: /etc/nginx-ssl/secret-echoheaders-1
name: secret-echoheaders-1
# to configure ssl_dhparam http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam
# use the dhparam.sh file to generate and mount a secret that containing the key dhparam.pem or
# create a configuration with the content of dhparam.pem in the field sslDHParam.

View file

@ -55,3 +55,4 @@ spec:
args:
- /nginx-third-party-lb
- --default-backend-service=default/default-http-backend
- --tcp-services-configmap=default/tcp-configmap-example

View file

@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: tcp-configmap-example
data:
9000: "default/example-go:8080"

View file

@ -18,12 +18,15 @@ package main
import (
"flag"
"fmt"
"os"
"time"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/contrib/ingress/controllers/nginx-third-party/nginx"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/runtime"
@ -41,6 +44,9 @@ var (
namespace/name. The controller uses the first node port of this Service for
the default backend.`)
nxgConfigMap = flags.String("nginx-configmap", "",
`Name of the ConfigMap that contains the custom nginx configuration to use`)
tcpConfigMapName = flags.String("tcp-services-configmap", "",
`Name of the ConfigMap that contains the definition of the TCP services to expose.
The key in the map indicates the external port to be used. The value is the name of the
@ -55,12 +61,20 @@ var (
`Namespace to watch for Ingress. Default is to watch all namespaces`)
healthzPort = flags.Int("healthz-port", healthPort, "port for healthz endpoint.")
buildCfg = flags.Bool("dump-nginx-configuration", false, `Returns a ConfigMap with the default nginx configuration.
This can be used as a guide to create a custom configuration.`)
)
func main() {
flags.AddGoFlagSet(flag.CommandLine)
flags.Parse(os.Args)
if *buildCfg {
fmt.Printf("Example of ConfigMap to customize NGINX configuration:\n%v", nginx.ConfigMapAsString())
os.Exit(0)
}
if *defaultSvc == "" {
glog.Fatalf("Please specify --default-backend")
}
@ -70,7 +84,10 @@ func main() {
glog.Fatalf("failed to create client: %v", err)
}
lbInfo, _ := getLBDetails(kubeClient)
lbInfo, err := getLBDetails(kubeClient)
if err != nil {
glog.Fatalf("unexpected error getting runtime information: %v", err)
}
err = isValidService(kubeClient, *defaultSvc)
if err != nil {

View file

@ -1,14 +1,14 @@
{{ $cfg := .cfg }}
daemon off;
worker_processes {{ $cfg.WorkerProcesses }};
worker_processes {{ $cfg.workerProcesses }};
pid /run/nginx.pid;
worker_rlimit_nofile 131072;
events {
worker_connections {{ $cfg.MaxWorkerConnections }};
worker_connections {{ $cfg.maxWorkerConnections }};
}
http {
@ -24,37 +24,37 @@ http {
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout {{ $cfg.KeepAlive }}s;
keepalive_timeout {{ $cfg.keepAlive }}s;
types_hash_max_size 2048;
server_names_hash_max_size {{ $cfg.ServerNameHashMaxSize }};
server_names_hash_bucket_size {{ $cfg.ServerNameHashBucketSize }};
server_names_hash_max_size {{ $cfg.serverNameHashMaxSize }};
server_names_hash_bucket_size {{ $cfg.serverNameHashBucketSize }};
include /etc/nginx/mime.types;
default_type application/octet-stream;
{{ if $cfg.UseGzip }}
{{ if $cfg.useGzip }}
gzip on;
gzip_comp_level 5;
gzip_http_version 1.1;
gzip_min_length 256;
gzip_types {{ $.cfg.GzipTypes }};
gzip_types {{ $cfg.gzipTypes }};
gzip_proxied any;
gzip_vary on;
{{ end }}
client_max_body_size "{{ $cfg.BodySize }}";
client_max_body_size "{{ $cfg.bodySize }}";
{{ if $cfg.UseProxyProtocol }}
set_real_ip_from {{ $cfg.ProxyRealIpCidr }};
{{ if $cfg.useProxyProtocol }}
set_real_ip_from {{ $cfg.proxyRealIpCidr }};
real_ip_header proxy_protocol;
{{ end }}
log_format upstreaminfo '{{ if $cfg.UseProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
log_format upstreaminfo '{{ if $cfg.useProxyProtocol }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - '
'[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
'$request_length $request_time $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';
access_log /var/log/nginx/access.log upstreaminfo;
error_log /var/log/nginx/error.log {{ $cfg.ErrorLogLevel }};
error_log /var/log/nginx/error.log {{ $cfg.errorLogLevel }};
{{ if not (empty .defResolver) }}# Custom dns resolver.
resolver {{ .defResolver }} valid=30s;
@ -72,7 +72,7 @@ http {
}
map $access_scheme $sts {
'https' 'max-age={{ $cfg.HtsMaxAge }}{{ if $cfg.HtsIncludeSubdomains }}; includeSubDomains{{ end }}; preload';
'https' 'max-age={{ $cfg.htsMaxAge }}{{ if $cfg.htsIncludeSubdomains }}; includeSubDomains{{ end }}; preload';
}
# Map a response error watching the header Content-Type
@ -93,23 +93,23 @@ http {
server_name_in_redirect off;
port_in_redirect off;
ssl_protocols {{ $cfg.SSLProtocols }};
ssl_protocols {{ $cfg.sslProtocols }};
# turn on session caching to drastically improve performance
{{ if $cfg.SSLSessionCache }}
ssl_session_cache builtin:1000 shared:SSL:{{ $cfg.SSLSessionCacheSize }};
ssl_session_timeout {{ $cfg.SSLSessionTimeout }};
{{ if $cfg.sslSessionCache }}
ssl_session_cache builtin:1000 shared:SSL:{{ $cfg.sslSessionCacheSize }};
ssl_session_timeout {{ $cfg.sslSessionTimeout }};
{{ end }}
# allow configuring ssl session tickets
ssl_session_tickets {{ if $cfg.SSLSessionTickets }}on{{ else }}off{{ end }};
ssl_session_tickets {{ if $cfg.sslSessionTickets }}on{{ else }}off{{ end }};
# slightly reduce the time-to-first-byte
ssl_buffer_size {{ $cfg.SSLBufferSize }};
ssl_buffer_size {{ $cfg.sslBufferSize }};
{{ if not (empty $cfg.SSLCiphers) }}
{{ if not (empty $cfg.sslCiphers) }}
# allow configuring custom ssl ciphers
ssl_ciphers '{{ $cfg.SSLCiphers }}';
ssl_ciphers '{{ $cfg.sslCiphers }}';
ssl_prefer_server_ciphers on;
{{ end }}
@ -142,9 +142,9 @@ http {
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_connect_timeout {{ .cfg.ProxyConnectTimeout }}s;
proxy_send_timeout {{ .cfg.ProxySendTimeout }}s;
proxy_read_timeout {{ .cfg.ProxyReadTimeout }}s;
proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s;
proxy_send_timeout {{ $cfg.proxySendTimeout }}s;
proxy_read_timeout {{ $cfg.proxyReadTimeout }}s;
proxy_buffering off;
@ -155,10 +155,10 @@ http {
proxy_set_header Connection $connection_upgrade;
# In case of errors try the next upstream server before returning an error
proxy_next_upstream error timeout http_501 http_502 http_503 http_504;
proxy_next_upstream error timeout http_502 http_503 http_504;
server {
listen 80 default_server{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
listen 80 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }};
location / {
return 200;
@ -196,13 +196,12 @@ http {
{{ end }}
{{ template "CUSTOM_ERRORS" $cfg }}
}
{{ end }}
# default server, including healthcheck
server {
listen 8080 default_server{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }} reuseport;
listen 8080 default_server{{ if $cfg.useProxyProtocol }} proxy_protocol{{ end }} reuseport;
#vhost_traffic_status_filter_by_host on;
location /healthz {
@ -229,7 +228,7 @@ http {
# default server for services without endpoints
server {
listen 8081;
listen 8181;
location / {
content_by_lua_block {
@ -241,20 +240,17 @@ http {
# TCP services
stream {
{{ range $name, $upstream := .tcpUpstreams }}
upstream tcp-{{ $upstream.Name }} {
least_conn;
{{ range $server := $upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }};
{{ range $i, $tcpServer := .tcpUpstreams }}
upstream tcp-{{ $tcpServer.Upstream.Name }} {
{{ range $server := $tcpServer.Upstream.Backends }}server {{ $server.Address }}:{{ $server.Port }};
{{ end }}
}
{{ end }}
{{ range $tcpSvc := .tcpServices }}
server {
listen {{ $tcpSvc.ExposedPort }};
proxy_connect_timeout {{ $cfg.ProxyConnectTimeout }}s;
proxy_timeout {{ $cfg.ProxyReadTimeout }}s;
proxy_pass {{ $tcpSvc.Namespace }}-{{ $tcpSvc.ServiceName }}:{{ $tcpSvc.ServicePort }};
listen {{ $tcpServer.Path }};
proxy_connect_timeout {{ $cfg.proxyConnectTimeout }}s;
proxy_timeout {{ $cfg.proxyReadTimeout }}s;
proxy_pass tcp-{{ $tcpServer.Upstream.Name }};
}
{{ end }}
}
@ -291,12 +287,6 @@ stream {
}
}
location @custom_501 {
content_by_lua_block {
openURL(501)
}
}
location @custom_502 {
content_by_lua_block {
openURL(502)

View file

@ -23,10 +23,6 @@ import (
"github.com/golang/glog"
)
const (
nginxEvent = "NGINX"
)
// Start starts a nginx (master process) and waits. If the process ends
// we need to kill the controller process and return the reason.
func (ngx *NginxManager) Start() {
@ -54,11 +50,12 @@ func (ngx *NginxManager) Start() {
// shut down, stop accepting new connections and continue to service current requests
// until all such requests are serviced. After that, the old worker processes exit.
// http://nginx.org/en/docs/beginners_guide.html#control
func (ngx *NginxManager) CheckAndReload(cfg *nginxConfiguration, upstreams []*Upstream, servers []*Server, servicesL4 []*Upstream) {
func (ngx *NginxManager) CheckAndReload(cfg *nginxConfiguration, ingressCfg IngressConfig) {
ngx.reloadLock.Lock()
defer ngx.reloadLock.Unlock()
newCfg, err := ngx.writeCfg(cfg, upstreams, servers, servicesL4)
newCfg, err := ngx.writeCfg(cfg, ingressCfg)
if err != nil {
glog.Errorf("failed to write new nginx configuration. Avoiding reload: %v", err)
return

View file

@ -17,6 +17,7 @@ limitations under the License.
package nginx
import (
"fmt"
"os"
"runtime"
"strconv"
@ -26,10 +27,11 @@ import (
"github.com/golang/glog"
"k8s.io/contrib/ingress/controllers/nginx-third-party/ssl"
"github.com/fatih/structs"
"github.com/ghodss/yaml"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
k8sruntime "k8s.io/kubernetes/pkg/runtime"
)
const (
@ -82,153 +84,138 @@ const (
type nginxConfiguration struct {
// http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
// Sets the maximum allowed size of the client request body
BodySize string `json:"bodySize,omitempty" structs:",omitempty"`
BodySize string `json:"bodySize,omitempty" structs:"bodySize,omitempty"`
// http://nginx.org/en/docs/ngx_core_module.html#error_log
// Configures logging level [debug | info | notice | warn | error | crit | alert | emerg]
// Log levels above are listed in the order of increasing severity
ErrorLogLevel string `json:"errorLogLevel,omitempty" structs:",omitempty"`
ErrorLogLevel string `json:"errorLogLevel,omitempty" structs:"errorLogLevel,omitempty"`
// Enables or disables the header HTS in servers running SSL
UseHTS bool `json:"useHTS,omitempty" structs:",omitempty"`
UseHTS bool `json:"useHTS,omitempty" structs:"useHTS,omitempty"`
// Enables or disables the use of HTS in all the subdomains of the servername
HTSIncludeSubdomains bool `json:"htsIncludeSubdomains,omitempty" structs:",omitempty"`
HTSIncludeSubdomains bool `json:"htsIncludeSubdomains,omitempty" structs:"htsIncludeSubdomains,omitempty"`
// HTTP Strict Transport Security (often abbreviated as HSTS) is a security feature (HTTP header)
// that tells browsers that the site should only be accessed using HTTPS, instead of HTTP.
// https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
// max-age is the time, in seconds, that the browser should remember that this site is only to be
// accessed using HTTPS.
HTSMaxAge string `json:"htsMaxAge,omitempty" structs:",omitempty"`
HTSMaxAge string `json:"htsMaxAge,omitempty" structs:"htsMaxAge,omitempty"`
// Time during which a keep-alive client connection will stay open on the server side.
// The zero value disables keep-alive client connections
// http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout
KeepAlive int `json:"keepAlive,omitempty" structs:",omitempty"`
KeepAlive int `json:"keepAlive,omitempty" structs:"keepAlive,omitempty"`
// Maximum number of simultaneous connections that can be opened by each worker process
// http://nginx.org/en/docs/ngx_core_module.html#worker_connections
MaxWorkerConnections int `json:"maxWorkerConnections,omitempty" structs:",omitempty"`
MaxWorkerConnections int `json:"maxWorkerConnections,omitempty" structs:"maxWorkerConnections,omitempty"`
// Defines a timeout for establishing a connection with a proxied server.
// It should be noted that this timeout cannot usually exceed 75 seconds.
// http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout
ProxyConnectTimeout int `json:"proxyConnectTimeout,omitempty" structs:",omitempty"`
ProxyConnectTimeout int `json:"proxyConnectTimeout,omitempty" structs:"proxyConnectTimeout,omitempty"`
// If UseProxyProtocol is enabled ProxyRealIPCIDR defines the default IP/network address
// of your external load balancer
ProxyRealIPCIDR string `json:"proxyRealIPCIDR,omitempty" structs:",omitempty"`
ProxyRealIPCIDR string `json:"proxyRealIPCIDR,omitempty" structs:"proxyRealIPCIDR,omitempty"`
// Timeout in seconds for reading a response from the proxied server. The timeout is set only between
// two successive read operations, not for the transmission of the whole response
// http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout
ProxyReadTimeout int `json:"proxyReadTimeout,omitempty" structs:",omitempty"`
ProxyReadTimeout int `json:"proxyReadTimeout,omitempty" structs:"proxyReadTimeout,omitempty"`
// Timeout in seconds for transmitting a request to the proxied server. The timeout is set only between
// two successive write operations, not for the transmission of the whole request.
// http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout
ProxySendTimeout int `json:"proxySendTimeout,omitempty" structs:",omitempty"`
ProxySendTimeout int `json:"proxySendTimeout,omitempty" structs:"proxySendTimeout,omitempty"`
// Configures name servers used to resolve names of upstream servers into addresses
// http://nginx.org/en/docs/http/ngx_http_core_module.html#resolver
Resolver string `json:"resolver,omitempty" structs:",omitempty"`
Resolver string `json:"resolver,omitempty" structs:"resolver,omitempty"`
// Maximum size of the server names hash tables used in server names, map directives values,
// MIME types, names of request header strings, etc.
// http://nginx.org/en/docs/hash.html
// http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_max_size
ServerNameHashMaxSize int `json:"serverNameHashMaxSize,omitempty" structs:",omitempty"`
ServerNameHashMaxSize int `json:"serverNameHashMaxSize,omitempty" structs:"serverNameHashMaxSize,omitempty"`
// Size of the bucket for the server names hash tables
// http://nginx.org/en/docs/hash.html
// http://nginx.org/en/docs/http/ngx_http_core_module.html#server_names_hash_bucket_size
ServerNameHashBucketSize int `json:"serverNameHashBucketSize,omitempty" structs:",omitempty"`
ServerNameHashBucketSize int `json:"serverNameHashBucketSize,omitempty" structs:"serverNameHashBucketSize,omitempty"`
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size
// Sets the size of the buffer used for sending data.
// 4k helps NGINX to improve TLS Time To First Byte (TTTFB)
// https://www.igvita.com/2013/12/16/optimizing-nginx-tls-time-to-first-byte/
SSLBufferSize string `json:"sslBufferSize,omitempty" structs:",omitempty"`
SSLBufferSize string `json:"sslBufferSize,omitempty" structs:"sslBufferSize,omitempty"`
// List of ciphers to enable. The ciphers are specified in the format understood by
// the OpenSSL library
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers
SSLCiphers string `json:"sslCiphers,omitempty" structs:",omitempty"`
SSLCiphers string `json:"sslCiphers,omitempty" structs:"sslCiphers,omitempty"`
// Base64 string that contains Diffie-Hellman key to help with "Perfect Forward Secrecy"
// https://www.openssl.org/docs/manmaster/apps/dhparam.html
// https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam
SSLDHParam string `json:"sslDHParam,omitempty" structs:",omitempty"`
SSLDHParam string `json:"sslDHParam,omitempty" structs:"sslDHParam,omitempty"`
// SSL enabled protocols to use
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols
SSLProtocols string `json:"sslProtocols,omitempty" structs:",omitempty"`
SSLProtocols string `json:"sslProtocols,omitempty" structs:"sslProtocols,omitempty"`
// Enables or disables the use of shared SSL cache among worker processes.
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache
SSLSessionCache bool `json:"sslSessionCache,omitempty" structs:",omitempty"`
SSLSessionCache bool `json:"sslSessionCache,omitempty" structs:"sslSessionCache,omitempty"`
// Size of the SSL shared cache between all worker processes.
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache
SSLSessionCacheSize string `json:"sslSessionCacheSize,omitempty" structs:",omitempty"`
SSLSessionCacheSize string `json:"sslSessionCacheSize,omitempty" structs:"sslSessionCacheSize,omitempty"`
// Enables or disables session resumption through TLS session tickets.
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets
SSLSessionTickets bool `json:"sslSessionTickets,omitempty" structs:",omitempty"`
SSLSessionTickets bool `json:"sslSessionTickets,omitempty" structs:"sslSessionTickets,omitempty"`
// Time during which a client may reuse the session parameters stored in a cache.
// http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout
SSLSessionTimeout string `json:"sslSessionTimeout,omitempty" structs:",omitempty"`
SSLSessionTimeout string `json:"sslSessionTimeout,omitempty" structs:"sslSessionTimeout,omitempty"`
// Enables or disables the use of the PROXY protocol to receive client connection
// (real IP address) information passed through proxy servers and load balancers
// such as HAproxy and Amazon Elastic Load Balancer (ELB).
// https://www.nginx.com/resources/admin-guide/proxy-protocol/
UseProxyProtocol bool `json:"useProxyProtocol,omitempty" structs:",omitempty"`
UseProxyProtocol bool `json:"useProxyProtocol,omitempty" structs:"useProxyProtocol,omitempty"`
// Enables or disables the use of the nginx module that compresses responses using the "gzip" method
// http://nginx.org/en/docs/http/ngx_http_gzip_module.html
UseGzip bool `json:"useGzip,omitempty" structs:",omitempty"`
UseGzip bool `json:"useGzip,omitempty" structs:"useGzip,omitempty"`
// MIME types in addition to "text/html" to compress. The special value “*” matches any MIME type.
// Responses with the “text/html” type are always compressed if UseGzip is enabled
GzipTypes string `json:"gzipTypes,omitempty" structs:",omitempty"`
GzipTypes string `json:"gzipTypes,omitempty" structs:"gzipTypes,omitempty"`
// Defines the number of worker processes. By default auto means number of available CPU cores
// http://nginx.org/en/docs/ngx_core_module.html#worker_processes
WorkerProcesses string `json:"workerProcesses,omitempty" structs:",omitempty"`
}
// Service service definition to use in nginx template
type Service struct {
ServiceName string
ServicePort string
Namespace string
// ExposedPort port used by nginx to listen for the stream upstream
ExposedPort string
WorkerProcesses string `json:"workerProcesses,omitempty" structs:"workerProcesses,omitempty"`
}
// NginxManager ...
type NginxManager struct {
defCfg *nginxConfiguration
defResolver string
// path to the configuration file to be used by nginx
ConfigFile string
defCfg *nginxConfiguration
defResolver string
sslDHParam string
servicesL4 []Service
client *client.Client
// template loaded ready to be used to generate the nginx configuration file
template *template.Template
// obj runtime object to be used in events
obj k8sruntime.Object
reloadLock *sync.Mutex
}
@ -280,7 +267,7 @@ func NewManager(kubeClient *client.Client) *NginxManager {
ngx.createCertsDir(sslDirectory)
ngx.sslDHParam = ssl.SearchDHParamFile(sslDirectory)
ngx.sslDHParam = ngx.SearchDHParamFile(sslDirectory)
ngx.loadTemplate()
@ -292,3 +279,25 @@ func (nginx *NginxManager) createCertsDir(base string) {
glog.Fatalf("Couldn't create directory %v: %v", base, err)
}
}
// ConfigMapAsString returns a ConfigMap with the default NGINX
// configuration, to be used as a guide for providing a custom configuration
func ConfigMapAsString() string {
cfg := &api.ConfigMap{}
cfg.Name = "custom-name"
cfg.Namespace = "a-valid-namespace"
cfg.Data = make(map[string]string)
data := structs.Map(newDefaultNginxCfg())
for k, v := range data {
cfg.Data[k] = fmt.Sprintf("%v", v)
}
out, err := yaml.Marshal(cfg)
if err != nil {
glog.Warningf("Unexpected error creating default configuration: %v", err)
return ""
}
return string(out)
}
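The lowercase keys used throughout `nginx.tmpl` (for example `{{ $cfg.bodySize }}`) come from the `structs` tags above: `structs.Map` turns the configuration struct into a map keyed by those tag names, and that map is what the template dereferences. A small standalone sketch of the mechanism, using only a subset of illustrative fields (not the real `nginxConfiguration` struct):

```go
package main

import (
	"os"
	"text/template"

	"github.com/fatih/structs"
)

// A trimmed-down stand-in for nginxConfiguration: the structs tags decide the
// map keys produced by structs.Map, and those keys are exactly the names the
// nginx template looks up.
type exampleCfg struct {
	BodySize  string `structs:"bodySize"`
	UseGzip   bool   `structs:"useGzip"`
	KeepAlive int    `structs:"keepAlive"`
}

const exampleTmpl = `client_max_body_size "{{ .cfg.bodySize }}";
keepalive_timeout {{ .cfg.keepAlive }}s;
{{ if .cfg.useGzip }}gzip on;{{ end }}
`

func main() {
	c := exampleCfg{BodySize: "1m", UseGzip: true, KeepAlive: 75}
	conf := map[string]interface{}{
		"cfg": structs.Map(c), // keys: bodySize, useGzip, keepAlive
	}
	tmpl := template.Must(template.New("nginx").Parse(exampleTmpl))
	if err := tmpl.Execute(os.Stdout, conf); err != nil {
		panic(err)
	}
}
```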

View file

@ -16,10 +16,11 @@ limitations under the License.
package nginx
// IngressNGINXConfig describes an NGINX configuration
type IngressNGINXConfig struct {
Upstreams []Upstream
Servers []Server
// IngressConfig describes an NGINX configuration
type IngressConfig struct {
Upstreams []*Upstream
Servers []*Server
TCPUpstreams []*Location
}
// Upstream describes an NGINX upstream
@ -63,7 +64,7 @@ func (c UpstreamServerByAddrPort) Less(i, j int) bool {
// Server describes an NGINX server
type Server struct {
Name string
Locations []Location
Locations []*Location
SSL bool
SSLCertificate string
SSLCertificateKey string
@ -85,7 +86,7 @@ type Location struct {
}
// LocationByPath sorts location by path
type LocationByPath []Location
type LocationByPath []*Location
func (c LocationByPath) Len() int { return len(c) }
func (c LocationByPath) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
@ -93,7 +94,7 @@ func (c LocationByPath) Less(i, j int) bool {
return c[i].Path < c[j].Path
}
// NewDefaultServer return an UpstreamServer to be use as default server returns 502.
// NewDefaultServer returns an UpstreamServer to be used as the default server, which returns 503.
func NewDefaultServer() UpstreamServer {
return UpstreamServer{Address: "127.0.0.1", Port: "8181"}
}

View file

@ -70,3 +70,24 @@ func (nginx *NginxManager) CheckSSLCertificate(secretName string) ([]string, err
glog.V(2).Infof("DNS %v %v\n", cn, len(cn))
return cn, nil
}
// SearchDHParamFile iterates all the secrets mounted inside the /etc/nginx-ssl directory
// in order to find a file with the name dhparam.pem. If such a file exists it
// returns the path; otherwise it returns an empty string.
func (nginx *NginxManager) SearchDHParamFile(baseDir string) string {
files, _ := ioutil.ReadDir(baseDir)
for _, file := range files {
if !file.IsDir() {
continue
}
dhPath := fmt.Sprintf("%v/%v/dhparam.pem", baseDir, file.Name())
if _, err := os.Stat(dhPath); err == nil {
glog.Infof("using file '%v' for parameter ssl_dhparam", dhPath)
return dhPath
}
}
glog.Warning("no file dhparam.pem found in secrets")
return ""
}

View file

@ -24,7 +24,6 @@ import (
"github.com/fatih/structs"
"github.com/golang/glog"
"github.com/imdario/mergo"
)
var funcMap = template.FuncMap{
@ -43,15 +42,15 @@ func (ngx *NginxManager) loadTemplate() {
ngx.template = tmpl
}
func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, upstreams []*Upstream, servers []*Server, tcpUpstreams []*Upstream) (bool, error) {
func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, ingressCfg IngressConfig) (bool, error) {
fromMap := structs.Map(cfg)
toMap := structs.Map(ngx.defCfg)
curNginxCfg := mergo.MergeWithOverwrite(toMap, fromMap)
curNginxCfg := merge(toMap, fromMap)
conf := make(map[string]interface{})
conf["upstreams"] = upstreams
conf["servers"] = servers
conf["tcpUpstreams"] = tcpUpstreams
conf["upstreams"] = ingressCfg.Upstreams
conf["servers"] = ingressCfg.Servers
conf["tcpUpstreams"] = ingressCfg.TCPUpstreams
conf["defResolver"] = ngx.defResolver
conf["sslDHParam"] = ngx.sslDHParam
conf["cfg"] = curNginxCfg
@ -59,11 +58,7 @@ func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, upstreams []*Upstream
buffer := new(bytes.Buffer)
err := ngx.template.Execute(buffer, conf)
if err != nil {
return false, err
}
changed, err := ngx.needsReload(buffer)
if err != nil {
glog.Infof("NGINX error: %v", err)
return false, err
}
@ -75,5 +70,10 @@ func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, upstreams []*Upstream
glog.Infof("NGINX configuration: %v", string(b))
}
changed, err := ngx.needsReload(buffer)
if err != nil {
return false, err
}
return changed, nil
}

View file

@ -24,6 +24,7 @@ import (
"net/http"
"os"
"os/exec"
"reflect"
"strings"
"github.com/golang/glog"
@ -76,8 +77,7 @@ func getDnsServers() []string {
return nameservers
}
// ReadConfig obtains the configuration defined by the user or returns the default if it does not
// exists or if is not a well formed json object
// ReadConfig obtains the configuration defined by the user merged with the defaults.
func (ngx *NginxManager) ReadConfig(config *api.ConfigMap) (*nginxConfiguration, error) {
if len(config.Data) == 0 {
return newDefaultNginxCfg(), nil
@ -157,3 +157,32 @@ func diff(b1, b2 []byte) (data []byte, err error) {
}
return
}
func merge(dst, src map[string]interface{}) map[string]interface{} {
for key, srcVal := range src {
if dstVal, ok := dst[key]; ok {
srcMap, srcMapOk := toMap(srcVal)
dstMap, dstMapOk := toMap(dstVal)
if srcMapOk && dstMapOk {
srcVal = merge(dstMap, srcMap)
}
}
dst[key] = srcVal
}
return dst
}
func toMap(iface interface{}) (map[string]interface{}, bool) {
value := reflect.ValueOf(iface)
if value.Kind() == reflect.Map {
m := map[string]interface{}{}
for _, k := range value.MapKeys() {
m[k.String()] = value.MapIndex(k).Interface()
}
return m, true
}
return map[string]interface{}{}, false
}
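As a quick usage sketch of the override semantics of the new merge helper (written as a test, assuming it sits in the same package as `merge` above; the map keys are illustrative): values supplied by the user ConfigMap win over the defaults, while keys the user did not set keep their default values.

```go
package nginx // assumed to match the package that defines merge

import (
	"reflect"
	"testing"
)

// User-supplied values (src) override the defaults (dst); untouched keys
// keep their default values.
func TestMergeOverridesDefaults(t *testing.T) {
	defaults := map[string]interface{}{
		"bodySize":  "1m",
		"useGzip":   true,
		"keepAlive": 75,
	}
	user := map[string]interface{}{
		"bodySize": "8m",
	}
	got := merge(defaults, user)
	want := map[string]interface{}{
		"bodySize":  "8m", // overridden by the user value
		"useGzip":   true, // default preserved
		"keepAlive": 75,
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("merge() = %v, want %v", got, want)
	}
}
```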

View file

@ -1,46 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ssl
import (
"fmt"
"io/ioutil"
"os"
"github.com/golang/glog"
)
// SearchDHParamFile iterates all the secrets mounted inside the /etc/nginx-ssl directory
// in order to find a file with the name dhparam.pem. If such file exists it will
// returns the path. If not it just returns an empty string
func SearchDHParamFile(baseDir string) string {
files, _ := ioutil.ReadDir(baseDir)
for _, file := range files {
if !file.IsDir() {
continue
}
dhPath := fmt.Sprintf("%v/%v/dhparam.pem", baseDir, file.Name())
if _, err := os.Stat(dhPath); err == nil {
glog.Infof("using file '%v' for parameter ssl_dhparam", dhPath)
return dhPath
}
}
glog.Warning("no file dhparam.pem found in secrets")
return ""
}

View file

@ -17,56 +17,27 @@ limitations under the License.
package main
import (
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"k8s.io/contrib/ingress/controllers/nginx-third-party/nginx"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/unversioned"
)
const (
httpPort = "80"
httpsPort = "443"
)
var (
errMissingPodInfo = fmt.Errorf("Unable to get POD information")
errInvalidKind = fmt.Errorf("Please check the field Kind, only ReplicationController or DaemonSet are allowed")
)
// StoreToIngressLister makes a Store that lists Ingress.
// TODO: use cache/listers post 1.1.
type StoreToIngressLister struct {
cache.Store
}
// List lists all Ingress' in the store.
func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) {
for _, m := range s.Store.List() {
ing.Items = append(ing.Items, *(m.(*extensions.Ingress)))
}
return ing, nil
}
// StoreToConfigMapLister makes a Store that lists existing ConfigMap.
type StoreToConfigMapLister struct {
cache.Store
}
// getLBDetails returns runtime information about the pod: name, IP address and namespace.
func getLBDetails(kubeClient *unversioned.Client) (rc *lbInfo, err error) {
func getLBDetails(kubeClient *unversioned.Client) (*lbInfo, error) {
podIP := os.Getenv("POD_IP")
podName := os.Getenv("POD_NAME")
podNs := os.Getenv("POD_NAMESPACE")
@ -76,30 +47,11 @@ func getLBDetails(kubeClient *unversioned.Client) (rc *lbInfo, err error) {
return nil, errMissingPodInfo
}
annotations := pod.Annotations["kubernetes.io/created-by"]
var sref api.SerializedReference
err = json.Unmarshal([]byte(annotations), &sref)
if err != nil {
return nil, err
}
rc = &lbInfo{
ObjectName: sref.Reference.Name,
return &lbInfo{
PodIP: podIP,
Podname: podName,
PodNamespace: podNs,
}
switch sref.Reference.Kind {
case "ReplicationController":
rc.DeployType = &api.ReplicationController{}
return rc, nil
case "DaemonSet":
rc.DeployType = &extensions.DaemonSet{}
return rc, nil
default:
return nil, errInvalidKind
}
}, nil
}
func isValidService(kubeClient *unversioned.Client, name string) error {
@ -112,84 +64,10 @@ func isValidService(kubeClient *unversioned.Client, name string) error {
return fmt.Errorf("Invalid name format (namespace/name) in service '%v'", name)
}
_, err = kubeClient.Services(parts[0]).Get(parts[1])
_, err := kubeClient.Services(parts[0]).Get(parts[1])
return err
}
// func getService(kubeClient *unversioned.Client, name string) (nginx.Service, error) {
// if name == "" {
// return nginx.Service{}, fmt.Errorf("Empty string is not a valid service name")
// }
// parts := strings.Split(name, "/")
// if len(parts) != 2 {
// glog.Fatalf("Please check the service format (namespace/name) in service %v", name)
// }
// defaultPort, err := getServicePorts(kubeClient, parts[0], parts[1])
// if err != nil {
// return nginx.Service{}, fmt.Errorf("Error obtaining service %v: %v", name, err)
// }
// return nginx.Service{
// ServiceName: parts[1],
// ServicePort: defaultPort[0], //TODO: which port?
// Namespace: parts[0],
// }, nil
// }
// // getServicePorts returns the ports defined in a service spec
// func getServicePorts(kubeClient *unversioned.Client, ns, name string) (ports []string, err error) {
// var svc *api.Service
// glog.Infof("Checking service %v/%v", ns, name)
// svc, err = kubeClient.Services(ns).Get(name)
// if err != nil {
// return
// }
// for _, p := range svc.Spec.Ports {
// if p.Port != 0 {
// ports = append(ports, strconv.Itoa(p.Port))
// break
// }
// }
// glog.Infof("Ports for %v/%v : %v", ns, name, ports)
// return
// }
// func getTCPServices(kubeClient *unversioned.Client, tcpServices string) []nginx.Service {
// svcs := []nginx.Service{}
// for _, svc := range strings.Split(tcpServices, ",") {
// if svc == "" {
// continue
// }
// namePort := strings.Split(svc, ":")
// if len(namePort) == 2 {
// tcpSvc, err := getService(kubeClient, namePort[0])
// if err != nil {
// glog.Errorf("%s", err)
// continue
// }
// // the exposed TCP service cannot use 80 or 443 as ports
// if namePort[1] == httpPort || namePort[1] == httpsPort {
// glog.Errorf("The TCP service %v cannot use ports 80 or 443 (it creates a conflict with nginx)", svc)
// continue
// }
// tcpSvc.ExposedPort = namePort[1]
// svcs = append(svcs, tcpSvc)
// } else {
// glog.Errorf("TCP services should take the form namespace/name:port not %v from %v", namePort, svc)
// }
// }
// return svcs
// }
func isHostValid(host string, cns []string) bool {
for _, cn := range cns {
if matchHostnames(cn, host) {
@ -226,3 +104,12 @@ func matchHostnames(pattern, host string) bool {
return true
}
func parseNsName(input string) (string, string, error) {
nsName := strings.Split(input, "/")
if len(nsName) != 2 {
return "", "", fmt.Errorf("invalid format (namespace/name) found in '%v'", input)
}
return nsName[0], nsName[1], nil
}