ingress-nginx-helm/internal/ingress/controller/nginx.go


/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"

"github.com/golang/glog"

proxyproto "github.com/armon/go-proxyproto"
"github.com/eapache/channels"

apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/filesystem"

"k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress"
"k8s.io/ingress-nginx/internal/ingress/annotations"
"k8s.io/ingress-nginx/internal/ingress/annotations/class"
ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config"
"k8s.io/ingress-nginx/internal/ingress/controller/process"
"k8s.io/ingress-nginx/internal/ingress/controller/store"
ngx_template "k8s.io/ingress-nginx/internal/ingress/controller/template"
"k8s.io/ingress-nginx/internal/ingress/status"
ing_net "k8s.io/ingress-nginx/internal/net"
"k8s.io/ingress-nginx/internal/net/dns"
"k8s.io/ingress-nginx/internal/net/ssl"
"k8s.io/ingress-nginx/internal/task"
"k8s.io/ingress-nginx/internal/watch"
)
type statusModule string
const (
ngxHealthPath = "/healthz"
defaultStatusModule statusModule = "default"
vtsStatusModule statusModule = "vts"
)
var (
tmplPath = "/etc/nginx/template/nginx.tmpl"
cfgPath = "/etc/nginx/nginx.conf"
nginxBinary = "/usr/sbin/nginx"
)
// NewNGINXController creates a new NGINX Ingress controller.
func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController {
ngx := os.Getenv("NGINX_BINARY")
if ngx == "" {
ngx = nginxBinary
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: config.Client.CoreV1().Events(config.Namespace),
})
h, err := dns.GetSystemNameServers()
if err != nil {
glog.Warningf("Error reading system nameservers: %v", err)
}
n := &NGINXController{
binary: ngx,
isIPV6Enabled: ing_net.IsIPv6Enabled(),
resolver: h,
cfg: config,
syncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(config.SyncRateLimit, 1),
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
Component: "nginx-ingress-controller",
}),
stopCh: make(chan struct{}),
updateCh: channels.NewRingChannel(1024),
stopLock: &sync.Mutex{},
fileSystem: fs,
runningConfig: new(ingress.Configuration),
Proxy: &TCPProxy{},
}
n.store = store.New(
config.EnableSSLChainCompletion,
config.Namespace,
config.ConfigMapName,
config.TCPConfigMapName,
config.UDPConfigMapName,
config.DefaultSSLCertificate,
config.ResyncPeriod,
config.Client,
fs,
n.updateCh)
n.stats = newStatsCollector(config.Namespace, class.IngressClass, n.binary, n.cfg.ListenPorts.Status)
n.syncQueue = task.NewTaskQueue(n.syncIngress)
n.annotations = annotations.NewAnnotationExtractor(n.store)
if config.UpdateStatus {
n.syncStatus = status.NewStatusSyncer(status.Config{
Client: config.Client,
PublishService: config.PublishService,
PublishStatusAddress: config.PublishStatusAddress,
IngressLister: n.store,
ElectionID: config.ElectionID,
IngressClass: class.IngressClass,
DefaultIngressClass: class.DefaultClass,
UpdateStatusOnShutdown: config.UpdateStatusOnShutdown,
UseNodeInternalIP: config.UseNodeInternalIP,
})
} else {
glog.Warning("Update of Ingress status is disabled (flag --update-status)")
}
onTemplateChange := func() {
template, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil {
// this error is different from the rest because it must be clear why nginx is not working
glog.Errorf(`
-------------------------------------------------------------------------------
Error loading new template: %v
-------------------------------------------------------------------------------
`, err)
return
}
n.t = template
glog.Info("New NGINX configuration template loaded.")
n.SetForceReload(true)
}
ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil {
glog.Fatalf("Invalid NGINX configuration template: %v", err)
}
n.t = ngxTpl
// TODO: refactor
if _, ok := fs.(filesystem.DefaultFs); !ok {
watch.NewDummyFileWatcher(tmplPath, onTemplateChange)
} else {
_, err = watch.NewFileWatcher(tmplPath, onTemplateChange)
if err != nil {
glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err)
}
filesToWatch := []string{}
err := filepath.Walk("/etc/nginx/geoip/", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
filesToWatch = append(filesToWatch, path)
return nil
})
if err != nil {
glog.Fatalf("Error creating file watchers: %v", err)
}
for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() {
glog.Infof("File %v changed. Reloading NGINX", f)
n.SetForceReload(true)
})
if err != nil {
glog.Fatalf("Error creating file watcher for %v: %v", f, err)
}
}
}
return n
}
// NGINXController describes a NGINX Ingress controller.
type NGINXController struct {
cfg *Configuration
annotations annotations.Extractor
recorder record.EventRecorder
syncQueue *task.Queue
syncStatus status.Sync
syncRateLimiter flowcontrol.RateLimiter
// stopLock is used to enforce that only a single call to Stop is active at
// a given time. We allow stopping through an HTTP endpoint, and
// allowing concurrent stoppers leads to stack traces.
stopLock *sync.Mutex
stopCh chan struct{}
updateCh *channels.RingChannel
// ngxErrCh is used to detect errors with the NGINX processes
ngxErrCh chan error
// runningConfig contains the running configuration in the Backend
runningConfig *ingress.Configuration
forceReload int32
t *ngx_template.Template
binary string
resolver []net.IP
stats *statsCollector
statusModule statusModule
isIPV6Enabled bool
isShuttingDown bool
Proxy *TCPProxy
store store.Storer
fileSystem filesystem.Filesystem
}
// Start starts a new NGINX master process running in the foreground.
func (n *NGINXController) Start() {
glog.Infof("Starting NGINX Ingress controller")
n.store.Run(n.stopCh)
if n.syncStatus != nil {
go n.syncStatus.Run()
}
cmd := exec.Command(n.binary, "-c", cfgPath)
// put NGINX in another process group to prevent it
// from receiving signals meant for the controller
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
Pgid: 0,
}
if n.cfg.EnableSSLPassthrough {
n.setupSSLProxy()
}
glog.Info("Starting NGINX process")
n.start(cmd)
go n.syncQueue.Run(time.Second, n.stopCh)
// force initial sync
n.syncQueue.Enqueue(&extensions.Ingress{})
for {
select {
case err := <-n.ngxErrCh:
if n.isShuttingDown {
break
}
// if the NGINX master process dies, the workers continue to serve requests
// and health checks keep passing, but changes to Ingress objects are no
// longer reflected in the NGINX configuration, which is confusing and leads
// to bug reports. To avoid this we restart the NGINX master in case of errors.
if process.IsRespawnIfRequired(err) {
process.WaitUntilPortIsAvailable(n.cfg.ListenPorts.HTTP)
// release command resources
cmd.Process.Release()
// start a new nginx master process if the controller is not being stopped
cmd = exec.Command(n.binary, "-c", cfgPath)
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
Pgid: 0,
}
n.start(cmd)
}
case event := <-n.updateCh.Out():
if n.isShuttingDown {
break
}
if evt, ok := event.(store.Event); ok {
glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj)
if evt.Type == store.ConfigurationEvent {
n.SetForceReload(true)
}
n.syncQueue.Enqueue(evt.Obj)
} else {
glog.Warningf("Unexpected event type received %T", event)
}
case <-n.stopCh:
break
}
}
}
// Stop gracefully stops the NGINX master process.
func (n *NGINXController) Stop() error {
n.isShuttingDown = true
n.stopLock.Lock()
defer n.stopLock.Unlock()
if n.syncQueue.IsShuttingDown() {
return fmt.Errorf("shutdown already in progress")
}
glog.Infof("Shutting down controller queues")
close(n.stopCh)
go n.syncQueue.Shutdown()
if n.syncStatus != nil {
n.syncStatus.Shutdown()
}
// send stop signal to NGINX
glog.Info("Stopping NGINX process")
cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return err
}
// wait for the NGINX process to terminate
timer := time.NewTicker(time.Second * 1)
for range timer.C {
if !process.IsNginxRunning() {
glog.Info("NGINX process has stopped")
timer.Stop()
break
}
}
return nil
}
func (n *NGINXController) start(cmd *exec.Cmd) {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
glog.Fatalf("NGINX error: %v", err)
n.ngxErrCh <- err
return
}
go func() {
n.ngxErrCh <- cmd.Wait()
}()
}
// DefaultEndpoint returns the default endpoint to be used as the default server that returns 404.
func (n NGINXController) DefaultEndpoint() ingress.Endpoint {
return ingress.Endpoint{
Address: "127.0.0.1",
Port: fmt.Sprintf("%v", n.cfg.ListenPorts.Default),
Target: &apiv1.ObjectReference{},
}
}
// testTemplate checks whether the NGINX configuration in the given byte array is valid
// by running "nginx -t" against a temporary file.
func (n NGINXController) testTemplate(cfg []byte) error {
if len(cfg) == 0 {
return fmt.Errorf("Invalid NGINX configuration (empty)")
}
tmpfile, err := ioutil.TempFile("", "nginx-cfg")
if err != nil {
return err
}
defer tmpfile.Close()
err = ioutil.WriteFile(tmpfile.Name(), cfg, file.ReadWriteByUser)
if err != nil {
return err
}
out, err := exec.Command(n.binary, "-t", "-c", tmpfile.Name()).CombinedOutput()
if err != nil {
// this error is different from the rest because it must be clear why nginx is not working
oe := fmt.Sprintf(`
-------------------------------------------------------------------------------
Error: %v
%v
-------------------------------------------------------------------------------
`, err, string(out))
return errors.New(oe)
}
os.Remove(tmpfile.Name())
return nil
}
// OnUpdate is called by the synchronization loop whenever configuration
// changes were detected. The received backend Configuration is merged with the
// configuration ConfigMap before generating the final configuration file.
// Returns nil in case the backend was successfully reloaded.
func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
cfg := n.store.GetBackendConfiguration()
cfg.Resolver = n.resolver
if n.cfg.EnableSSLPassthrough {
servers := []*TCPServer{}
for _, pb := range ingressCfg.PassthroughBackends {
svc := pb.Service
if svc == nil {
glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend)
continue
}
port, err := strconv.Atoi(pb.Port.String())
if err != nil {
for _, sp := range svc.Spec.Ports {
if sp.Name == pb.Port.String() {
port = int(sp.Port)
break
}
}
} else {
for _, sp := range svc.Spec.Ports {
if sp.Port == int32(port) {
port = int(sp.Port)
break
}
}
}
// TODO: Allow PassthroughBackends to specify they support proxy-protocol
servers = append(servers, &TCPServer{
Hostname: pb.Hostname,
IP: svc.Spec.ClusterIP,
Port: port,
ProxyProtocol: false,
})
}
n.Proxy.ServerList = servers
}
// we need to check if the status module configuration changed
if cfg.EnableVtsStatus {
n.setupMonitor(vtsStatusModule)
} else {
n.setupMonitor(defaultStatusModule)
}
// NGINX cannot resize the hash tables used to store server names. For
// this reason we check if the current size is correct for the host
// names defined in the Ingress rules and adjust the value if
// necessary.
// https://trac.nginx.org/nginx/ticket/352
// https://trac.nginx.org/nginx/ticket/631
var longestName int
var serverNameBytes int
redirectServers := make(map[string]string)
for _, srv := range ingressCfg.Servers {
if longestName < len(srv.Hostname) {
longestName = len(srv.Hostname)
}
serverNameBytes += len(srv.Hostname)
if srv.RedirectFromToWWW {
var n string
if strings.HasPrefix(srv.Hostname, "www.") {
n = strings.TrimPrefix(srv.Hostname, "www.")
} else {
n = fmt.Sprintf("www.%v", srv.Hostname)
}
glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n)
if _, ok := redirectServers[n]; !ok {
found := false
for _, esrv := range ingressCfg.Servers {
if esrv.Hostname == n {
found = true
break
}
}
if !found {
redirectServers[n] = srv.Hostname
}
}
}
}
if cfg.ServerNameHashBucketSize == 0 {
nameHashBucketSize := nginxHashBucketSize(longestName)
glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize)
cfg.ServerNameHashBucketSize = nameHashBucketSize
}
serverNameHashMaxSize := nextPowerOf2(serverNameBytes)
if cfg.ServerNameHashMaxSize < serverNameHashMaxSize {
glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize)
cfg.ServerNameHashMaxSize = serverNameHashMaxSize
}
// the limit of open files is per worker process
// and we leave some room to avoid consuming all the FDs available
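// For example (illustrative figures): if sysctlFSFileMax() returns 1048576 and
// there are 4 worker processes, this leaves (1048576/4)-1024 = 261120 open
// files per worker.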
wp, err := strconv.Atoi(cfg.WorkerProcesses)
glog.V(3).Infof("Number of worker processes: %d", wp)
if err != nil {
wp = 1
}
maxOpenFiles := (sysctlFSFileMax() / wp) - 1024
glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles)
if maxOpenFiles < 1024 {
// this means the value of RLIMIT_NOFILE is too low.
maxOpenFiles = 1024
}
setHeaders := map[string]string{}
if cfg.ProxySetHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders)
if err != nil {
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err)
}
setHeaders = cmap.Data
}
addHeaders := map[string]string{}
if cfg.AddHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.AddHeaders)
if err != nil {
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err)
}
addHeaders = cmap.Data
}
sslDHParam := ""
if cfg.SSLDHParam != "" {
secretName := cfg.SSLDHParam
secret, err := n.store.GetSecret(secretName)
if err != nil {
glog.Warningf("Error reading Secret %q from local store: %v", secretName, err)
}
nsSecName := strings.Replace(secretName, "/", "-", -1)
dh, ok := secret.Data["dhparam.pem"]
if ok {
pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem)
if err != nil {
glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err)
} else {
sslDHParam = pemFileName
}
}
}
cfg.SSLDHParam = sslDHParam
tc := ngx_config.TemplateConfig{
ProxySetHeaders: setHeaders,
AddHeaders: addHeaders,
MaxOpenFiles: maxOpenFiles,
BacklogSize: sysctlSomaxconn(),
Backends: ingressCfg.Backends,
PassthroughBackends: ingressCfg.PassthroughBackends,
Servers: ingressCfg.Servers,
TCPBackends: ingressCfg.TCPEndpoints,
UDPBackends: ingressCfg.UDPEndpoints,
HealthzURI: ngxHealthPath,
CustomErrors: len(cfg.CustomHTTPErrors) > 0,
Cfg: cfg,
IsIPV6Enabled: n.isIPV6Enabled && !cfg.DisableIpv6,
NginxStatusIpv4Whitelist: cfg.NginxStatusIpv4Whitelist,
NginxStatusIpv6Whitelist: cfg.NginxStatusIpv6Whitelist,
RedirectServers: redirectServers,
IsSSLPassthroughEnabled: n.cfg.EnableSSLPassthrough,
ListenPorts: n.cfg.ListenPorts,
PublishService: n.GetPublishService(),
DynamicConfigurationEnabled: n.cfg.DynamicConfigurationEnabled,
DisableLua: n.cfg.DisableLua,
}
content, err := n.t.Write(tc)
if err != nil {
return err
}
err = n.testTemplate(content)
if err != nil {
return err
}
if glog.V(2) {
src, _ := ioutil.ReadFile(cfgPath)
if !bytes.Equal(src, content) {
tmpfile, err := ioutil.TempFile("", "new-nginx-cfg")
if err != nil {
return err
}
defer tmpfile.Close()
err = ioutil.WriteFile(tmpfile.Name(), content, file.ReadWriteByUser)
if err != nil {
return err
}
// TODO: executing diff can return exit code != 0
diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
glog.Infof("NGINX configuration diff:\n%v", string(diffOutput))
// we do not defer the deletion of temp files in order
// to keep them around for inspection in case of error
os.Remove(tmpfile.Name())
}
}
err = ioutil.WriteFile(cfgPath, content, file.ReadWriteByUser)
if err != nil {
return err
}
o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput()
if err != nil {
return fmt.Errorf("%v\n%v", err, string(o))
}
return nil
}
// nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash
// with the given longest key.
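// For example, a longest server name of 24 bytes gives aligned = 32,
// rawSize = 8+8+32 = 48 and therefore a bucket size of 64.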
func nginxHashBucketSize(longestString int) int {
// see https://github.com/kubernetes/ingress-nginx/issues/623 for an explanation
wordSize := 8 // Assume 64 bit CPU
n := longestString + 2
aligned := (n + wordSize - 1) & ^(wordSize - 1)
rawSize := wordSize + wordSize + aligned
return nextPowerOf2(rawSize)
}
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
// https://play.golang.org/p/TVSyCcdxUh
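// e.g. nextPowerOf2(48) == 64 and nextPowerOf2(64) == 64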
func nextPowerOf2(v int) int {
v--
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
v++
return v
}
func (n *NGINXController) setupSSLProxy() {
cfg := n.store.GetBackendConfiguration()
sslPort := n.cfg.ListenPorts.HTTPS
proxyPort := n.cfg.ListenPorts.SSLProxy
glog.Info("Starting TLS proxy for SSL Passthrough")
n.Proxy = &TCPProxy{
Default: &TCPServer{
Hostname: "localhost",
IP: "127.0.0.1",
Port: proxyPort,
ProxyProtocol: true,
},
}
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort))
if err != nil {
glog.Fatalf("%v", err)
}
proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout}
// accept TCP connections on the configured HTTPS port
go func() {
for {
var conn net.Conn
var err error
if n.store.GetBackendConfiguration().UseProxyProtocol {
// wrap the listener in order to decode Proxy
// Protocol before handling the connection
conn, err = proxyList.Accept()
} else {
conn, err = listener.Accept()
}
if err != nil {
glog.Warningf("Error accepting TCP connection: %v", err)
continue
}
glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
go n.Proxy.Handle(conn)
}
}()
}
// IsDynamicConfigurationEnough returns whether a Configuration can be
// dynamically applied, without reloading the backend.
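// Backends are excluded from the comparison because changes to them can be
// applied on the fly through the Lua endpoint (see configureDynamically);
// any other difference requires a reload.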
func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool {
copyOfRunningConfig := *n.runningConfig
copyOfPcfg := *pcfg
copyOfRunningConfig.Backends = []*ingress.Backend{}
copyOfPcfg.Backends = []*ingress.Backend{}
return copyOfRunningConfig.Equal(&copyOfPcfg)
}
// configureDynamically encodes new Backends in JSON format and POSTs the
// payload to an internal HTTP endpoint handled by Lua.
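// Only a subset of the backend and endpoint fields is copied into the payload,
// and the endpoint is expected to reply with HTTP 201 (Created).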
func configureDynamically(pcfg *ingress.Configuration, port int) error {
backends := make([]*ingress.Backend, len(pcfg.Backends))
for i, backend := range pcfg.Backends {
luaBackend := &ingress.Backend{
Name: backend.Name,
Port: backend.Port,
Secure: backend.Secure,
SSLPassthrough: backend.SSLPassthrough,
SessionAffinity: backend.SessionAffinity,
UpstreamHashBy: backend.UpstreamHashBy,
LoadBalancing: backend.LoadBalancing,
}
var endpoints []ingress.Endpoint
for _, endpoint := range backend.Endpoints {
endpoints = append(endpoints, ingress.Endpoint{
Address: endpoint.Address,
FailTimeout: endpoint.FailTimeout,
MaxFails: endpoint.MaxFails,
Port: endpoint.Port,
})
}
luaBackend.Endpoints = endpoints
backends[i] = luaBackend
}
buf, err := json.Marshal(backends)
if err != nil {
return err
}
glog.V(2).Infof("Posting backends configuration: %s", buf)
url := fmt.Sprintf("http://localhost:%d/configuration/backends", port)
resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
if err != nil {
return err
}
defer func() {
if err := resp.Body.Close(); err != nil {
glog.Warningf("Error while closing response body:\n%v", err)
}
}()
if resp.StatusCode != http.StatusCreated {
return fmt.Errorf("Unexpected error code: %d", resp.StatusCode)
}
return nil
}