Scrape JSON metrics from nginx vts

upgrade vts to the latest version
Giancarlo Rubio 2017-02-22 22:51:53 +01:00 committed by Manuel de Brito Fontes
parent dd7f8b4a97
commit 1d38e3a384
7 changed files with 424 additions and 75 deletions

View file

@ -24,51 +24,22 @@ import (
common "github.com/ncabatoff/process-exporter" common "github.com/ncabatoff/process-exporter"
"github.com/ncabatoff/process-exporter/proc" "github.com/ncabatoff/process-exporter/proc"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"reflect"
) )
type exeMatcher struct { // TODO add current namespace
name string // TODO add ingress class
args []string
}
func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
if len(nacl.Cmdline) == 0 {
return false, ""
}
cmd := filepath.Base(nacl.Cmdline[0])
return em.name == cmd, ""
}
func (n *NGINXController) setupMonitor(args []string) {
pc, err := newProcessCollector(true, exeMatcher{"nginx", args})
if err != nil {
glog.Fatalf("unexpected error registering nginx collector: %v", err)
}
err = prometheus.Register(pc)
if err != nil {
glog.Warningf("unexpected error registering nginx collector: %v", err)
}
}
var ( var (
numprocsDesc = prometheus.NewDesc( // descriptions borrow from https://github.com/vozlt/nginx-module-vts
"nginx_num_procs",
"number of processes",
nil, nil)
cpuSecsDesc = prometheus.NewDesc( cpuSecsDesc = prometheus.NewDesc(
"nginx_cpu_seconds_total", "nginx_cpu_seconds_total",
"Cpu usage in seconds", "Cpu usage in seconds",
nil, nil) nil, nil)
readBytesDesc = prometheus.NewDesc( numprocsDesc = prometheus.NewDesc(
"nginx_read_bytes_total", "nginx_num_procs",
"number of bytes read", "number of processes",
nil, nil)
writeBytesDesc = prometheus.NewDesc(
"nginx_write_bytes_total",
"number of bytes written",
nil, nil) nil, nil)
memResidentbytesDesc = prometheus.NewDesc( memResidentbytesDesc = prometheus.NewDesc(
@ -81,11 +52,107 @@ var (
"number of bytes of memory in use", "number of bytes of memory in use",
nil, nil) nil, nil)
readBytesDesc = prometheus.NewDesc(
"nginx_read_bytes_total",
"number of bytes read",
nil, nil)
startTimeDesc = prometheus.NewDesc(
"nginx_oldest_start_time_seconds",
"start time in seconds since 1970/01/01",
nil, nil)
writeBytesDesc = prometheus.NewDesc(
"nginx_write_bytes_total",
"number of bytes written",
nil, nil)
//vts metrics
vtsBytesDesc = prometheus.NewDesc(
"nginx_vts_bytes_total",
"Nginx bytes count",
[]string{"server_zone", "direction"}, nil)
vtsCacheDesc = prometheus.NewDesc(
"nginx_vts_cache_total",
"Nginx cache count",
[]string{"server_zone", "type"}, nil)
vtsConnectionsDesc = prometheus.NewDesc(
"nginx_vts_connections_total",
"Nginx connections count",
[]string{"type"}, nil)
vtsResponseDesc = prometheus.NewDesc(
"nginx_vts_responses_total",
"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
[]string{"server_zone", "status_code"}, nil)
vtsRequestDesc = prometheus.NewDesc(
"nginx_vts_requests_total",
"The total number of requested client connections.",
[]string{"server_zone"}, nil)
vtsFilterZoneBytesDesc = prometheus.NewDesc(
"nginx_vts_filterzone_bytes_total",
"Nginx bytes count",
[]string{"server_zone", "country", "direction"}, nil)
vtsFilterZoneResponseDesc = prometheus.NewDesc(
"nginx_vts_filterzone_responses_total",
"The number of responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
[]string{"server_zone", "country", "status_code"}, nil)
vtsFilterZoneCacheDesc = prometheus.NewDesc(
"nginx_vts_filterzone_cache_total",
"Nginx cache count",
[]string{"server_zone", "country", "type"}, nil)
vtsUpstreamBackupDesc = prometheus.NewDesc(
"nginx_vts_upstream_backup",
"Current backup setting of the server.",
[]string{"upstream", "server"}, nil)
vtsUpstreamBytesDesc = prometheus.NewDesc(
"nginx_vts_upstream_bytes_total",
"The total number of bytes sent to this server.",
[]string{"upstream", "server", "direction"}, nil)
vtsUpstreamDownDesc = prometheus.NewDesc(
"nginx_vts_upstream_down_total",
"Current down setting of the server.",
[]string{"upstream", "server"}, nil)
vtsUpstreamFailTimeoutDesc = prometheus.NewDesc(
"nginx_vts_upstream_fail_timeout",
"Current fail_timeout setting of the server.",
[]string{"upstream", "server"}, nil)
vtsUpstreamMaxFailsDesc = prometheus.NewDesc(
"nginx_vts_upstream_maxfails",
"Current max_fails setting of the server.",
[]string{"upstream", "server"}, nil)
vtsUpstreamResponsesDesc = prometheus.NewDesc(
"nginx_vts_upstream_responses_total",
"The number of upstream responses with status codes 1xx, 2xx, 3xx, 4xx, and 5xx.",
[]string{"upstream", "server", "status_code"}, nil)
vtsUpstreamRequestDesc = prometheus.NewDesc(
"nginx_vts_upstream_requests_total",
"The total number of client connections forwarded to this server.",
[]string{"upstream", "server"}, nil)
vtsUpstreamResponseMsecDesc = prometheus.NewDesc(
"nginx_vts_upstream_response_msecs_avg",
"The average of only upstream response processing times in milliseconds.",
[]string{"upstream", "server"}, nil)
vtsUpstreamWeightDesc = prometheus.NewDesc(
"nginx_vts_upstream_weight",
"Current upstream weight setting of the server.",
[]string{"upstream", "server"}, nil)
activeDesc = prometheus.NewDesc(
"nginx_active_connections",
"total number of active connections",
@ -122,6 +189,37 @@ var (
nil, nil)
)
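Each descriptor above only fixes a metric name, help text, and label set; a value is attached at scrape time through prometheus.MustNewConstMetric. A minimal sketch of that pairing (the wrapper function and the sample label values are invented for illustration):

// Illustrative sketch only: emit a single sample for vtsBytesDesc.
// "example.com" fills the server_zone label, "in" fills direction;
// ch is the channel the Prometheus client passes to Collect.
func emitExampleSample(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
		prometheus.CounterValue, 1024, "example.com", "in")
}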
type exeMatcher struct {
name string
args []string
}
func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
if len(nacl.Cmdline) == 0 {
return false, ""
}
cmd := filepath.Base(nacl.Cmdline[0])
return em.name == cmd, ""
}
func (n *NGINXController) setupMonitor(args []string, vtsCollector bool) {
pc, err := newProcessCollector(true, exeMatcher{"nginx", args}, vtsCollector)
if err != nil {
glog.Fatalf("unexpected error registering nginx collector: %v", err)
}
err = prometheus.Register(pc)
if err != nil {
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
glog.Warningf("unexpected error registering nginx collector: %v", err)
}
}
}
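exeMatcher keeps the process collector scoped to the nginx master and worker processes: MatchAndName compares only the basename of a process's argv[0] against the configured name. A small sketch of its behaviour (the wrapper function and sample command line are invented):

// Illustrative sketch only: matching is by executable basename.
func exeMatcherExample() bool {
	m := exeMatcher{name: "nginx"}
	matched, _ := m.MatchAndName(common.NameAndCmdline{
		Cmdline: []string{"/usr/sbin/nginx", "-c", "/etc/nginx/nginx.conf"},
	})
	return matched // true; any other basename returns false
}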
type (
scrapeRequest struct {
results chan<- prometheus.Metric
@ -131,22 +229,25 @@ type (
namedProcessCollector struct {
scrapeChan chan scrapeRequest
*proc.Grouper
fs *proc.FS
enableVtsCollector bool
}
)

func newProcessCollector(
children bool,
-n common.MatchNamer) (*namedProcessCollector, error) {
n common.MatchNamer,
enableVtsCollector bool) (*namedProcessCollector, error) {
fs, err := proc.NewFS("/proc")
if err != nil {
return nil, err
}
p := &namedProcessCollector{
scrapeChan: make(chan scrapeRequest),
Grouper: proc.NewGrouper(children, n),
fs: fs,
enableVtsCollector: enableVtsCollector,
}
_, err = p.Update(p.fs.AllProcs())
if err != nil {
@ -160,6 +261,7 @@ func newProcessCollector(
// Describe implements prometheus.Collector.
func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- cpuSecsDesc
ch <- numprocsDesc
ch <- readBytesDesc
@ -167,6 +269,26 @@ func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- memResidentbytesDesc
ch <- memVirtualbytesDesc
ch <- startTimeDesc
//vts metrics
ch <- vtsBytesDesc
ch <- vtsCacheDesc
ch <- vtsConnectionsDesc
ch <- vtsRequestDesc
ch <- vtsResponseDesc
ch <- vtsUpstreamBackupDesc
ch <- vtsUpstreamBytesDesc
ch <- vtsUpstreamDownDesc
ch <- vtsUpstreamFailTimeoutDesc
ch <- vtsUpstreamMaxFailsDesc
ch <- vtsUpstreamRequestDesc
ch <- vtsUpstreamResponseMsecDesc
ch <- vtsUpstreamResponsesDesc
ch <- vtsUpstreamWeightDesc
ch <- vtsFilterZoneBytesDesc
ch <- vtsFilterZoneCacheDesc
ch <- vtsFilterZoneResponseDesc
}

// Collect implements prometheus.Collector.
@ -177,15 +299,21 @@ func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) {
}
func (p *namedProcessCollector) start() {
for req := range p.scrapeChan {
ch := req.results
-p.scrape(ch)
p.scrapeNginxStatus(ch)
p.scrapeProcs(ch)
p.scrapeVts(ch)
req.done <- struct{}{}
}
}

-func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
// scrapeNginxStatus scrapes the nginx status page
func (p *namedProcessCollector) scrapeNginxStatus(ch chan<- prometheus.Metric) {
s, err := getNginxStatus()
if err != nil {
glog.Warningf("unexpected error obtaining nginx status info: %v", err)
return
@ -206,7 +334,93 @@ func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(waitingDesc,
prometheus.GaugeValue, float64(s.Waiting))
-_, err = p.Update(p.fs.AllProcs())
}
// scrapeVts scrapes nginx vts metrics
func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {
nginxMetrics, err := getNginxVtsMetrics()
if err != nil {
glog.Warningf("unexpected error obtaining nginx status info: %v", err)
return
}
reflectMetrics(&nginxMetrics.Connections, vtsConnectionsDesc, ch)
for name, zones := range nginxMetrics.UpstreamZones {
for pos, value := range zones {
reflectMetrics(&zones[pos].Responses, vtsUpstreamResponsesDesc, ch, name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamRequestDesc,
prometheus.CounterValue, float64(zones[pos].RequestCounter), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamDownDesc,
prometheus.CounterValue, float64(zones[pos].Down), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamWeightDesc,
prometheus.CounterValue, float64(zones[pos].Weight), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamResponseMsecDesc,
prometheus.CounterValue, float64(zones[pos].ResponseMsec), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamBackupDesc,
prometheus.CounterValue, float64(zones[pos].Backup), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamFailTimeoutDesc,
prometheus.CounterValue, float64(zones[pos].FailTimeout), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamMaxFailsDesc,
prometheus.CounterValue, float64(zones[pos].MaxFails), name, value.Server)
ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
prometheus.CounterValue, float64(zones[pos].InBytes), name, value.Server, "in")
ch <- prometheus.MustNewConstMetric(vtsUpstreamBytesDesc,
prometheus.CounterValue, float64(zones[pos].OutBytes), name, value.Server, "out")
}
}
for name, zone := range nginxMetrics.ServerZones {
reflectMetrics(&zone.Responses, vtsResponseDesc, ch, name)
reflectMetrics(&zone.Cache, vtsCacheDesc, ch, name)
ch <- prometheus.MustNewConstMetric(vtsRequestDesc,
prometheus.CounterValue, float64(zone.RequestCounter), name)
ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
prometheus.CounterValue, float64(zone.InBytes), name, "in")
ch <- prometheus.MustNewConstMetric(vtsBytesDesc,
prometheus.CounterValue, float64(zone.OutBytes), name, "out")
}
for serverZone, countries := range nginxMetrics.FilterZones {
for country, zone := range countries {
reflectMetrics(&zone.Responses, vtsFilterZoneResponseDesc, ch, serverZone, country)
reflectMetrics(&zone.Cache, vtsFilterZoneCacheDesc, ch, serverZone, country)
ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
prometheus.CounterValue, float64(zone.InBytes), serverZone, country, "in")
ch <- prometheus.MustNewConstMetric(vtsFilterZoneBytesDesc,
prometheus.CounterValue, float64(zone.OutBytes), serverZone, country, "out")
}
}
}
func (p *namedProcessCollector) scrapeProcs(ch chan<- prometheus.Metric) {
_, err := p.Update(p.fs.AllProcs())
if err != nil {
glog.Warningf("unexpected error obtaining nginx process info: %v", err)
return
@ -231,3 +445,18 @@ func (p *namedProcessCollector) scrape(ch chan<- prometheus.Metric) {
prometheus.CounterValue, float64(gcounts.WriteBytes))
}
}
func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {
val := reflect.ValueOf(value).Elem()
for i := 0; i < val.NumField(); i++ {
tag := val.Type().Field(i).Tag
labels := append(labels, tag.Get("json"))
ch <- prometheus.MustNewConstMetric(desc,
prometheus.CounterValue, float64(val.Field(i).Interface().(float64)),
labels...)
}
}
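reflectMetrics walks the fields of the struct it is given and appends each field's json tag as the final label value, so a single Response value fans out into one labeled sample per status-code bucket. Roughly, the call for a server zone unrolls to something like this (zone name invented, wrapper function added for illustration):

// Illustrative sketch only: what reflectMetrics(&zone.Responses, vtsResponseDesc, ch, "example.com")
// expands to for the first two status-code buckets.
func emitResponsesUnrolled(ch chan<- prometheus.Metric, zone ServerZone) {
	ch <- prometheus.MustNewConstMetric(vtsResponseDesc,
		prometheus.CounterValue, zone.Responses.OneXx, "example.com", "1xx")
	ch <- prometheus.MustNewConstMetric(vtsResponseDesc,
		prometheus.CounterValue, zone.Responses.TwoXx, "example.com", "2xx")
	// ...and likewise for the 3xx, 4xx and 5xx fields.
}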

View file

@ -25,7 +25,6 @@ import (
"net/http" "net/http"
"os" "os"
"os/exec" "os/exec"
"strings"
"syscall" "syscall"
"time" "time"
@ -40,12 +39,14 @@ import (
"k8s.io/ingress/core/pkg/ingress" "k8s.io/ingress/core/pkg/ingress"
"k8s.io/ingress/core/pkg/ingress/defaults" "k8s.io/ingress/core/pkg/ingress/defaults"
"k8s.io/ingress/core/pkg/net/ssl" "k8s.io/ingress/core/pkg/net/ssl"
"strings"
) )
const (
ngxHealthPort = 18080
ngxHealthPath = "/healthz"
ngxStatusPath = "/internal_nginx_status"
ngxVtsPath = "/nginx_status/format/json"
)
var (
@ -156,8 +157,8 @@ func (n *NGINXController) start(cmd *exec.Cmd, done chan error) {
done <- err
return
}

cfg := ngx_template.ReadConfig(n.configmap.Data)
-n.setupMonitor(cmd.Args)
n.setupMonitor(cmd.Args, cfg.EnableVtsStatus)

go func() {
done <- cmd.Wait()
@ -177,6 +178,7 @@ func (n NGINXController) Reload(data []byte) ([]byte, bool, error) {
}
o, e := exec.Command(n.binary, "-s", "reload").CombinedOutput()
return o, true, e
}
@ -204,6 +206,7 @@ func (n NGINXController) isReloadRequired(data []byte) bool {
}
if !bytes.Equal(src, data) {
tmpfile, err := ioutil.TempFile("", "nginx-cfg-diff")
if err != nil {
glog.Errorf("error creating temporal file: %s", err)
@ -312,6 +315,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
}
cfg := ngx_template.ReadConfig(n.configmap.Data)
n.setupMonitor([]string{""}, cfg.EnableVtsStatus)

// NGINX cannot resize the has tables used to store server names.
// For this reason we check if the defined size defined is correct

View file

@ -17,7 +17,9 @@ limitations under the License.
package main

import (
"encoding/json"
"fmt"
"github.com/golang/glog"
"io/ioutil"
"net/http"
"regexp"
@ -49,22 +51,147 @@ type nginxStatus struct {
Waiting int
}
// https://github.com/vozlt/nginx-module-vts
type Vts struct {
NginxVersion string `json:"nginxVersion"`
LoadMsec int `json:"loadMsec"`
NowMsec int `json:"nowMsec"`
// Total connections and requests(same as stub_status_module in NGINX)
Connections Connections `json:"connections"`
// Traffic(in/out) and request and response counts and cache hit ratio per each server zone
ServerZones map[string]ServerZone `json:"serverZones"`
// Traffic(in/out) and request and response counts and cache hit ratio per each server zone filtered through
// the vhost_traffic_status_filter_by_set_key directive
FilterZones map[string]map[string]FilterZone `json:"filterZones"`
// Traffic(in/out) and request and response counts per server in each upstream group
UpstreamZones map[string][]UpstreamZone `json:"upstreamZones"`
}
type ServerZone struct {
RequestCounter float64 `json:"requestCounter"`
InBytes float64 `json:"inBytes"`
OutBytes float64 `json:"outBytes"`
Responses Response `json:"responses"`
Cache Cache `json:"responses"`
}
type FilterZone struct {
RequestCounter float64 `json:"requestCounter"`
InBytes float64 `json:"inBytes"`
OutBytes float64 `json:"outBytes"`
Cache Cache `json:"responses"`
Responses Response `json:"responses"`
}
type UpstreamZone struct {
Responses Response `json:"responses"`
Server string `json:"server"`
RequestCounter float64 `json:"requestCounter"`
InBytes float64 `json:"inBytes"`
OutBytes float64 `json:"outBytes"`
ResponseMsec float64 `json:"responseMsec"`
Weight float64 `json:"weight"`
MaxFails float64 `json:"maxFails"`
FailTimeout float64 `json:"failTimeout"`
Backup BoolToFloat64 `json:"backup"`
Down BoolToFloat64 `json:"down"`
}
type Cache struct {
Miss float64 `json:"miss"`
Bypass float64 `json:"bypass"`
Expired float64 `json:"expired"`
Stale float64 `json:"stale"`
Updating float64 `json:"updating"`
Revalidated float64 `json:"revalidated"`
Hit float64 `json:"hit"`
Scarce float64 `json:"scarce"`
}
type Response struct {
OneXx float64 `json:"1xx"`
TwoXx float64 `json:"2xx"`
TheeXx float64 `json:"3xx"`
FourXx float64 `json:"4xx"`
FiveXx float64 `json:"5xx"`
}
type Connections struct {
Active float64 `json:"active"`
Reading float64 `json:"reading"`
Writing float64 `json:"writing"`
Waiting float64 `json:"waiting"`
Accepted float64 `json:"accepted"`
Handled float64 `json:"handled"`
Requests float64 `json:"requests"`
}
type BoolToFloat64 float64
func (bit *BoolToFloat64) UnmarshalJSON(data []byte) error {
asString := string(data)
if asString == "1" || asString == "true" {
*bit = 1
} else if asString == "0" || asString == "false" {
*bit = 0
} else {
return fmt.Errorf("boolean unmarshal error: invalid input %s", asString)
}
return nil
}
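A minimal sketch of the JSON shape these types are meant to decode, and of how the BoolToFloat64 fields come out. The payload below is hand-written and far smaller than what nginx-module-vts actually serves at the status endpoint; the wrapper function is invented:

// Illustrative sketch only: decode a tiny, hand-written vts payload.
func decodeVtsSample() {
	sample := []byte(`{
		"nginxVersion": "1.11.10",
		"connections": {"active": 1, "reading": 0, "writing": 1, "waiting": 0, "accepted": 10, "handled": 10, "requests": 12},
		"serverZones": {"example.com": {"requestCounter": 12, "inBytes": 1024, "outBytes": 4096, "responses": {"1xx": 0, "2xx": 11, "3xx": 0, "4xx": 1, "5xx": 0}}},
		"upstreamZones": {"default-backend": [{"server": "10.2.0.3:8080", "requestCounter": 12, "inBytes": 1024, "outBytes": 4096, "responseMsec": 3, "weight": 1, "maxFails": 0, "failTimeout": 0, "backup": false, "down": false, "responses": {"1xx": 0, "2xx": 11, "3xx": 0, "4xx": 1, "5xx": 0}}]}
	}`)
	var vts Vts
	if err := json.Unmarshal(sample, &vts); err != nil {
		glog.Warningf("sample unmarshal failed: %v", err)
	}
	// vts.UpstreamZones["default-backend"][0].RequestCounter is 12 and
	// .Down is 0, because "down" is false in the sample payload.
}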
func getNginxStatus() (*nginxStatus, error) {
-resp, err := http.DefaultClient.Get(fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath))
url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
glog.V(3).Infof("start scraping url: %v", url)
data, err := httpBody(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err)
}
return parse(string(data)), nil
}
func httpBody(url string) ([]byte, error) {
resp, err := http.DefaultClient.Get(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx : %v", err)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
-return nil, fmt.Errorf("unexpected error scraping nginx status page (%v)", err)
return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err)
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-return nil, fmt.Errorf("unexpected error scraping nginx status page (status %v)", resp.StatusCode)
return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode)
}
-return parse(string(data)), nil
return data, nil
}
func getNginxVtsMetrics() (*Vts, error) {
url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
glog.V(3).Infof("start scrapping url: %v", url)
data, err := httpBody(url)
if err != nil {
return nil, fmt.Errorf("unexpected error scraping nginx vts (%v)", err)
}
var vts Vts
err = json.Unmarshal(data, &vts)
if err != nil {
return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
}
glog.V(3).Infof("scrap returned : %v", vts)
return &vts, nil
}

func parse(data string) *nginxStatus {

View file

@ -67,4 +67,4 @@ func TestToint(t *testing.T) {
t.Fatalf("expected %v but returned %v", test.exp, v)
}
}
}

View file

@ -289,6 +289,7 @@ func NewDefault() Configuration {
UseProxyProtocol: false,
UseGzip: true,
WorkerProcesses: runtime.NumCPU(),
EnableVtsStatus: false,
VtsStatusZoneSize: "10m",
UseHTTP2: true,
Backend: defaults.Backend{

View file

@ -60,9 +60,6 @@ http {
client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }};
large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }};
-http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }};
-http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }};
types_hash_max_size 2048;
server_names_hash_max_size {{ $cfg.ServerNameHashMaxSize }};
@ -82,7 +79,7 @@ http {
server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }};
-log_format upstreaminfo '{{ buildLogFormatUpstream $cfg }}';
log_format upstreaminfo {{ buildLogFormatUpstream $cfg }};
{{/* map urls that should not appear in access.log */}}
{{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}}
@ -210,10 +207,10 @@ http {
{{ range $index, $server := .Servers }}
server {
server_name {{ $server.Hostname }};
-listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}};
listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $index 0 }} ipv6only=off{{end}}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}};
{{/* Listen on 442 because port 443 is used in the stream section */}}
-{{/* This listen on port 442 cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
{{/* This listen cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
-{{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}{{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
{{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}[::]:443 {{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
{{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}}
# PEM sha: {{ $server.SSLPemChecksum }}
ssl_certificate {{ $server.SSLCertificate }};
@ -246,8 +243,6 @@ http {
{{ end }}
{{ if not (empty $location.ExternalAuth.Method) }}
proxy_method {{ $location.ExternalAuth.Method }};
-proxy_set_header X-Original-URI $request_uri;
-proxy_set_header X-Scheme $pass_access_scheme;
{{ end }}
proxy_set_header Host $host;
proxy_pass_request_headers on;
@ -273,13 +268,9 @@ http {
auth_request {{ $authPath }};
{{ end }}
-{{ if not (empty $location.ExternalAuth.SigninURL) }}
-error_page 401 = {{ $location.ExternalAuth.SigninURL }};
-{{ end }}
-{{ if (or $location.Redirect.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect)) }}
{{ if (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect) }}
# enforce ssl on server side
-if ($pass_access_scheme = http) {
if ($scheme = http) {
return 301 https://$host$request_uri;
}
{{ end }}
@ -323,8 +314,6 @@ http {
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $pass_port;
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
-proxy_set_header X-Original-URI $request_uri;
-proxy_set_header X-Scheme $pass_access_scheme;
# mitigate HTTPoxy Vulnerability
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
@ -342,7 +331,6 @@ http {
proxy_redirect off;
proxy_buffering off;
proxy_buffer_size "{{ $location.Proxy.BufferSize }}";
-proxy_buffers 4 "{{ $location.Proxy.BufferSize }}";
proxy_http_version 1.1;
@ -376,7 +364,7 @@ http {
# with an external software (like sysdig)
location /nginx_status {
allow 127.0.0.1;
-{{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
allow ::1;
deny all;
access_log off;
@ -394,7 +382,7 @@ http {
# Use the port 18080 (random value just to avoid known ports) as default port for nginx.
# Changing this value requires a change in:
# https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/nginx/command.go#L104
-listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}18080 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} default_server reuseport backlog={{ .BacklogSize }};
listen [::]:18080 ipv6only=off default_server reuseport backlog={{ .BacklogSize }};
location {{ $healthzURI }} {
access_log off;
@ -416,7 +404,7 @@ http {
# TODO: enable extraction for vts module.
location /internal_nginx_status {
allow 127.0.0.1;
-{{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
allow ::1;
deny all;
access_log off;
@ -476,7 +464,7 @@ stream {
{{ buildSSLPassthroughUpstreams $backends .PassthroughBackends }}
server {
-listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{ end }}{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
listen [::]:443 ipv6only=off{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
proxy_pass $stream_upstream;
ssl_preread on;
}

View file

@ -19,7 +19,7 @@ set -e
export NGINX_VERSION=1.11.10
export NDK_VERSION=0.3.0
-export VTS_VERSION=0.1.11
export VTS_VERSION=0.1.12
export SETMISC_VERSION=0.31
export LUA_VERSION=0.10.7
export STICKY_SESSIONS_VERSION=08a395c66e42