prometheus metrics improvements

Parent: 2e7c1308c9
Commit: 10b7de23db
8 changed files with 142 additions and 4553 deletions
@@ -25,10 +25,10 @@ import (
    "github.com/ncabatoff/process-exporter/proc"
    "github.com/prometheus/client_golang/prometheus"
    "reflect"
-   "strings"
 )

+// TODO add current namespace
+// TODO add ingress class
 var (
    // descriptions borrow from https://github.com/vozlt/nginx-module-vts

@@ -192,7 +192,6 @@ var (
 type exeMatcher struct {
    name string
    args []string
-   enableVtsCollector bool
 }

 func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
@@ -203,28 +202,24 @@ func (em exeMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
    return em.name == cmd, ""
 }

-func (n *NGINXController) setupMonitor(args[] string, vtsCollector bool) {
-   glog.Warning("vtsCollector now is ", vtsCollector)
-   pc, err := newProcessCollector(true, exeMatcher{"nginx", args, vtsCollector})
+func (n *NGINXController) setupMonitor(args []string, vtsCollector bool) {
+   pc, err := newProcessCollector(true, exeMatcher{"nginx", args}, vtsCollector)

    if err != nil {
-       glog.Warningf("unexpected error registering nginx collector: %v", err)
+       glog.Fatalf("unexpected error registering nginx collector: %v", err)
    }

    err = prometheus.Register(pc)

    if err != nil {
-       if reg, ok := err.(prometheus.AlreadyRegisteredError); ok {
-           *reg.ExistingCollector.(prometheus.Collector).(*namedProcessCollector) = *pc
-
-       }else{
+       if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
            glog.Warningf("unexpected error registering nginx collector: %v", err)
        }
    }

 }

 type (
    scrapeRequest struct {
        results chan<- prometheus.Metric
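
For reference, a minimal standalone sketch of the registration pattern used above: prometheus.Register reports prometheus.AlreadyRegisteredError when an equivalent collector is already registered, and the new code only warns on other errors. The gauge name below is illustrative, not something defined in this repository.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A throwaway gauge stands in for the nginx collector.
	g := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "demo_gauge",
		Help: "Illustrative gauge used to show duplicate registration handling.",
	})

	if err := prometheus.Register(g); err != nil {
		fmt.Println("first registration failed:", err)
	}

	// Registering the same collector again yields AlreadyRegisteredError,
	// which the code above treats as a non-fatal condition.
	if err := prometheus.Register(g); err != nil {
		if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
			fmt.Println("unexpected registration error:", err)
		} else {
			fmt.Println("collector was already registered; reusing it")
		}
	}
}
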
@@ -233,31 +228,31 @@ type (

    namedProcessCollector struct {
        scrapeChan chan scrapeRequest
-       grouper *proc.Grouper
+       *proc.Grouper
        fs *proc.FS
-       //enableVtsCollector *bool
+       enableVtsCollector bool
    }
 )

 func newProcessCollector(
    children bool,
-   n common.MatchNamer) (*namedProcessCollector, error) {
+   n common.MatchNamer,
+   enableVtsCollector bool) (*namedProcessCollector, error) {

-   //fs, err := proc.NewFS("/proc")
-   //if err != nil {
-   //   return nil, err
-   //}
+   fs, err := proc.NewFS("/proc")
+   if err != nil {
+       return nil, err
+   }
    p := &namedProcessCollector{
        scrapeChan: make(chan scrapeRequest),
-       grouper: proc.NewGrouper(children, n),
-       //fs: fs,
-       //enableVtsCollector: vtsCollector,
+       Grouper: proc.NewGrouper(children, n),
+       fs: fs,
+       enableVtsCollector: enableVtsCollector,
+   }
+   _, err = p.Update(p.fs.AllProcs())
+   if err != nil {
+       return nil, err
    }

-   // p.Update(p.fs.AllProcs())
-   //if err != nil {
-   //   return nil, err
-   //}
-
    go p.start()

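
Replacing the named grouper field with an embedded *proc.Grouper means the grouper's methods are promoted onto namedProcessCollector, which is why later code can call p.Update(...) and p.Groups() directly. A small sketch of that Go embedding behaviour with stand-in types (the Grouper here is illustrative, not the process-exporter API):

package main

import "fmt"

// Grouper stands in for proc.Grouper from process-exporter.
type Grouper struct{ counts map[string]int }

func (g *Grouper) Update(names []string) {
	for _, n := range names {
		g.counts[n]++
	}
}

func (g *Grouper) Groups() map[string]int { return g.counts }

// collector embeds *Grouper the same way namedProcessCollector now embeds *proc.Grouper.
type collector struct {
	*Grouper
}

func main() {
	c := collector{Grouper: &Grouper{counts: map[string]int{}}}

	// Update and Groups are promoted from the embedded *Grouper,
	// so they can be called directly on the collector.
	c.Update([]string{"nginx", "nginx"})
	fmt.Println(c.Groups()) // map[nginx:2]
}
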
@@ -274,10 +269,8 @@ func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
    ch <- memResidentbytesDesc
    ch <- memVirtualbytesDesc
    ch <- startTimeDesc
-   x := p.grouper.(exeMatcher)
-   if true { //x.(execMatcher) == nil { //.(exeMatcher).enableVtsCollector {
-       glog.Info("registering vts describe")

+   //vts metrics
    ch <- vtsBytesDesc
    ch <- vtsCacheDesc
    ch <- vtsConnectionsDesc
@@ -295,7 +288,6 @@ func (p *namedProcessCollector) Describe(ch chan<- *prometheus.Desc) {
    ch <- vtsFilterZoneBytesDesc
    ch <- vtsFilterZoneCacheDesc
    ch <- vtsFilterZoneResponseDesc
-   }

 }

@@ -308,26 +300,25 @@ func (p *namedProcessCollector) Collect(ch chan<- prometheus.Metric) {

 func (p *namedProcessCollector) start() {

    for req := range p.scrapeChan {
        ch := req.results
        p.scrapeNginxStatus(ch)
        p.scrapeProcs(ch)
        p.scrapeVts(ch)

        req.done <- struct{}{}
    }
 }

+// scrapeNginxStatus scrap the nginx status
 func (p *namedProcessCollector) scrapeNginxStatus(ch chan<- prometheus.Metric) {
    s, err := getNginxStatus()

    if err != nil {
        glog.Warningf("unexpected error obtaining nginx status info: %v", err)
        return
    }

    ch <- prometheus.MustNewConstMetric(activeDesc,
        prometheus.GaugeValue, float64(s.Active))
    ch <- prometheus.MustNewConstMetric(acceptedDesc,
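
The start loop above serializes scrapes through scrapeChan: Collect sends a scrapeRequest carrying a results channel and waits on done while the single start goroutine does the actual work. A self-contained sketch of that handshake, with string results standing in for prometheus.Metric (the names mirror the pattern, not the exact code):

package main

import "fmt"

// scrapeRequest mirrors the channel-based handshake used by the collector:
// collect sends a request, the start goroutine fills results and signals done.
type scrapeRequest struct {
	results chan<- string
	done    chan struct{}
}

type collector struct{ scrapeChan chan scrapeRequest }

func (c *collector) start() {
	for req := range c.scrapeChan {
		req.results <- "metric-a" // stand-in for the real scrape work
		req.results <- "metric-b"
		req.done <- struct{}{}
	}
}

// collect serializes concurrent scrapes through the single start goroutine.
func (c *collector) collect(results chan<- string) {
	req := scrapeRequest{results: results, done: make(chan struct{})}
	c.scrapeChan <- req
	<-req.done
}

func main() {
	c := &collector{scrapeChan: make(chan scrapeRequest)}
	go c.start()

	results := make(chan string, 4)
	c.collect(results)
	close(results)
	for r := range results {
		fmt.Println(r)
	}
}
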
@@ -345,14 +336,9 @@ func (p *namedProcessCollector) scrapeNginxStatus(ch chan<- prometheus.Metric) {

 }

+// scrapeVts scrape nginx vts metrics
 func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {

-   if ! true {
-       glog.V(3).Info("vts metrics not enabled")
-       return
-   }
-
-   glog.V(3).Info("starting scrap on vts")
    nginxMetrics, err := getNginxVtsMetrics()
    if err != nil {
        glog.Warningf("unexpected error obtaining nginx status info: %v", err)
@@ -417,8 +403,6 @@ func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {

        for country, zone := range countries {

-           serverZone = strings.Replace(serverZone, "country::", "", 1)
-
            reflectMetrics(&zone.Responses, vtsFilterZoneResponseDesc, ch, serverZone, country)
            reflectMetrics(&zone.Cache, vtsFilterZoneCacheDesc, ch, serverZone, country)

@@ -432,35 +416,34 @@ func (p *namedProcessCollector) scrapeVts(ch chan<- prometheus.Metric) {

    }

 }

 func (p *namedProcessCollector) scrapeProcs(ch chan<- prometheus.Metric) {

+   _, err := p.Update(p.fs.AllProcs())
+   if err != nil {
+       glog.Warningf("unexpected error obtaining nginx process info: %v", err)
        return
-   //_, err := p.Update(p.fs.AllProcs())
-   //if err != nil {
-   //   glog.Warningf("unexpected error obtaining nginx process info: %v", err)
-   //   return
-   //}
-   //
-   //for gname, gcounts := range p.Groups() {
-   //   glog.Infof("%v", gname)
-   //   glog.Infof("%v", gcounts)
-   //   ch <- prometheus.MustNewConstMetric(numprocsDesc,
-   //       prometheus.GaugeValue, float64(gcounts.Procs))
-   //   ch <- prometheus.MustNewConstMetric(memResidentbytesDesc,
-   //       prometheus.GaugeValue, float64(gcounts.Memresident))
-   //   ch <- prometheus.MustNewConstMetric(memVirtualbytesDesc,
-   //       prometheus.GaugeValue, float64(gcounts.Memvirtual))
-   //   ch <- prometheus.MustNewConstMetric(startTimeDesc,
-   //       prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
-   //   ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
-   //       prometheus.CounterValue, gcounts.Cpu)
-   //   ch <- prometheus.MustNewConstMetric(readBytesDesc,
-   //       prometheus.CounterValue, float64(gcounts.ReadBytes))
-   //   ch <- prometheus.MustNewConstMetric(writeBytesDesc,
-   //       prometheus.CounterValue, float64(gcounts.WriteBytes))
-   //}
+   }
+
+   for gname, gcounts := range p.Groups() {
+       glog.Infof("%v", gname)
+       glog.Infof("%v", gcounts)
+       ch <- prometheus.MustNewConstMetric(numprocsDesc,
+           prometheus.GaugeValue, float64(gcounts.Procs))
+       ch <- prometheus.MustNewConstMetric(memResidentbytesDesc,
+           prometheus.GaugeValue, float64(gcounts.Memresident))
+       ch <- prometheus.MustNewConstMetric(memVirtualbytesDesc,
+           prometheus.GaugeValue, float64(gcounts.Memvirtual))
+       ch <- prometheus.MustNewConstMetric(startTimeDesc,
+           prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()))
+       ch <- prometheus.MustNewConstMetric(cpuSecsDesc,
+           prometheus.CounterValue, gcounts.Cpu)
+       ch <- prometheus.MustNewConstMetric(readBytesDesc,
+           prometheus.CounterValue, float64(gcounts.ReadBytes))
+       ch <- prometheus.MustNewConstMetric(writeBytesDesc,
+           prometheus.CounterValue, float64(gcounts.WriteBytes))
+   }

 }

 func reflectMetrics(value interface{}, desc *prometheus.Desc, ch chan<- prometheus.Metric, labels ...string) {

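
scrapeProcs now emits per-group values with prometheus.MustNewConstMetric, building throwaway const metrics from a *prometheus.Desc at scrape time rather than mutating long-lived gauges. A minimal sketch of that pattern; the descriptor name, label, and value are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// numProcsDesc mirrors the style of the descriptors used in this file;
// the metric name and label are illustrative only.
var numProcsDesc = prometheus.NewDesc(
	"demo_num_procs",
	"Number of processes in the group.",
	[]string{"group"}, nil,
)

// collectOnce shows how a Collect implementation would push const metrics.
func collectOnce(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(numProcsDesc,
		prometheus.GaugeValue, 4, "nginx")
}

func main() {
	ch := make(chan prometheus.Metric, 1)
	collectOnce(ch)
	close(ch)
	for m := range ch {
		fmt.Println(m.Desc())
	}
}
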
@@ -1 +0,0 @@
-package main
@@ -51,7 +51,7 @@ const (
 var (
    tmplPath = "/etc/nginx/template/nginx.tmpl"
    cfgPath  = "/etc/nginx/nginx.conf"
-   binary   = "/usr/local/bin/nginx"
+   binary   = "/usr/sbin/nginx"
    defIngressClass = "nginx"
 )

@@ -107,8 +107,6 @@ type NGINXController struct {
    storeLister ingress.StoreLister

    binary string
-
-   namedProcessCollector *namedProcessCollector
 }

 // Start start a new NGINX master process running in foreground.
@@ -208,9 +206,6 @@ func (n NGINXController) isReloadRequired(data []byte) bool {

    if !bytes.Equal(src, data) {

-       cfg := ngx_template.ReadConfig(n.configmap.Data)
-       n.setupMonitor([]string{""}, &cfg.EnableVtsStatus)
-
        tmpfile, err := ioutil.TempFile("", "nginx-cfg-diff")
        if err != nil {
            glog.Errorf("error creating temporal file: %s", err)
@@ -319,6 +314,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, error) {
    }

    cfg := ngx_template.ReadConfig(n.configmap.Data)
+   n.setupMonitor([]string{""}, cfg.EnableVtsStatus)

    // NGINX cannot resize the has tables used to store server names.
    // For this reason we check if the defined size defined is correct
@@ -143,7 +143,7 @@ func (bit BoolToFloat64) UnmarshalJSON(data []byte) error {
 func getNginxStatus() (*nginxStatus, error) {

    url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxStatusPath)
-   glog.V(3).Infof("scrapping url: %v", url)
+   glog.V(3).Infof("start scrapping url: %v", url)

    data, err := httpBody(url)

@@ -172,9 +172,10 @@ func httpBody(url string) ([]byte, error) {
    return data, nil

 }

 func getNginxVtsMetrics() (*Vts, error) {
    url := fmt.Sprintf("http://localhost:%v%v", ngxHealthPort, ngxVtsPath)
-   glog.V(3).Infof("scrapping url: %v", url)
+   glog.V(3).Infof("start scrapping url: %v", url)

    data, err := httpBody(url)

@@ -188,6 +189,8 @@ func getNginxVtsMetrics() (*Vts, error) {
        return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
    }

+   glog.V(3).Infof("scrap returned : %v", vts)
+
    return &vts, nil
 }

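
getNginxVtsMetrics fetches the vts JSON over HTTP and unmarshals it into the Vts struct before the new trace log prints the result. A rough standalone sketch of that fetch-and-decode flow; the URL, port, and trimmed-down struct are assumptions, and the real httpBody helper and Vts type live elsewhere in this repository:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// vtsSubset models only a fragment of the vts JSON for illustration;
// the real Vts type in this repository is much larger.
type vtsSubset struct {
	Connections struct {
		Active float64 `json:"active"`
	} `json:"connections"`
}

func fetchVts(url string) (*vtsSubset, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var v vtsSubset
	if err := json.Unmarshal(data, &v); err != nil {
		return nil, fmt.Errorf("unexpected error json unmarshal (%v)", err)
	}
	return &v, nil
}

func main() {
	// Port and path are assumptions standing in for ngxHealthPort and ngxVtsPath.
	v, err := fetchVts("http://localhost:18080/nginx_status/format/json")
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	fmt.Println("active connections:", v.Connections.Active)
}
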
@@ -68,21 +68,3 @@ func TestToint(t *testing.T) {
        }
    }
 }
-//
-//func TestUnmarshalJSON (t *testing.T){
-//  tests := []struct{
-//      in []byte
-//      exp float64
-//      error error
-//  }{
-//      {in: "false",exp: 0},
-//      {"0", 0},
-//      {"true", 1},
-//      {"1", 1},
-//      {" errr", error},
-//  }
-//
-//  for _,test := range tests
-//
-//
-//}
@@ -213,8 +213,11 @@ func buildLocation(input interface{}) string {

 func buildAuthLocation(input interface{}) string {
    location, ok := input.(*ingress.Location)
-   if !ok || location.ExternalAuth.URL == "" {
+   if !ok {
+       return ""
+   }
+
+   if location.ExternalAuth.URL == "" {
        return ""
    }

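
buildAuthLocation now guards the template argument with a comma-ok type assertion and returns early on the wrong type before inspecting ExternalAuth.URL. A tiny illustration of that guard with a stand-in type (not the ingress.Location type itself):

package main

import "fmt"

type location struct{ url string }

// build mirrors the guard style used in buildAuthLocation: a comma-ok type
// assertion that returns early when the template argument has the wrong type.
func build(input interface{}) string {
	loc, ok := input.(*location)
	if !ok {
		return ""
	}
	if loc.url == "" {
		return ""
	}
	return "auth-" + loc.url
}

func main() {
	fmt.Println(build("not a *location")) // ""
	fmt.Println(build(&location{url: "svc"}))
}
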
File diff suppressed because it is too large
@@ -61,9 +61,6 @@ http {
    client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }};
    large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }};

-   http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }};
-   http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }};
-
    types_hash_max_size 2048;
    server_names_hash_max_size {{ $cfg.ServerNameHashMaxSize }};
    server_names_hash_bucket_size {{ $cfg.ServerNameHashBucketSize }};
|
||||||
|
|
||||||
server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }};
|
server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }};
|
||||||
|
|
||||||
log_format upstreaminfo '{{ buildLogFormatUpstream $cfg }}';
|
log_format upstreaminfo {{ buildLogFormatUpstream $cfg }};
|
||||||
|
|
||||||
{{/* map urls that should not appear in access.log */}}
|
{{/* map urls that should not appear in access.log */}}
|
||||||
{{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}}
|
{{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}}
|
||||||
|
@ -210,14 +207,10 @@ http {
|
||||||
{{ range $index, $server := .Servers }}
|
{{ range $index, $server := .Servers }}
|
||||||
server {
|
server {
|
||||||
server_name {{ $server.Hostname }};
|
server_name {{ $server.Hostname }};
|
||||||
listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}};
|
listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $index 0 }} ipv6only=off{{end}}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}};
|
||||||
{{/* Listen on 442 because port 443 is used in the stream section */}}
|
{{/* Listen on 442 because port 443 is used in the stream section */}}
|
||||||
{{/* This listen on port 442 cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
|
{{/* This listen cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
|
||||||
<<<<<<< HEAD
|
{{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}[::]:443 {{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
|
||||||
{{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}{{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
|
|
||||||
=======
|
|
||||||
{{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}4420{{ else }}[::]:4430 {{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $index 0 }} ipv6only=off{{end}} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl ; #{{ if $cfg.UseHTTP2 }}http2{{ end }};
|
|
||||||
>>>>>>> run e2e
|
|
||||||
{{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}}
|
{{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}}
|
||||||
# PEM sha: {{ $server.SSLPemChecksum }}
|
# PEM sha: {{ $server.SSLPemChecksum }}
|
||||||
ssl_certificate {{ $server.SSLCertificate }};
|
ssl_certificate {{ $server.SSLCertificate }};
|
||||||
|
@ -225,7 +218,7 @@ http {
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
|
||||||
{{ if (and (not (empty $server.SSLCertificate)) $cfg.HSTS) }}
|
{{ if (and (not (empty $server.SSLCertificate)) $cfg.HSTS) }}
|
||||||
#more_set_headers "Strict-Transport-Security: max-age={{ $cfg.HSTSMaxAge }}{{ if $cfg.HSTSIncludeSubdomains }}; includeSubDomains{{ end }}; preload";
|
more_set_headers "Strict-Transport-Security: max-age={{ $cfg.HSTSMaxAge }}{{ if $cfg.HSTSIncludeSubdomains }}; includeSubDomains{{ end }}; preload";
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
|
||||||
{{ if $cfg.EnableVtsStatus }}vhost_traffic_status_filter_by_set_key $geoip_country_code country::$server_name;{{ end }}
|
{{ if $cfg.EnableVtsStatus }}vhost_traffic_status_filter_by_set_key $geoip_country_code country::$server_name;{{ end }}
|
||||||
|
@ -250,8 +243,6 @@ http {
|
||||||
{{ end }}
|
{{ end }}
|
||||||
{{ if not (empty $location.ExternalAuth.Method) }}
|
{{ if not (empty $location.ExternalAuth.Method) }}
|
||||||
proxy_method {{ $location.ExternalAuth.Method }};
|
proxy_method {{ $location.ExternalAuth.Method }};
|
||||||
proxy_set_header X-Original-URI $request_uri;
|
|
||||||
proxy_set_header X-Scheme $pass_access_scheme;
|
|
||||||
{{ end }}
|
{{ end }}
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_pass_request_headers on;
|
proxy_pass_request_headers on;
|
||||||
|
@ -277,13 +268,9 @@ http {
|
||||||
auth_request {{ $authPath }};
|
auth_request {{ $authPath }};
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
|
||||||
{{ if not (empty $location.ExternalAuth.SigninURL) }}
|
{{ if (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect) }}
|
||||||
error_page 401 = {{ $location.ExternalAuth.SigninURL }};
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ if (or $location.Redirect.ForceSSLRedirect (and (not (empty $server.SSLCertificate)) $location.Redirect.SSLRedirect)) }}
|
|
||||||
# enforce ssl on server side
|
# enforce ssl on server side
|
||||||
if ($pass_access_scheme = http) {
|
if ($scheme = http) {
|
||||||
return 301 https://$host$request_uri;
|
return 301 https://$host$request_uri;
|
||||||
}
|
}
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
@ -327,8 +314,6 @@ http {
|
||||||
proxy_set_header X-Forwarded-Host $host;
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
proxy_set_header X-Forwarded-Port $pass_port;
|
proxy_set_header X-Forwarded-Port $pass_port;
|
||||||
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
|
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
|
||||||
proxy_set_header X-Original-URI $request_uri;
|
|
||||||
proxy_set_header X-Scheme $pass_access_scheme;
|
|
||||||
|
|
||||||
# mitigate HTTPoxy Vulnerability
|
# mitigate HTTPoxy Vulnerability
|
||||||
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
|
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
|
||||||
|
@ -346,7 +331,6 @@ http {
|
||||||
proxy_redirect off;
|
proxy_redirect off;
|
||||||
proxy_buffering off;
|
proxy_buffering off;
|
||||||
proxy_buffer_size "{{ $location.Proxy.BufferSize }}";
|
proxy_buffer_size "{{ $location.Proxy.BufferSize }}";
|
||||||
proxy_buffers 4 "{{ $location.Proxy.BufferSize }}";
|
|
||||||
|
|
||||||
proxy_http_version 1.1;
|
proxy_http_version 1.1;
|
||||||
|
|
||||||
|
@ -380,7 +364,7 @@ http {
|
||||||
# with an external software (like sysdig)
|
# with an external software (like sysdig)
|
||||||
location /nginx_status {
|
location /nginx_status {
|
||||||
allow 127.0.0.1;
|
allow 127.0.0.1;
|
||||||
{{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
|
allow ::1;
|
||||||
deny all;
|
deny all;
|
||||||
|
|
||||||
access_log off;
|
access_log off;
|
||||||
|
@ -388,6 +372,7 @@ http {
|
||||||
}
|
}
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
|
||||||
|
{{ template "CUSTOM_ERRORS" $cfg }}
|
||||||
}
|
}
|
||||||
|
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
@ -397,7 +382,7 @@ http {
|
||||||
# Use the port 18080 (random value just to avoid known ports) as default port for nginx.
|
# Use the port 18080 (random value just to avoid known ports) as default port for nginx.
|
||||||
# Changing this value requires a change in:
|
# Changing this value requires a change in:
|
||||||
# https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/nginx/command.go#L104
|
# https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/nginx/command.go#L104
|
||||||
listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}18080 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{end}} default_server reuseport backlog={{ .BacklogSize }};
|
listen [::]:18080 ipv6only=off default_server reuseport backlog={{ .BacklogSize }};
|
||||||
|
|
||||||
location {{ $healthzURI }} {
|
location {{ $healthzURI }} {
|
||||||
access_log off;
|
access_log off;
|
||||||
|
@ -419,7 +404,7 @@ http {
|
||||||
# TODO: enable extraction for vts module.
|
# TODO: enable extraction for vts module.
|
||||||
location /internal_nginx_status {
|
location /internal_nginx_status {
|
||||||
allow 127.0.0.1;
|
allow 127.0.0.1;
|
||||||
{{ if not $cfg.DisableIpv6 }}allow ::1;{{ end }}
|
allow ::1;
|
||||||
deny all;
|
deny all;
|
||||||
|
|
||||||
access_log off;
|
access_log off;
|
||||||
|
@ -430,9 +415,9 @@ http {
|
||||||
set $proxy_upstream_name "upstream-default-backend";
|
set $proxy_upstream_name "upstream-default-backend";
|
||||||
proxy_pass http://upstream-default-backend;
|
proxy_pass http://upstream-default-backend;
|
||||||
}
|
}
|
||||||
|
{{ template "CUSTOM_ERRORS" $cfg }}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
# default server for services without endpoints
|
# default server for services without endpoints
|
||||||
server {
|
server {
|
||||||
listen 8181;
|
listen 8181;
|
||||||
|
@ -440,7 +425,6 @@ http {
|
||||||
|
|
||||||
location / {
|
location / {
|
||||||
{{ if .CustomErrors }}
|
{{ if .CustomErrors }}
|
||||||
<<<<<<< HEAD
|
|
||||||
content_by_lua_block {
|
content_by_lua_block {
|
||||||
openURL(ngx.req.get_headers(0), 503)
|
openURL(ngx.req.get_headers(0), 503)
|
||||||
}
|
}
|
||||||
|
@ -480,7 +464,7 @@ stream {
|
||||||
{{ buildSSLPassthroughUpstreams $backends .PassthroughBackends }}
|
{{ buildSSLPassthroughUpstreams $backends .PassthroughBackends }}
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen {{ if not $cfg.DisableIpv6 }}[::]:{{ end }}443 {{ if not $cfg.DisableIpv6 }}ipv6only=off{{ end }}{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
|
listen [::]:443 ipv6only=off{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
|
||||||
proxy_pass $stream_upstream;
|
proxy_pass $stream_upstream;
|
||||||
ssl_preread on;
|
ssl_preread on;
|
||||||
}
|
}
|
||||||
|
@ -521,14 +505,44 @@ stream {
|
||||||
{{ range $errCode := .CustomHTTPErrors }}
|
{{ range $errCode := .CustomHTTPErrors }}
|
||||||
location @custom_{{ $errCode }} {
|
location @custom_{{ $errCode }} {
|
||||||
internal;
|
internal;
|
||||||
=======
|
|
||||||
>>>>>>> run e2e
|
|
||||||
content_by_lua_block {
|
content_by_lua_block {
|
||||||
openURL(ngx.req.get_headers(0), 503)
|
openURL(ngx.req.get_headers(0), {{ $errCode }})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
{{ else }}
|
|
||||||
return 503;
|
|
||||||
{{ end }}
|
{{ end }}
|
||||||
|
{{ end }}
|
||||||
|
|
||||||
|
{{/* CORS support from https://michielkalkman.com/snippets/nginx-cors-open-configuration.html */}}
|
||||||
|
{{ define "CORS" }}
|
||||||
|
if ($request_method = 'OPTIONS') {
|
||||||
|
add_header 'Access-Control-Allow-Origin' '*';
|
||||||
|
#
|
||||||
|
# Om nom nom cookies
|
||||||
|
#
|
||||||
|
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||||
|
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||||
|
#
|
||||||
|
# Custom headers and headers various browsers *should* be OK with but aren't
|
||||||
|
#
|
||||||
|
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
|
||||||
|
#
|
||||||
|
# Tell client that this pre-flight info is valid for 20 days
|
||||||
|
#
|
||||||
|
add_header 'Access-Control-Max-Age' 1728000;
|
||||||
|
add_header 'Content-Type' 'text/plain charset=UTF-8';
|
||||||
|
add_header 'Content-Length' 0;
|
||||||
|
return 204;
|
||||||
}
|
}
|
||||||
|
if ($request_method = 'POST') {
|
||||||
|
add_header 'Access-Control-Allow-Origin' '*';
|
||||||
|
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||||
|
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||||
|
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
|
||||||
}
|
}
|
||||||
}
|
if ($request_method = 'GET') {
|
||||||
|
add_header 'Access-Control-Allow-Origin' '*';
|
||||||
|
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||||
|
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||||
|
add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
|
||||||
|
}
|
||||||
|
{{ end }}
|
||||||
|