Merge pull request #614 from aledbf/refactor-passthrough
Refactor nginx ssl passthrough
commit f6af1ca023
15 changed files with 708 additions and 122 deletions
Godeps/Godeps.json (generated, 8 lines changed)

@@ -30,6 +30,10 @@
      "ImportPath": "github.com/PuerkitoBio/urlesc",
      "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e"
    },
    {
      "ImportPath": "github.com/armon/go-proxyproto",
      "Rev": "3daa90aec0039a806299b9078f4422fee950f33c"
    },
    {
      "ImportPath": "github.com/beorn7/perks/quantile",
      "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4"

@@ -164,6 +168,10 @@
      "Comment": "v0.1.0",
      "Rev": "2942f905437b665326fe044c49edb2094df13b37"
    },
    {
      "ImportPath": "github.com/paultag/sniff/parser",
      "Rev": "c36b8585a41425573d9e3e1890bf3b6ac89a3828"
    },
    {
      "ImportPath": "github.com/pborman/uuid",
      "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
@@ -26,16 +26,16 @@ import (
    "os"
    "os/exec"
    "strconv"
    "strings"
    "syscall"
    "time"

    "github.com/golang/glog"
    "github.com/spf13/pflag"

    proxyproto "github.com/armon/go-proxyproto"
    api_v1 "k8s.io/client-go/pkg/api/v1"

    "strings"

    "k8s.io/ingress/controllers/nginx/pkg/config"
    ngx_template "k8s.io/ingress/controllers/nginx/pkg/template"
    "k8s.io/ingress/controllers/nginx/pkg/version"

@@ -53,6 +53,8 @@ const (
    defaultStatusModule statusModule = "default"
    vtsStatusModule     statusModule = "vts"

    errNoChild = "wait: no child processes"
)

var (

@@ -81,8 +83,40 @@ func newNGINXController() ingress.Controller {
        configmap:     &api_v1.ConfigMap{},
        isIPV6Enabled: isIPv6Enabled(),
        resolver:      h,
        proxy:         &proxy{},
    }

    listener, err := net.Listen("tcp", ":443")
    if err != nil {
        glog.Fatalf("%v", err)
    }

    proxyList := &proxyproto.Listener{Listener: listener}

    // start goroutine that accepts tcp connections in port 443
    go func() {
        for {
            var conn net.Conn
            var err error

            if n.isProxyProtocolEnabled {
                // we need to wrap the listener in order to decode
                // proxy protocol before handling the connection
                conn, err = proxyList.Accept()
            } else {
                conn, err = listener.Accept()
            }

            if err != nil {
                glog.Warningf("unexpected error accepting tcp connection: %v", err)
                continue
            }

            glog.V(3).Infof("remote adress %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
            go n.proxy.Handle(conn)
        }
    }()

    var onChange func()
    onChange = func() {
        template, err := ngx_template.NewTemplate(tmplPath, onChange)

@@ -121,7 +155,8 @@ type NGINXController struct {
    storeLister ingress.StoreLister

    binary string
    binary   string
    resolver []net.IP

    cmdArgs []string

@@ -134,7 +169,10 @@ type NGINXController struct {
    // returns true if IPV6 is enabled in the pod
    isIPV6Enabled bool

    resolver []net.IP
    // returns true if proxy protocol es enabled
    isProxyProtocolEnabled bool

    proxy *proxy
}

// Start start a new NGINX master process running in foreground.

@@ -306,7 +344,7 @@ func (n NGINXController) testTemplate(cfg []byte) error {
        return err
    }
    out, err := exec.Command(n.binary, "-t", "-c", tmpfile.Name()).CombinedOutput()
    if err != nil {
    if err != nil && err.Error() != errNoChild {
        // this error is different from the rest because it must be clear why nginx is not working
        oe := fmt.Sprintf(`
-------------------------------------------------------------------------------

@@ -324,6 +362,20 @@ Error: %v
// SetConfig sets the configured configmap
func (n *NGINXController) SetConfig(cmap *api_v1.ConfigMap) {
    n.configmap = cmap

    n.isProxyProtocolEnabled = false
    if cmap == nil {
        return
    }

    val, ok := cmap.Data["use-proxy-protocol"]
    if ok {
        b, err := strconv.ParseBool(val)
        if err == nil {
            n.isProxyProtocolEnabled = b
            return
        }
    }
}

// SetListers sets the configured store listers in the generic ingress controller

@@ -446,6 +498,39 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) ([]byte, er
        return nil, err
    }

    servers := []*server{}
    for _, pb := range ingressCfg.PassthroughBackends {
        svc := pb.Service
        if svc == nil {
            glog.Warningf("missing service for PassthroughBackends %v", pb.Backend)
            continue
        }
        port, err := strconv.Atoi(pb.Port.String())
        if err != nil {
            for _, sp := range svc.Spec.Ports {
                if sp.Name == pb.Port.String() {
                    port = int(sp.Port)
                    break
                }
            }
        } else {
            for _, sp := range svc.Spec.Ports {
                if sp.Port == int32(port) {
                    port = int(sp.Port)
                    break
                }
            }
        }

        servers = append(servers, &server{
            Hostname: pb.Hostname,
            IP:       svc.Spec.ClusterIP,
            Port:     port,
        })
    }

    n.proxy.ServerList = servers

    return content, nil
}
controllers/nginx/pkg/cmd/controller/tcp.go (new file, 90 lines)

@@ -0,0 +1,90 @@
package main

import (
    "fmt"
    "io"
    "net"

    "github.com/golang/glog"
    "github.com/paultag/sniff/parser"
)

type server struct {
    Hostname string
    IP       string
    Port     int
}

type proxy struct {
    ServerList []*server
    Default    *server
}

func (p *proxy) Get(host string) *server {
    for _, s := range p.ServerList {
        if s.Hostname == host {
            return s
        }
    }

    return &server{
        Hostname: "localhost",
        IP:       "127.0.0.1",
        Port:     442,
    }
}

func (p *proxy) Handle(conn net.Conn) {
    defer conn.Close()
    data := make([]byte, 4096)

    length, err := conn.Read(data)
    if err != nil {
        glog.V(4).Infof("error reading the first 4k of the connection: %s", err)
        return
    }

    var proxy *server
    hostname, err := parser.GetHostname(data[:])
    if err == nil {
        glog.V(3).Infof("parsed hostname from TLS Client Hello: %s", hostname)
        proxy = p.Get(hostname)
        if proxy == nil {
            return
        }
    } else {
        proxy = p.Default
        if proxy == nil {
            return
        }
    }

    clientConn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", proxy.IP, proxy.Port))
    if err != nil {
        return
    }
    defer clientConn.Close()

    _, err = clientConn.Write(data[:length])
    if err != nil {
        clientConn.Close()
    }
    pipe(clientConn, conn)
}

func pipe(client, server net.Conn) {
    doCopy := func(s, c net.Conn, cancel chan<- bool) {
        io.Copy(s, c)
        cancel <- true
    }

    cancel := make(chan bool, 2)

    go doCopy(server, client, cancel)
    go doCopy(client, server, cancel)

    select {
    case <-cancel:
        return
    }
}
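To see the routing rule in Get at a glance, here is a small illustrative sketch, written as a throwaway file in the same package main as tcp.go above; the hostnames and addresses are invented. A hostname present in ServerList is dialed straight to its service ClusterIP, and any other SNI name falls back to the hard-coded 127.0.0.1:442, which is where the template change below moves the NGINX TLS server.

// Illustrative only: relies on the proxy and server types defined in tcp.go above.
package main

import "fmt"

func exampleRouting() {
    p := &proxy{
        ServerList: []*server{
            // Hypothetical passthrough host backed by a ClusterIP.
            {Hostname: "db.example.com", IP: "10.0.0.10", Port: 443},
        },
    }

    // Known SNI host: the raw TLS connection is piped to the service.
    s := p.Get("db.example.com")
    fmt.Printf("%s -> %s:%d\n", s.Hostname, s.IP, s.Port) // db.example.com -> 10.0.0.10:443

    // Unknown SNI host: falls back to NGINX terminating TLS on 442.
    s = p.Get("web.example.com")
    fmt.Printf("%s -> %s:%d\n", s.Hostname, s.IP, s.Port) // localhost -> 127.0.0.1:442
}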
@@ -50,7 +50,7 @@ const (
    logFormatUpstream = `%v - [$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status`

    logFormatStream = `[$time_local] $protocol [$ssl_preread_server_name] [$stream_upstream] $status $bytes_sent $bytes_received $session_time`
    logFormatStream = `[$time_local] $protocol $status $bytes_sent $bytes_received $session_time`

    // http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size
    // Sets the size of the buffer used for sending data.
@@ -130,21 +130,20 @@ var (
        }
        return true
    },
    "buildLocation":                buildLocation,
    "buildAuthLocation":            buildAuthLocation,
    "buildAuthResponseHeaders":     buildAuthResponseHeaders,
    "buildProxyPass":               buildProxyPass,
    "buildRateLimitZones":          buildRateLimitZones,
    "buildRateLimit":               buildRateLimit,
    "buildSSLPassthroughUpstreams": buildSSLPassthroughUpstreams,
    "buildResolvers":               buildResolvers,
    "isLocationAllowed":            isLocationAllowed,
    "buildLogFormatUpstream":       buildLogFormatUpstream,
    "contains":                     strings.Contains,
    "hasPrefix":                    strings.HasPrefix,
    "hasSuffix":                    strings.HasSuffix,
    "toUpper":                      strings.ToUpper,
    "toLower":                      strings.ToLower,
    "buildLocation":            buildLocation,
    "buildAuthLocation":        buildAuthLocation,
    "buildAuthResponseHeaders": buildAuthResponseHeaders,
    "buildProxyPass":           buildProxyPass,
    "buildRateLimitZones":      buildRateLimitZones,
    "buildRateLimit":           buildRateLimit,
    "buildResolvers":           buildResolvers,
    "isLocationAllowed":        isLocationAllowed,
    "buildLogFormatUpstream":   buildLogFormatUpstream,
    "contains":                 strings.Contains,
    "hasPrefix":                strings.HasPrefix,
    "hasSuffix":                strings.HasSuffix,
    "toUpper":                  strings.ToUpper,
    "toLower":                  strings.ToLower,
    }
)

@@ -169,34 +168,6 @@ func buildResolvers(a interface{}) string {
    return strings.Join(r, " ")
}

func buildSSLPassthroughUpstreams(b interface{}, sslb interface{}) string {
    backends := b.([]*ingress.Backend)
    sslBackends := sslb.([]*ingress.SSLPassthroughBackend)
    buf := bytes.NewBuffer(make([]byte, 0, 10))

    // multiple services can use the same upstream.
    // avoid duplications using a map[name]=true
    u := make(map[string]bool)
    for _, passthrough := range sslBackends {
        if u[passthrough.Backend] {
            continue
        }
        u[passthrough.Backend] = true
        fmt.Fprintf(buf, "upstream %v {\n", passthrough.Backend)
        for _, backend := range backends {
            if backend.Name == passthrough.Backend {
                for _, server := range backend.Endpoints {
                    fmt.Fprintf(buf, "\t\tserver %v:%v;\n", server.Address, server.Port)
                }
                break
            }
        }
        fmt.Fprint(buf, "\t}\n\n")
    }

    return buf.String()
}

// buildLocation produces the location string, if the ingress has redirects
// (specified through the ingress.kubernetes.io/rewrite-to annotation)
func buildLocation(input interface{}) string {

@@ -283,7 +254,7 @@ func buildProxyPass(b interface{}, loc interface{}) string {
    for _, backend := range backends {
        if backend.Name == location.Backend {
            if backend.Secure {
            if backend.Secure || backend.SSLPassthrough {
                proto = "https"
            }
            break
@@ -3,7 +3,6 @@
{{ $healthzURI := .HealthzURI }}
{{ $backends := .Backends }}
{{ $proxyHeaders := .ProxySetHeaders }}
{{ $passthroughBackends := .PassthroughBackends }}
daemon off;

worker_processes {{ $cfg.WorkerProcesses }};

@@ -222,10 +221,10 @@ http {
        listen 80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}};
        {{ if $IsIPV6Enabled }}listen [::]:80{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }}{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{ end }};{{ end }}

        {{/* Listen on 442 because port 443 is used in the stream section */}}
        {{/* Listen on 442 because port 443 is used in the TLS sni server */}}
        {{/* This listen on port 442 cannot contains proxy_protocol directive because port 443 is in charge of decoding the protocol */}}
        {{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}442{{ else }}443 {{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
        {{ if $IsIPV6Enabled }}{{ if not (empty $server.SSLCertificate) }}listen {{ if gt (len $passthroughBackends) 0 }}[::]:442{{ else }}[::]:443 {{ end }}{{ if $cfg.UseProxyProtocol }} proxy_protocol {{ end }}{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};{{ end }}
        {{ if not (empty $server.SSLCertificate) }}listen 442{{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};
        {{ if $IsIPV6Enabled }}{{ if not (empty $server.SSLCertificate) }}listen [::]:442{{ end }} {{ if eq $server.Hostname "_"}} default_server reuseport backlog={{ $backlogSize }}{{end}} ssl {{ if $cfg.UseHTTP2 }}http2{{ end }};{{ end }}
        {{/* comment PEM sha is required to detect changes in the generated configuration and force a reload */}}
        # PEM sha: {{ $server.SSLPemChecksum }}
        ssl_certificate {{ $server.SSLCertificate }};

@@ -476,17 +475,6 @@ http {
}

stream {
{{ if gt (len $passthroughBackends) 0 }}
    # map FQDN that requires SSL passthrough
    map $ssl_preread_server_name $stream_upstream {
        hostnames;
        {{ range $i, $passthrough := .PassthroughBackends }}
        {{ $passthrough.Hostname }} {{ $passthrough.Backend }};
        {{ end }}
        # send SSL traffic to this nginx in a different port
        default nginx-ssl-backend;
    }

    log_format log_stream {{ $cfg.LogFormatStream }};

    {{ if $cfg.DisableAccessLog }}

@@ -497,21 +485,6 @@ stream {
    error_log /var/log/nginx/error.log;

    # configure default backend for SSL
    upstream nginx-ssl-backend {
        server 127.0.0.1:442;
    }

    {{ buildSSLPassthroughUpstreams $backends .PassthroughBackends }}

    server {
        listen 443 {{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};
        {{ if $IsIPV6Enabled }}listen [::]:443 {{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }};{{ end }}
        proxy_pass $stream_upstream;
        ssl_preread on;
    }
    {{ end }}

    # TCP services
    {{ range $i, $tcpServer := .TCPBackends }}
    upstream tcp-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} {
@@ -49,6 +49,7 @@ import (
    "k8s.io/ingress/core/pkg/ingress/defaults"
    "k8s.io/ingress/core/pkg/ingress/resolver"
    "k8s.io/ingress/core/pkg/ingress/status"
    "k8s.io/ingress/core/pkg/ingress/status/leaderelection/resourcelock"
    "k8s.io/ingress/core/pkg/ingress/store"
    "k8s.io/ingress/core/pkg/k8s"
    "k8s.io/ingress/core/pkg/net/ssl"

@@ -211,6 +212,11 @@ func newIngressController(config *Configuration) *GenericController {
    eventHandler := cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            ep := obj.(*api.Endpoints)
            _, found := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]
            if found {
                return
            }
            ic.syncQueue.Enqueue(obj)
        },
        DeleteFunc: func(obj interface{}) {

@@ -218,6 +224,12 @@ func newIngressController(config *Configuration) *GenericController {
        },
        UpdateFunc: func(old, cur interface{}) {
            if !reflect.DeepEqual(old, cur) {
                ep := cur.(*api.Endpoints)
                _, found := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]
                if found {
                    return
                }

                ic.syncQueue.Enqueue(cur)
            }
        },

@@ -276,7 +288,7 @@ func newIngressController(config *Configuration) *GenericController {
    ic.nodeLister.Store, ic.nodeController = cache.NewInformer(
        cache.NewListWatchFromClient(ic.cfg.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()),
        &api.Node{}, ic.cfg.ResyncPeriod, eventHandler)
        &api.Node{}, ic.cfg.ResyncPeriod, cache.ResourceEventHandlerFuncs{})

    if config.UpdateStatus {
        ic.syncStatus = status.NewStatusSyncer(status.Config{

@@ -381,6 +393,8 @@ func (ic *GenericController) syncIngress(key interface{}) error {
                passUpstreams = append(passUpstreams, &ingress.SSLPassthroughBackend{
                    Backend:  loc.Backend,
                    Hostname: server.Hostname,
                    Service:  loc.Service,
                    Port:     loc.Port,
                })
                break
            }

@@ -620,6 +634,8 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress
                loc.Backend = ups.Name
                loc.IsDefBackend = false
                loc.Backend = ups.Name
                loc.Port = ups.Port
                loc.Service = ups.Service
                mergeLocationAnnotations(loc, anns)
                break
            }

@@ -631,6 +647,8 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress
                Path:         nginxPath,
                Backend:      ups.Name,
                IsDefBackend: false,
                Service:      ups.Service,
                Port:         ups.Port,
            }
            mergeLocationAnnotations(loc, anns)
            server.Locations = append(server.Locations, loc)

@@ -641,7 +659,6 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress
    // Configure Backends[].SSLPassthrough
    for _, upstream := range upstreams {
        isHTTP := false
        isHTTPSfrom := []*ingress.Server{}
        for _, server := range servers {
            for _, location := range server.Locations {

@@ -650,26 +667,18 @@ func (ic *GenericController) getBackendServers() ([]*ingress.Backend, []*ingress
                if location.Path == rootLocation {
                    if location.Backend == defUpstreamName {
                        glog.Warningf("ignoring ssl passthrough of %v as it doesn't have a default backend (root context)", server.Hostname)
                    } else {
                        isHTTPSfrom = append(isHTTPSfrom, server)
                        continue
                    }

                    isHTTPSfrom = append(isHTTPSfrom, server)
                }
                } else {
                    isHTTP = true
                    continue
                }
            }
        }
        }
        if len(isHTTPSfrom) > 0 {
            if isHTTP {
                for _, server := range isHTTPSfrom {
                    glog.Warningf("backend type mismatch on %v, assuming HTTP on ssl passthrough host %v", upstream.Name, server.Hostname)
                    // removing this server from the PassthroughBackends slice
                    server.SSLPassthrough = false
                }
            } else {
                upstream.SSLPassthrough = true
            }
            upstream.SSLPassthrough = true
        }
    }

@@ -761,36 +770,43 @@ func (ic *GenericController) createUpstreams(data []interface{}) map[string]*ing
                path.Backend.ServiceName,
                path.Backend.ServicePort.String())

            upstream, ok := upstreams[name]
            isNewUpstream := !ok

            if isNewUpstream {
                glog.V(3).Infof("creating upstream %v", name)
                upstream = newUpstream(name)
                upstreams[name] = upstream
            if _, ok := upstreams[name]; ok {
                continue
            }

            if !upstream.Secure {
                upstream.Secure = secUpstream
            glog.V(3).Infof("creating upstream %v", name)
            upstreams[name] = newUpstream(name)
            if !upstreams[name].Secure {
                upstreams[name].Secure = secUpstream
            }

            if upstream.SessionAffinity.AffinityType == "" {
                upstream.SessionAffinity.AffinityType = affinity.AffinityType
            if upstreams[name].SessionAffinity.AffinityType == "" {
                upstreams[name].SessionAffinity.AffinityType = affinity.AffinityType
                if affinity.AffinityType == "cookie" {
                    upstream.SessionAffinity.CookieSessionAffinity.Name = affinity.CookieConfig.Name
                    upstream.SessionAffinity.CookieSessionAffinity.Hash = affinity.CookieConfig.Hash
                    upstreams[name].SessionAffinity.CookieSessionAffinity.Name = affinity.CookieConfig.Name
                    upstreams[name].SessionAffinity.CookieSessionAffinity.Hash = affinity.CookieConfig.Hash
                }
            }

            if isNewUpstream {
                svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
                endp, err := ic.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), hz)
                if err != nil {
                    glog.Warningf("error obtaining service endpoints: %v", err)
                    continue
                }
                upstream.Endpoints = endp
            svcKey := fmt.Sprintf("%v/%v", ing.GetNamespace(), path.Backend.ServiceName)
            endp, err := ic.serviceEndpoints(svcKey, path.Backend.ServicePort.String(), hz)
            if err != nil {
                glog.Warningf("error obtaining service endpoints: %v", err)
                continue
            }
            upstreams[name].Endpoints = endp

            s, exists, err := ic.svcLister.Store.GetByKey(svcKey)
            if err != nil {
                glog.Warningf("error obtaining service: %v", err)
                continue
            }

            if exists {
                upstreams[name].Service = s.(*api.Service)
            } else {
                glog.Warningf("service %v does not exists", svcKey)
            }
            upstreams[name].Port = path.Backend.ServicePort
            }
        }
    }

@@ -926,6 +942,7 @@ func (ic *GenericController) createServers(data []interface{},
            // server already configured
            continue
        }

        servers[host] = &ingress.Server{
            Hostname: host,
            Locations: []*ingress.Location{

@@ -1132,7 +1149,7 @@ func (ic GenericController) Start() {
    go ic.secrController.Run(ic.stopCh)
    go ic.mapController.Run(ic.stopCh)

    go ic.syncQueue.Run(5*time.Second, ic.stopCh)
    go ic.syncQueue.Run(10*time.Second, ic.stopCh)

    go wait.Forever(ic.syncSecret, 10*time.Second)
@@ -295,7 +295,7 @@ func (s *statusSync) updateStatus(newIPs []api_v1.LoadBalancerIngress) {
    curIPs := currIng.Status.LoadBalancer.Ingress
    sort.Sort(loadBalancerIngressByIP(curIPs))
    if ingressSliceEqual(newIPs, curIPs) {
        glog.V(3).Infof("skipping update of Ingress %v/%v (there is no change)", currIng.Namespace, currIng.Name)
        glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", currIng.Namespace, currIng.Name)
        return
    }
@@ -147,7 +147,9 @@ type Configuration struct {
// Backend describes one or more remote server/s (endpoints) associated with a service
type Backend struct {
    // Name represents an unique api.Service name formatted as <namespace>-<name>-<port>
    Name string `json:"name"`
    Name    string             `json:"name"`
    Service *api.Service       `json:"service"`
    Port    intstr.IntOrString `json:"port"`
    // This indicates if the communication protocol between the backend and the endpoint is HTTP or HTTPS
    // Allowing the use of HTTPS
    // The endpoint/s must provide a TLS connection.

@@ -158,8 +160,7 @@ type Backend struct {
    SSLPassthrough bool `json:"sslPassthrough"`
    // Endpoints contains the list of endpoints currently running
    Endpoints []Endpoint `json:"endpoints"`
    // StickySession contains the StickyConfig object with stickness configuration

    // StickySessionAffinitySession contains the StickyConfig object with stickness configuration
    SessionAffinity SessionAffinityConfig
}

@@ -244,6 +245,9 @@ type Location struct {
    IsDefBackend bool `json:"isDefBackend"`
    // Backend describes the name of the backend to use.
    Backend string `json:"backend"`

    Service *api.Service       `json:"service"`
    Port    intstr.IntOrString `json:"port"`
    // BasicDigestAuth returns authentication configuration for
    // an Ingress rule.
    // +optional

@@ -291,6 +295,8 @@ type Location struct {
// The endpoints must provide the TLS termination exposing the required SSL certificate.
// The ingress controller only pipes the underlying TCP connection
type SSLPassthroughBackend struct {
    Service *api.Service       `json:"service"`
    Port    intstr.IntOrString `json:"port"`
    // Backend describes the endpoints to use.
    Backend string `json:"namespace,omitempty"`
    // Hostname returns the FQDN of the server
vendor/github.com/armon/go-proxyproto/.gitignore (generated, vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
*.test
*~
vendor/github.com/armon/go-proxyproto/LICENSE (generated, vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Armon Dadgar

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/armon/go-proxyproto/README.md (generated, vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
# proxyproto

This library provides the `proxyproto` package which can be used for servers
listening behind HAProxy of Amazon ELB load balancers. Those load balancers
support the use of a proxy protocol (http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt),
which provides a simple mechansim for the server to get the address of the client
instead of the load balancer.

This library provides both a net.Listener and net.Conn implementation that
can be used to handle situation in which you may be using the proxy protocol.
Only proxy protocol version 1, the human-readable form, is understood.

The only caveat is that we check for the "PROXY " prefix to determine if the protocol
is being used. If that string may occur as part of your input, then it is ambiguous
if the protocol is being used and you may have problems.

# Documentation

Full documentation can be found [here](http://godoc.org/github.com/armon/go-proxyproto).

# Examples

Using the library is very simple:

```

// Create a listener
list, err := net.Listen("tcp", "...")

// Wrap listener in a proxyproto listener
proxyList := &proxyproto.Listener{Listener: list}
conn, err :=proxyList.Accept()

...
```
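For context, here is a self-contained sketch that completes the README snippet using only the API defined in protocol.go below; the listen address is arbitrary. This mirrors how the controller above wraps its :443 listener when use-proxy-protocol is enabled.

package main

import (
    "log"
    "net"

    proxyproto "github.com/armon/go-proxyproto"
)

func main() {
    // Plain TCP listener; the port is chosen only for illustration.
    ln, err := net.Listen("tcp", ":8443")
    if err != nil {
        log.Fatal(err)
    }

    // Wrapping the listener makes Accept return connections that
    // transparently parse a leading "PROXY ..." header when present.
    pln := &proxyproto.Listener{Listener: ln}

    for {
        conn, err := pln.Accept()
        if err != nil {
            log.Printf("accept error: %v", err)
            continue
        }
        // With the proxy protocol in use this reports the original client
        // address, not the load balancer's.
        log.Printf("connection from %s", conn.RemoteAddr())
        conn.Close()
    }
}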
vendor/github.com/armon/go-proxyproto/protocol.go (generated, vendored, new file, 213 lines)

@@ -0,0 +1,213 @@
package proxyproto

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "log"
    "net"
    "strconv"
    "strings"
    "sync"
    "time"
)

var (
    // prefix is the string we look for at the start of a connection
    // to check if this connection is using the proxy protocol
    prefix    = []byte("PROXY ")
    prefixLen = len(prefix)
)

// Listener is used to wrap an underlying listener,
// whose connections may be using the HAProxy Proxy Protocol (version 1).
// If the connection is using the protocol, the RemoteAddr() will return
// the correct client address.
//
// Optionally define ProxyHeaderTimeout to set a maximum time to
// receive the Proxy Protocol Header. Zero means no timeout.
type Listener struct {
    Listener           net.Listener
    ProxyHeaderTimeout time.Duration
}

// Conn is used to wrap and underlying connection which
// may be speaking the Proxy Protocol. If it is, the RemoteAddr() will
// return the address of the client instead of the proxy address.
type Conn struct {
    bufReader          *bufio.Reader
    conn               net.Conn
    dstAddr            *net.TCPAddr
    srcAddr            *net.TCPAddr
    once               sync.Once
    proxyHeaderTimeout time.Duration
}

// Accept waits for and returns the next connection to the listener.
func (p *Listener) Accept() (net.Conn, error) {
    // Get the underlying connection
    conn, err := p.Listener.Accept()
    if err != nil {
        return nil, err
    }
    return NewConn(conn, p.ProxyHeaderTimeout), nil
}

// Close closes the underlying listener.
func (p *Listener) Close() error {
    return p.Listener.Close()
}

// Addr returns the underlying listener's network address.
func (p *Listener) Addr() net.Addr {
    return p.Listener.Addr()
}

// NewConn is used to wrap a net.Conn that may be speaking
// the proxy protocol into a proxyproto.Conn
func NewConn(conn net.Conn, timeout time.Duration) *Conn {
    pConn := &Conn{
        bufReader:          bufio.NewReader(conn),
        conn:               conn,
        proxyHeaderTimeout: timeout,
    }
    return pConn
}

// Read is check for the proxy protocol header when doing
// the initial scan. If there is an error parsing the header,
// it is returned and the socket is closed.
func (p *Conn) Read(b []byte) (int, error) {
    var err error
    p.once.Do(func() { err = p.checkPrefix() })
    if err != nil {
        return 0, err
    }
    return p.bufReader.Read(b)
}

func (p *Conn) Write(b []byte) (int, error) {
    return p.conn.Write(b)
}

func (p *Conn) Close() error {
    return p.conn.Close()
}

func (p *Conn) LocalAddr() net.Addr {
    return p.conn.LocalAddr()
}

// RemoteAddr returns the address of the client if the proxy
// protocol is being used, otherwise just returns the address of
// the socket peer. If there is an error parsing the header, the
// address of the client is not returned, and the socket is closed.
// Once implication of this is that the call could block if the
// client is slow. Using a Deadline is recommended if this is called
// before Read()
func (p *Conn) RemoteAddr() net.Addr {
    p.once.Do(func() {
        if err := p.checkPrefix(); err != nil && err != io.EOF {
            log.Printf("[ERR] Failed to read proxy prefix: %v", err)
            p.Close()
            p.bufReader = bufio.NewReader(p.conn)
        }
    })
    if p.srcAddr != nil {
        return p.srcAddr
    }
    return p.conn.RemoteAddr()
}

func (p *Conn) SetDeadline(t time.Time) error {
    return p.conn.SetDeadline(t)
}

func (p *Conn) SetReadDeadline(t time.Time) error {
    return p.conn.SetReadDeadline(t)
}

func (p *Conn) SetWriteDeadline(t time.Time) error {
    return p.conn.SetWriteDeadline(t)
}

func (p *Conn) checkPrefix() error {
    if p.proxyHeaderTimeout != 0 {
        readDeadLine := time.Now().Add(p.proxyHeaderTimeout)
        p.conn.SetReadDeadline(readDeadLine)
        defer p.conn.SetReadDeadline(time.Time{})
    }

    // Incrementally check each byte of the prefix
    for i := 1; i <= prefixLen; i++ {
        inp, err := p.bufReader.Peek(i)

        if err != nil {
            if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
                return nil
            } else {
                return err
            }
        }

        // Check for a prefix mis-match, quit early
        if !bytes.Equal(inp, prefix[:i]) {
            return nil
        }
    }

    // Read the header line
    header, err := p.bufReader.ReadString('\n')
    if err != nil {
        p.conn.Close()
        return err
    }

    // Strip the carriage return and new line
    header = header[:len(header)-2]

    // Split on spaces, should be (PROXY <type> <src addr> <dst addr> <src port> <dst port>)
    parts := strings.Split(header, " ")
    if len(parts) != 6 {
        p.conn.Close()
        return fmt.Errorf("Invalid header line: %s", header)
    }

    // Verify the type is known
    switch parts[1] {
    case "TCP4":
    case "TCP6":
    default:
        p.conn.Close()
        return fmt.Errorf("Unhandled address type: %s", parts[1])
    }

    // Parse out the source address
    ip := net.ParseIP(parts[2])
    if ip == nil {
        p.conn.Close()
        return fmt.Errorf("Invalid source ip: %s", parts[2])
    }
    port, err := strconv.Atoi(parts[4])
    if err != nil {
        p.conn.Close()
        return fmt.Errorf("Invalid source port: %s", parts[4])
    }
    p.srcAddr = &net.TCPAddr{IP: ip, Port: port}

    // Parse out the destination address
    ip = net.ParseIP(parts[3])
    if ip == nil {
        p.conn.Close()
        return fmt.Errorf("Invalid destination ip: %s", parts[3])
    }
    port, err = strconv.Atoi(parts[5])
    if err != nil {
        p.conn.Close()
        return fmt.Errorf("Invalid destination port: %s", parts[5])
    }
    p.dstAddr = &net.TCPAddr{IP: ip, Port: port}

    return nil
}
vendor/github.com/paultag/sniff/LICENSE (generated, vendored, new file, 17 lines)

@@ -0,0 +1,17 @@
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
vendor/github.com/paultag/sniff/parser/parser.go (generated, vendored, new file, 147 lines)

@@ -0,0 +1,147 @@
/* {{{ Copyright (c) Paul R. Tagliamonte <paultag@debian.org>, 2015
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE. }}} */

package parser

import (
    "fmt"
)

var TLSHeaderLength = 5

/* This function is basically all most folks want to invoke out of this
 * jumble of bits. This will take an incoming TLS Client Hello (including
 * all the fuzzy bits at the beginning of it - fresh out of the socket) and
 * go ahead and give us the SNI Name they want. */
func GetHostname(data []byte) (string, error) {
    if len(data) == 0 || data[0] != 0x16 {
        return "", fmt.Errorf("Doesn't look like a TLS Client Hello")
    }

    extensions, err := GetExtensionBlock(data)
    if err != nil {
        return "", err
    }
    sn, err := GetSNBlock(extensions)
    if err != nil {
        return "", err
    }
    sni, err := GetSNIBlock(sn)
    if err != nil {
        return "", err
    }
    return string(sni), nil
}

/* Given a Server Name TLS Extension block, parse out and return the SNI
 * (Server Name Indication) payload */
func GetSNIBlock(data []byte) ([]byte, error) {
    index := 0

    for {
        if index >= len(data) {
            break
        }
        length := int((data[index] << 8) + data[index+1])
        endIndex := index + 2 + length
        if data[index+2] == 0x00 { /* SNI */
            sni := data[index+3:]
            sniLength := int((sni[0] << 8) + sni[1])
            return sni[2 : sniLength+2], nil
        }
        index = endIndex
    }
    return []byte{}, fmt.Errorf(
        "Finished parsing the SN block without finding an SNI",
    )
}

/* Given a TLS Extensions data block, go ahead and find the SN block */
func GetSNBlock(data []byte) ([]byte, error) {
    index := 0

    if len(data) < 2 {
        return []byte{}, fmt.Errorf("Not enough bytes to be an SN block")
    }

    extensionLength := int((data[index] << 8) + data[index+1])
    data = data[2 : extensionLength+2]

    for {
        if index >= len(data) {
            break
        }
        length := int((data[index+2] << 8) + data[index+3])
        endIndex := index + 4 + length
        if data[index] == 0x00 && data[index+1] == 0x00 {
            return data[index+4 : endIndex], nil
        }

        index = endIndex
    }

    return []byte{}, fmt.Errorf(
        "Finished parsing the Extension block without finding an SN block",
    )
}

/* Given a raw TLS Client Hello, go ahead and find all the Extensions */
func GetExtensionBlock(data []byte) ([]byte, error) {
    /* data[0] - content type
     * data[1], data[2] - major/minor version
     * data[3], data[4] - total length
     * data[...38+5] - start of SessionID (length bit)
     * data[38+5] - length of SessionID
     */
    var index = TLSHeaderLength + 38

    if len(data) <= index+1 {
        return []byte{}, fmt.Errorf("Not enough bits to be a Client Hello")
    }

    /* Index is at SessionID Length bit */
    if newIndex := index + 1 + int(data[index]); (newIndex + 2) < len(data) {
        index = newIndex
    } else {
        return []byte{}, fmt.Errorf("Not enough bytes for the SessionID")
    }

    /* Index is at Cipher List Length bits */
    if newIndex := (index + 2 + int((data[index]<<8)+data[index+1])); (newIndex + 1) < len(data) {
        index = newIndex
    } else {
        return []byte{}, fmt.Errorf("Not enough bytes for the Cipher List")
    }

    /* Index is now at the compression length bit */
    if newIndex := index + 1 + int(data[index]); newIndex < len(data) {
        index = newIndex
    } else {
        return []byte{}, fmt.Errorf("Not enough bytes for the compression length")
    }

    /* Now we're at the Extension start */
    if len(data[index:]) == 0 {
        return nil, fmt.Errorf("No extensions")
    }
    return data[index:], nil
}

// vim: foldmethod=marker
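As a usage note, parser.GetHostname is the only call the controller needs from this package: read the first chunk of an accepted connection and pass those bytes in, the same way Handle in tcp.go does. A self-contained sketch follows; the buffer size, port and logging are illustrative.

package main

import (
    "log"
    "net"

    "github.com/paultag/sniff/parser"
)

// sniffSNI reads the first few KB of a freshly accepted connection and
// returns the SNI host from the TLS Client Hello, without terminating TLS.
// The returned bytes must be replayed to whichever upstream is dialed next,
// since they have already been consumed from the client.
func sniffSNI(conn net.Conn) (string, []byte, error) {
    buf := make([]byte, 4096)
    n, err := conn.Read(buf)
    if err != nil {
        return "", nil, err
    }
    host, err := parser.GetHostname(buf[:n])
    if err != nil {
        return "", buf[:n], err
    }
    return host, buf[:n], nil
}

func main() {
    ln, err := net.Listen("tcp", ":9443") // illustrative port
    if err != nil {
        log.Fatal(err)
    }
    for {
        conn, err := ln.Accept()
        if err != nil {
            continue
        }
        go func(c net.Conn) {
            defer c.Close()
            host, _, err := sniffSNI(c)
            if err != nil {
                log.Printf("no SNI: %v", err)
                return
            }
            log.Printf("client requested %q", host)
        }(conn)
    }
}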