A really crappy SNI implementation
I've bolted a poor and embarrassing form of SNI support onto the ingress controller, based on the existing TCP "support". This probably only works with nginx. I don't think anyone should attempt to merge this code anywhere; functionally, however, this is the behavior I wanted.
Parent: 2c3b29c0b7
Commit: cbb85a0234
7 changed files with 113 additions and 29 deletions
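For orientation (this note is editorial, not part of the commit): the new --sni-services-configmap flag added below points at a ConfigMap whose keys are SNI hostnames and whose values are namespace/service:port, optionally suffixed with :PROXY for proxy protocol and/or !true to use the service ClusterIP as the upstream; in this commit the external port for SNI entries is hard-coded to 8443. A minimal sketch of the data the controller ends up iterating over, with made-up hostnames and services:

package main

import "fmt"

func main() {
	// Hypothetical contents of the ConfigMap named by --sni-services-configmap.
	// key   -> SNI server name to match in the TLS ClientHello
	// value -> namespace/service:port[:PROXY][!true]
	sniServices := map[string]string{
		"app.example.com":  "default/app:8443",
		"git.example.com":  "infra/gitea:https!true",    // use the service ClusterIP as the upstream
		"mail.example.com": "mail/imap-proxy:993:PROXY", // speak proxy protocol to the backend
	}
	for host, backend := range sniServices {
		fmt.Printf("%s => %s\n", host, backend)
	}
}

The parsing of these values is shown in the getStreamServices hunks further down.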
@@ -26,7 +26,7 @@ ARCH ?= $(shell go env GOARCH)
 GOARCH = ${ARCH}
 DUMB_ARCH = ${ARCH}
 
-ALL_ARCH = amd64 arm arm64 ppc64le
+ALL_ARCH ?= amd64 arm arm64 ppc64le
 
 QEMUVERSION=v2.9.1
 
@@ -649,6 +649,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
 		PassthroughBackends: ingressCfg.PassthroughBackends,
 		Servers:             ingressCfg.Servers,
 		TCPBackends:         ingressCfg.TCPEndpoints,
+		SNIBackends:         ingressCfg.SNIEndpoints,
 		UDPBackends:         ingressCfg.UDPEndpoints,
 		HealthzURI:          ngxHealthPath,
 		CustomErrors:        len(cfg.CustomHTTPErrors) > 0,
@@ -478,6 +478,7 @@ type TemplateConfig struct {
 	PassthroughBackends []*ingress.SSLPassthroughBackend
 	Servers             []*ingress.Server
 	TCPBackends         []ingress.L4Service
+	SNIBackends         []ingress.L4Service
 	UDPBackends         []ingress.L4Service
 	HealthzURI          string
 	CustomErrors        bool
@@ -136,6 +136,8 @@ type Configuration struct {
 	// optional
 	TCPConfigMapName string
+	// optional
+	SNIConfigMapName string
 	// optional
 	UDPConfigMapName string
 	DefaultSSLCertificate string
 	DefaultHealthzURL string
@@ -293,7 +295,7 @@ func newIngressController(config *Configuration) *GenericController {
 				ic.forceReload = true
 			}
 			// updates to configuration configmaps can trigger an update
-			if mapKey == ic.cfg.ConfigMapName || mapKey == ic.cfg.TCPConfigMapName || mapKey == ic.cfg.UDPConfigMapName {
+			if mapKey == ic.cfg.ConfigMapName || mapKey == ic.cfg.TCPConfigMapName || mapKey == ic.cfg.SNIConfigMapName || mapKey == ic.cfg.UDPConfigMapName {
 				ic.recorder.Eventf(upCmap, api.EventTypeNormal, "UPDATE", fmt.Sprintf("ConfigMap %v", mapKey))
 				ic.syncQueue.Enqueue(cur)
 			}
@@ -465,6 +467,7 @@ func (ic *GenericController) syncIngress(key interface{}) error {
 		Backends:            upstreams,
 		Servers:             servers,
 		TCPEndpoints:        ic.getStreamServices(ic.cfg.TCPConfigMapName, api.ProtocolTCP),
+		SNIEndpoints:        ic.getStreamServices(ic.cfg.SNIConfigMapName, api.ProtocolSNI),
 		UDPEndpoints:        ic.getStreamServices(ic.cfg.UDPConfigMapName, api.ProtocolUDP),
 		PassthroughBackends: passUpstreams,
 	}
@@ -513,21 +516,44 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.Protocol
 	}
 
 	var svcs []ingress.L4Service
-	// k -> port to expose
-	// v -> <namespace>/<service name>:<port from service to be used>
+	// k -> port to expose for TCP and UDP, hostname for SNI
+	// v -> <namespace>/<service name>:<port from service to be used> for TCP and UDP
+	// v -> <namespace>/<service name>:<port from service to be used>!<service-upstream true/false> for SNI
 	for k, v := range configmap.Data {
-		externalPort, err := strconv.Atoi(k)
-		if err != nil {
-			glog.Warningf("%v is not valid as a TCP/UDP port", k)
-			continue
+		glog.V(3).Infof("Evaluating: %v => %v", k, v)
+		var externalPort int
+		var hostname string
+		if proto == api.ProtocolSNI {
+			hostname = k
+			externalPort = 8443
+		} else {
+			externalPort, err = strconv.Atoi(k)
+			if err != nil {
+				glog.Warningf("%v is not valid as a TCP/UDP port", k)
+				continue
+			}
 		}
 
-		// this ports used by the backend
+		// this port is used by the backend
 		if local_strings.StringInSlice(k, reservedPorts) {
 			glog.Warningf("port %v cannot be used for TCP or UDP services. It is reserved for the Ingress controller", k)
 			continue
 		}
 
+		serviceUpstream := false
+		if proto == api.ProtocolSNI {
+			glog.V(3).Infof("proto is sni, looking for serviceUpstream info")
+			tokens := strings.Split(v, "!")
+			if len(tokens) == 2 {
+				glog.V(3).Infof("Found the bang")
+				if tokens[1] == "true" {
+					glog.V(3).Infof("serviceUpstream is requested")
+					serviceUpstream = true
+				}
+				glog.V(3).Infof("setting v to %v", tokens[0])
+				v = tokens[0]
+			}
+		}
 		nsSvcPort := strings.Split(v, ":")
 		if len(nsSvcPort) < 2 {
 			glog.Warningf("invalid format (namespace/name:port:[PROXY]) '%v'", k)
@@ -538,8 +564,8 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.Protocol
 		svcPort := nsSvcPort[1]
 		useProxyProtocol := false
 
-		// Proxy protocol is possible if the service is TCP
-		if len(nsSvcPort) == 3 && proto == api.ProtocolTCP {
+		// Proxy protocol is possible if the service is TCP or SNI
+		if len(nsSvcPort) == 3 && (proto == api.ProtocolTCP || proto == api.ProtocolSNI) {
 			if strings.ToUpper(nsSvcPort[2]) == "PROXY" {
 				useProxyProtocol = true
 			}
@@ -565,25 +591,63 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.Protocol
 		svc := svcObj.(*api.Service)
 
 		var endps []ingress.Endpoint
-		targetPort, err := strconv.Atoi(svcPort)
-		if err != nil {
-			glog.V(3).Infof("searching service %v/%v endpoints using the name '%v'", svcNs, svcName, svcPort)
-			for _, sp := range svc.Spec.Ports {
-				if sp.Name == svcPort {
-					if sp.Protocol == proto {
-						endps = ic.getEndpoints(svc, &sp, proto, &healthcheck.Upstream{})
-						break
-					}
-				}
-			}
-		} else {
-			// we need to use the TargetPort (where the endpoints are running)
-			glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v'", svcNs, svcName, targetPort)
-			for _, sp := range svc.Spec.Ports {
-				if sp.Port == int32(targetPort) {
-					if sp.Protocol == proto {
-						endps = ic.getEndpoints(svc, &sp, proto, &healthcheck.Upstream{})
-						break
-					}
-				}
-			}
+		// Add the service cluster endpoint as the upstream instead of individual endpoints
+		// if the serviceUpstream annotation is enabled
+		if serviceUpstream {
+			svcKey := fmt.Sprintf("%v/%v", svcNs, svcName)
+			svcObj, svcExists, err := ic.svcLister.Store.GetByKey(svcKey)
+
+			if err != nil {
+				// XXX This is stupid. If the kube API server is down or
+				// something we'll start tearing apart our perfectly good
+				// configuration. The better option here is to noop.
+				glog.Warningf("Unable to query for info about service %v, skipping", svcKey)
+				continue
+			}
+
+			if !svcExists {
+				glog.Warningf("Service %v was not found, ignoring.", svcKey)
+				continue
+			}
+
+			svc := svcObj.(*api.Service)
+			if svc.Spec.ClusterIP == "" {
+				glog.Warningf("No ClusterIP found for service %s", svcKey)
+				continue
+			}
+			endps = []ingress.Endpoint{ingress.Endpoint{
+				Address: svc.Spec.ClusterIP,
+				Port:    svcPort,
+			}}
+		} else {
+			targetPort, err := strconv.Atoi(svcPort)
+			// We're going to go searching through service endpoints for the port
+			// and proto we're trying to forward to. SNI, however, is not a valid
+			// protocol for services. We continue our hack here by mapping SNI to
+			// TCP for these purposes.
+			searchProto := proto
+			if searchProto == api.ProtocolSNI {
+				searchProto = api.ProtocolTCP
+			}
+			if err != nil {
+				glog.V(3).Infof("searching service %v/%v endpoints using the name '%v' and proto '%v'", svcNs, svcName, svcPort, searchProto)
+				for _, sp := range svc.Spec.Ports {
+					if sp.Name == svcPort {
+						if sp.Protocol == searchProto {
+							endps = ic.getEndpoints(svc, &sp, searchProto, &healthcheck.Upstream{})
+							break
+						}
+					}
+				}
+			} else {
+				// we need to use the TargetPort (where the endpoints are running)
+				glog.V(3).Infof("searching service %v/%v endpoints using the target port '%v' and proto '%v'", svcNs, svcName, targetPort, searchProto)
+				for _, sp := range svc.Spec.Ports {
+					if sp.Port == int32(targetPort) {
+						if sp.Protocol == searchProto {
+							endps = ic.getEndpoints(svc, &sp, searchProto, &healthcheck.Upstream{})
+							break
+						}
+					}
+				}
+			}
@@ -604,6 +668,7 @@ func (ic *GenericController) getStreamServices(configmapName string, proto api.Protocol
 				Port:             intstr.FromString(svcPort),
 				Protocol:         proto,
 				UseProxyProtocol: useProxyProtocol,
+				ServerName:       hostname,
 			},
 			Endpoints: endps,
 		})
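Pulled out of the hunks above as a standalone sketch (parseSNIValue is a hypothetical helper, not code from this commit; the controller does the same work inline and then resolves endpoints through the service lister), the value handling for an SNI entry looks roughly like this:

package main

import (
	"fmt"
	"strings"
)

// parseSNIValue mirrors the new value handling in getStreamServices: strip an
// optional "!true" suffix that requests the service ClusterIP as the upstream,
// then split the remainder as namespace/name:port with an optional trailing
// PROXY token that enables proxy protocol towards the backend.
func parseSNIValue(v string) (ns, name, port string, serviceUpstream, useProxyProtocol bool, err error) {
	if tokens := strings.Split(v, "!"); len(tokens) == 2 {
		serviceUpstream = tokens[1] == "true"
		v = tokens[0]
	}
	nsSvcPort := strings.Split(v, ":")
	if len(nsSvcPort) < 2 {
		return "", "", "", false, false, fmt.Errorf("invalid format (namespace/name:port:[PROXY]) %q", v)
	}
	nsName := strings.Split(nsSvcPort[0], "/")
	if len(nsName) != 2 {
		return "", "", "", false, false, fmt.Errorf("invalid namespace/name %q", nsSvcPort[0])
	}
	ns, name, port = nsName[0], nsName[1], nsSvcPort[1]
	useProxyProtocol = len(nsSvcPort) == 3 && strings.ToUpper(nsSvcPort[2]) == "PROXY"
	return ns, name, port, serviceUpstream, useProxyProtocol, nil
}

func main() {
	ns, name, port, upstream, proxy, err := parseSNIValue("infra/gitea:https:PROXY!true")
	fmt.Println(ns, name, port, upstream, proxy, err) // infra gitea https true true <nil>
}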
@@ -60,6 +60,15 @@ func NewIngressController(backend ingress.Controller) *GenericController {
 		number of the name of the port.
 		The ports 80 and 443 are not allowed as external ports. This ports are reserved for the backend`)
+
+	sniConfigMapName = flags.String("sni-services-configmap", "",
+		`Name of the ConfigMap that contains the definition of the SNI services to expose.
+		The key in the map indicates the SNI name to look for in a TLS client
+		hello and the external port to be used. The value is the name of the
+		service with the format namespace/serviceName and the port of the
+		service could be a number of the name of the port. The ports 80 and 443
+		are not allowed as external ports. This ports are reserved for the
+		backend`)
 
 	udpConfigMapName = flags.String("udp-services-configmap", "",
 		`Name of the ConfigMap that contains the definition of the UDP services to expose.
 		The key in the map indicates the external port to be used. The value is the name of the
@@ -180,6 +189,7 @@ func NewIngressController(backend ingress.Controller) *GenericController {
 		Namespace:             *watchNamespace,
 		ConfigMapName:         *configMap,
 		TCPConfigMapName:      *tcpConfigMapName,
+		SNIConfigMapName:      *sniConfigMapName,
 		UDPConfigMapName:      *udpConfigMapName,
 		DefaultSSLCertificate: *defSSLCertificate,
 		DefaultHealthzURL:     *defHealthzURL,
@@ -140,6 +140,9 @@ type Configuration struct {
 	// TCPEndpoints contain endpoints for tcp streams handled by this backend
 	// +optional
 	TCPEndpoints []L4Service `json:"tcpEndpoints,omitempty"`
+	// SNIEndpoints contain endpoints for SNI streams handled by this backend
+	// +optional
+	SNIEndpoints []L4Service `json:"sniEndpoints,omitempty"`
 	// UDPEndpoints contain endpoints for udp streams handled by this backend
 	// +optional
 	UDPEndpoints []L4Service `json:"udpEndpoints,omitempty"`
@@ -349,7 +352,7 @@ type L4Service struct {
 	// Backend of the service
 	Backend L4Backend `json:"backend"`
 	// Endpoints active endpoints of the service
-	Endpoints []Endpoint `json:"endpoins,omitEmpty"`
+	Endpoints []Endpoint `json:"endpoints,omitEmpty"`
 }
 
 // L4Backend describes the kubernetes service behind L4 Ingress service
@@ -360,4 +363,6 @@ type L4Backend struct {
 	Protocol api.Protocol `json:"protocol"`
 	// +optional
 	UseProxyProtocol bool `json:"useProxyProtocol"`
+	// +optional This is the name we'll route SNI requests on when proto is SNI
+	ServerName string `json:"servername"`
 }
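To show what the type changes above amount to on the wire, here is a sketch using simplified local mirrors of L4Service/L4Backend (only the fields touched by this diff; the real types live in the ingress package and use intstr.IntOrString for the backend port). The hostname and endpoint values are invented:

package main

import (
	"encoding/json"
	"os"
)

// Simplified stand-ins for the repo's types, defined locally so the sketch runs.
type L4Backend struct {
	Port             string `json:"port"`
	Protocol         string `json:"protocol"`
	UseProxyProtocol bool   `json:"useProxyProtocol"`
	ServerName       string `json:"servername"`
}

type Endpoint struct {
	Address string `json:"address"`
	Port    string `json:"port"`
}

type L4Service struct {
	Backend   L4Backend  `json:"backend"`
	Endpoints []Endpoint `json:"endpoints"`
}

func main() {
	// Hypothetical SNI service entry as the controller would now describe it.
	svc := L4Service{
		Backend: L4Backend{
			Port:             "8443",
			Protocol:         "SNI",
			UseProxyProtocol: false,
			ServerName:       "app.example.com",
		},
		Endpoints: []Endpoint{{Address: "10.0.0.15", Port: "8443"}},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(svc)
}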
vendor/k8s.io/api/core/v1/types.go (generated, vendored): 2 changes
@@ -847,6 +847,8 @@ type Protocol string
 const (
 	// ProtocolTCP is the TCP protocol.
 	ProtocolTCP Protocol = "TCP"
+	// ProtocolSNI is the TCP protocol where we will inspect the TLS client hello for name information.
+	ProtocolSNI Protocol = "SNI"
 	// ProtocolUDP is the UDP protocol.
 	ProtocolUDP Protocol = "UDP"
 )