Implement SSL Passthrough directly on NGINX

parent d96b3f0082
commit 7de9759532

4 changed files with 176 additions and 63 deletions
@@ -35,7 +35,6 @@ import (
 	"text/template"
 	"time"
 
-	proxyproto "github.com/armon/go-proxyproto"
 	"github.com/eapache/channels"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -296,10 +295,6 @@ func (n *NGINXController) Start() {
 		Pgid: 0,
 	}
 
-	if n.cfg.EnableSSLPassthrough {
-		n.setupSSLProxy()
-	}
-
 	klog.InfoS("Starting NGINX process")
 	n.start(cmd)
 
@@ -761,53 +756,6 @@ func nextPowerOf2(v int) int {
 	return v
 }
 
-func (n *NGINXController) setupSSLProxy() {
-	cfg := n.store.GetBackendConfiguration()
-	sslPort := n.cfg.ListenPorts.HTTPS
-	proxyPort := n.cfg.ListenPorts.SSLProxy
-
-	klog.InfoS("Starting TLS proxy for SSL Passthrough")
-	n.Proxy = &tcpproxy.TCPProxy{
-		Default: &tcpproxy.TCPServer{
-			Hostname: "localhost",
-			IP: "127.0.0.1",
-			Port: proxyPort,
-			ProxyProtocol: true,
-		},
-	}
-
-	listener, err := net.Listen("tcp", fmt.Sprintf(":%v", sslPort))
-	if err != nil {
-		klog.Fatalf("%v", err)
-	}
-
-	proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout}
-
-	// accept TCP connections on the configured HTTPS port
-	go func() {
-		for {
-			var conn net.Conn
-			var err error
-
-			if n.store.GetBackendConfiguration().UseProxyProtocol {
-				// wrap the listener in order to decode Proxy
-				// Protocol before handling the connection
-				conn, err = proxyList.Accept()
-			} else {
-				conn, err = listener.Accept()
-			}
-
-			if err != nil {
-				klog.Warningf("Error accepting TCP connection: %v", err)
-				continue
-			}
-
-			klog.V(3).InfoS("Handling TCP connection", "remote", conn.RemoteAddr(), "local", conn.LocalAddr())
-			go n.Proxy.Handle(conn)
-		}
-	}()
-}
-
 // configureDynamically encodes new Backends in JSON format and POSTs the
 // payload to an internal HTTP endpoint handled by Lua.
 func (n *NGINXController) configureDynamically(pcfg *ingress.Configuration) error {
@@ -286,6 +286,7 @@ var funcMap = text_template.FuncMap{
 	"shouldLoadAuthDigestModule": shouldLoadAuthDigestModule,
 	"buildServerName": buildServerName,
 	"buildCorsOriginRegex": buildCorsOriginRegex,
+	"buildSSLPassthroughListener": buildSSLPassthroughListener,
 }
 
 // escapeLiteralDollar will replace the $ character with ${literal_dollar}
@@ -1533,6 +1534,15 @@ func httpListener(addresses []string, co string, tc *config.TemplateConfig) []string {
 	return out
 }
 
+func buildSSLPassthroughListener(t interface{}) string {
+	tc, ok := t.(config.TemplateConfig)
+	if !ok {
+		klog.Errorf("expected a 'config.TemplateConfig' type but %T was returned", t)
+		return ""
+	}
+	return fmt.Sprintf("%v", tc.ListenPorts.HTTPS)
+}
+
 func httpsListener(addresses []string, co string, tc *config.TemplateConfig) []string {
 	out := make([]string, 0)
 	for _, address := range addresses {
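The helper registered in funcMap above just type-asserts the rendering context and returns the configured HTTPS port for the stream template to consume. A minimal standalone sketch of that funcMap-plus-helper pattern, using stand-in structs instead of the real config.TemplateConfig (illustration only, not code from this commit):

package main

import (
	"fmt"
	"os"
	"text/template"
)

// Local stand-ins for the ingress-nginx template config types (illustration only).
type ListenPorts struct{ HTTPS int }
type TemplateConfig struct{ ListenPorts *ListenPorts }

// Mirrors the new helper's shape: assert the rendering context, return the HTTPS port.
func buildSSLPassthroughListener(t interface{}) string {
	tc, ok := t.(TemplateConfig)
	if !ok {
		return ""
	}
	return fmt.Sprintf("%v", tc.ListenPorts.HTTPS)
}

func main() {
	funcMap := template.FuncMap{
		"buildSSLPassthroughListener": buildSSLPassthroughListener,
	}
	tmpl := template.Must(template.New("stream").Funcs(funcMap).Parse(
		"listen on port {{ buildSSLPassthroughListener . }}\n"))

	cfg := TemplateConfig{ListenPorts: &ListenPorts{HTTPS: 443}}
	_ = tmpl.Execute(os.Stdout, cfg) // prints "listen on port 443"
}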
rootfs/etc/nginx/njs/passthrough.js (new file, 80 lines)
@@ -0,0 +1,80 @@
+function truncate(r) {
+    try {
+        ngx.shared.ptbackends.clear() // TODO: We should instead try to compare and clean
+        r.return(200, "ok")
+        r.finish()
+    } catch (e) {
+        r.error(e)
+        r.return(400, "error truncating the map json payload")
+        r.finish()
+    }
+}
+
+function set(r) {
+    var service;
+    service = r.args.key
+    if (service == "" || service == null) {
+        r.return(400, "key should not be null")
+        r.finish()
+        return
+    }
+
+    try {
+        JSON.parse(r.requestText)
+        ngx.shared.ptbackends.set(r.args.key, r.requestText)
+        r.return(200, "ok")
+        r.finish()
+    } catch (e) {
+        r.error(e)
+        r.return(400, "error parsing json payload")
+        r.finish()
+    }
+}
+
+function getUpstream(r) {
+    var service;
+
+    try {
+        if ("variables" in r) {
+            service = r.variables.ssl_preread_server_name;
+        }
+
+        if (service == "") {
+            // TODO: This should be a parameter with the port that NGINX is listening
+            // for non Passthrough
+            return "127.0.0.1:442"
+        }
+
+        const backends = ngx.shared.ptbackends.get(service)
+        if (backends == "" || backends == null) {
+            throw "no backend configured"
+        }
+
+        const objBackend = JSON.parse(backends)
+        if (objBackend["endpoints"] == null || objBackend["endpoints"] == undefined) {
+            throw "bad endpoints object" // TODO: This validation should happen when receiving the json
+        }
+
+        // TODO: We can loadbalance between backends, but right now let's receive just the ClusterIP
+        if (!Array.isArray(objBackend["endpoints"])) {
+            throw "endpoint object is not an array"
+        }
+
+        if (objBackend["endpoints"].length < 1) {
+            throw "no backends available for the service"
+        }
+
+        // TODO: Do we want to implement a different LB for Passthrough when it is composed of multiple backends?
+        var randomBackend = Math.floor(Math.random() * (objBackend["endpoints"].length));
+        if (typeof objBackend["endpoints"][randomBackend] != 'string') {
+            throw "endpoint is not a string"
+        }
+        return objBackend["endpoints"][randomBackend]
+
+    } catch (e) {
+        // If there's an error we should give user a return saying it
+        return "@invalidbackend"
+    }
+}
+
+export default {set, truncate, getUpstream};
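The njs handlers above keep one JSON document per SNI hostname in the ptbackends shared dictionary: set expects a body whose "endpoints" field is an array of "host:port" strings, and getUpstream picks one of those entries at random. A hedged Go sketch of that payload shape (the passthroughBackends struct name and the sample hostname/address are mine, not part of the commit):

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical Go view of the JSON document the njs "set" handler stores per
// SNI hostname and "getUpstream" reads back: {"endpoints": ["ip:port", ...]}.
type passthroughBackends struct {
	Endpoints []string `json:"endpoints"`
}

func main() {
	payload := passthroughBackends{
		// getUpstream currently selects one entry at random; the commit's TODO
		// suggests a single ClusterIP is the expected case for now.
		Endpoints: []string{"10.96.0.15:443"},
	}
	body, _ := json.Marshal(payload)
	// This body would be stored under the key "myapp.example.com".
	fmt.Println(string(body)) // {"endpoints":["10.96.0.15:443"]}
}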
@@ -37,6 +37,9 @@ load_module /etc/nginx/modules/ngx_http_opentracing_module.so;
 load_module /modules_mount/etc/nginx/modules/otel/otel_ngx_module.so;
 {{ end }}
 
+load_module modules/ngx_http_js_module.so;
+load_module modules/ngx_stream_js_module.so;
+
 daemon off;
 
 worker_processes {{ $cfg.WorkerProcesses }};
@@ -524,17 +527,6 @@ http {
     {{ end }}
 
     upstream upstream_balancer {
-        ### Attention!!!
-        #
-        # We no longer create "upstream" section for every backend.
-        # Backends are handled dynamically using Lua. If you would like to debug
-        # and see what backends ingress-nginx has in its memory you can
-        # install our kubectl plugin https://kubernetes.github.io/ingress-nginx/kubectl-plugin.
-        # Once you have the plugin you can use "kubectl ingress-nginx backends" command to
-        # inspect current backends.
-        #
-        ###
-
         server 0.0.0.1; # placeholder
 
         balancer_by_lua_block {
@@ -770,6 +762,75 @@ http {
         }
     }
 
+    # NGX Server for NJS Operations
+    # TODO: Check if the shared map can be accessed between stream and server directives
+    server {
+        listen 127.0.0.1:11111; # TODO: Turn this configurable
+        set $proxy_upstream_name "njs";
+
+        keepalive_timeout 0;
+        gzip off;
+
+        access_log off;
+
+        {{ if $cfg.EnableOpentracing }}
+        opentracing off;
+        {{ end }}
+
+        {{ if $cfg.EnableOpentelemetry }}
+        opentelemetry off;
+        {{ end }}
+
+        {{ if $cfg.EnableModsecurity }}
+        modsecurity off;
+        {{ end }}
+
+        location {{ $healthzURI }} {
+            return 200;
+        }
+
+        location /is-dynamic-lb-initialized {
+            content_by_lua_block {
+                local configuration = require("configuration")
+                local backend_data = configuration.get_backends_data()
+                if not backend_data then
+                    ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
+                    return
+                end
+
+                ngx.say("OK")
+                ngx.exit(ngx.HTTP_OK)
+            }
+        }
+
+        location {{ .StatusPath }} {
+            stub_status on;
+        }
+
+        location /ptcfg/truncate {
+            client_max_body_size {{ luaConfigurationRequestBodySize $cfg }};
+            client_body_buffer_size {{ luaConfigurationRequestBodySize $cfg }};
+            proxy_buffering off;
+
+            js_content passthrough.truncate;
+        }
+
+        location /ptcfg/set {
+            client_max_body_size {{ luaConfigurationRequestBodySize $cfg }};
+            client_body_buffer_size {{ luaConfigurationRequestBodySize $cfg }};
+            proxy_buffering off;
+
+            js_content passthrough.set;
+        }
+
+        location / {
+            content_by_lua_block {
+                ngx.exit(ngx.HTTP_NOT_FOUND)
+            }
+        }
+    }
+
 }
 
 stream {
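The internal server above exposes the njs handlers over plain HTTP on 127.0.0.1:11111, so whatever process maintains the passthrough table can clear it and re-publish one entry per hostname. A hedged sketch of such a client (the address, query parameter, and paths come from this template; the publishPassthroughBackend helper and its use are my illustration, not code from this commit):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

const njsConfigServer = "http://127.0.0.1:11111" // listen address from the template above

// publishPassthroughBackend is a hypothetical helper: it POSTs the JSON document
// that the njs "set" handler stores under ?key=<SNI hostname>.
func publishPassthroughBackend(hostname, payload string) error {
	url := fmt.Sprintf("%s/ptcfg/set?key=%s", njsConfigServer, hostname)
	resp, err := http.Post(url, "application/json", bytes.NewBufferString(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status publishing %q: %s", hostname, resp.Status)
	}
	return nil
}

func main() {
	// Clear the shared dictionary first, mirroring what /ptcfg/truncate does.
	if resp, err := http.Post(njsConfigServer+"/ptcfg/truncate", "text/plain", nil); err == nil {
		resp.Body.Close()
	}

	// Then publish one entry per passthrough hostname.
	_ = publishPassthroughBackend("myapp.example.com", `{"endpoints":["10.96.0.15:443"]}`)
}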
@@ -839,6 +900,20 @@ stream {
         }
     }
 
+    {{ if and $all.IsSSLPassthroughEnabled }}
+    # Start SSLPassthrough configuration
+
+    js_import njs/passthrough.js;
+    js_shared_dict_zone zone=ptbackends:32m type=string;
+    js_set $ptupstream passthrough.getUpstream;
+
+    server {
+        {{ buildSSLPassthroughListener $all }}
+        ssl_preread on;
+        proxy_pass $ptupstream;
+    }
+    {{ end }}
+
     server {
         listen 127.0.0.1:{{ .StreamPort }};
 
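With ssl_preread on, the passthrough server never terminates TLS: it routes on the SNI value that getUpstream reads from $ssl_preread_server_name and streams the raw bytes to the chosen endpoint. A hedged Go sketch of a client exercising that path (the hostnames, port, and the assumption that the backend presents a certificate for the SNI name are all illustrative):

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Connect to the passthrough listener (the HTTPS port rendered by
	// buildSSLPassthroughListener). The SNI below selects the backend that was
	// published for this hostname; the TLS handshake completes end to end with
	// that backend, not with NGINX.
	conn, err := tls.Dial("tcp", "ingress.example.com:443", &tls.Config{
		ServerName: "myapp.example.com", // value seen by $ssl_preread_server_name
	})
	if err != nil {
		fmt.Println("handshake failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("negotiated with:", conn.ConnectionState().PeerCertificates[0].Subject)
}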