Use nginx upstreams and reload only if configuration changes

Manuel de Brito Fontes 2016-03-14 23:29:13 -03:00
parent d0a15b1267
commit cad814cbb3
50 changed files with 370 additions and 10432 deletions
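The gist of the change: instead of queue-driven handlers that call Reload on every event, the controller now builds the full set of upstreams and servers on a timer and hands them to CheckAndReload, which only touches nginx when the generated configuration actually differs. The NginxManager itself is not part of this diff, so the snippet below is only a rough sketch of that compare-before-reload idea, with made-up helper and file names; it is not the repository's implementation.

```go
// Sketch only: nginx.NginxManager is not shown in this diff, so checkAndReload
// below is a hypothetical illustration of "reload only if configuration changes",
// not the actual manager code.
package main

import (
	"bytes"
	"io/ioutil"
	"log"
	"os/exec"
)

// checkAndReload skips the reload when the freshly rendered configuration is
// byte-identical to the file nginx is already running with.
func checkAndReload(confPath string, newCfg []byte) error {
	current, err := ioutil.ReadFile(confPath)
	if err == nil && bytes.Equal(current, newCfg) {
		log.Println("configuration unchanged, skipping reload")
		return nil
	}
	if err := ioutil.WriteFile(confPath, newCfg, 0644); err != nil {
		return err
	}
	// Ask the running nginx master process to re-read the configuration.
	return exec.Command("nginx", "-s", "reload").Run()
}

func main() {
	cfg := []byte("upstream default-echo { server 10.0.0.1:8080; }\n")
	if err := checkAndReload("/etc/nginx/conf.d/upstreams.conf", cfg); err != nil {
		log.Printf("reload failed: %v", err)
	}
}
```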


@@ -19,7 +19,7 @@ package main
 import (
 	"fmt"
 	"net/http"
-	"reflect"
+	"sort"
 	"sync"
 	"time"
@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/contrib/ingress/controllers/nginx-third-party/nginx" "k8s.io/contrib/ingress/controllers/nginx-third-party/nginx"
@@ -58,28 +59,26 @@ const (
 	// namespace/serviceName:portToExport pairings. This assumes you've opened up the right
 	// hostPorts for each service that serves ingress traffic. Te value of portToExport indicates the
 	// port to listen inside nginx, not the port of the service.
-	lbTcpServices = "tcpservices"
+	lbTCPServices = "tcpservices"

 	k8sAnnotationPrefix = "nginx-ingress.kubernetes.io"
 )

-var (
-	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
-)
-
 // loadBalancerController watches the kubernetes api and adds/removes services
 // from the loadbalancer
 type loadBalancerController struct {
 	client           *client.Client
 	ingController    *framework.Controller
 	configController *framework.Controller
+	endpController   *framework.Controller
+	svcController    *framework.Controller
 	ingLister        StoreToIngressLister
+	svcLister        cache.StoreToServiceLister
 	configLister     StoreToConfigMapLister
+	endpLister       cache.StoreToEndpointsLister
 	recorder         record.EventRecorder
-	ingQueue         *taskQueue
-	configQueue      *taskQueue
 	stopCh           chan struct{}
-	ngx              *nginx.NginxManager
+	nginx            *nginx.NginxManager
 	lbInfo           *lbInfo

 	// stopLock is used to enforce only a single call to Stop is active.
 	// Needed because we allow stopping through an http endpoint and
@@ -95,97 +94,47 @@ func (a annotations) getNginxConfig() (string, bool) {
 	return val, ok
 }

-func (a annotations) getTcpServices() (string, bool) {
-	val, ok := a[fmt.Sprintf("%v/%v", k8sAnnotationPrefix, lbTcpServices)]
+func (a annotations) getTCPServices() (string, bool) {
+	val, ok := a[fmt.Sprintf("%v/%v", k8sAnnotationPrefix, lbTCPServices)]
 	return val, ok
 }

-// NewLoadBalancerController creates a controller for nginx loadbalancer
-func NewLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc, customErrorSvc nginx.Service, namespace string, lbInfo *lbInfo) (*loadBalancerController, error) {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
-
+// newLoadBalancerController creates a controller for nginx loadbalancer
+func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc, customErrorSvc nginx.Service, namespace string, lbInfo *lbInfo) (*loadBalancerController, error) {
 	lbc := loadBalancerController{
 		client: kubeClient,
 		stopCh: make(chan struct{}),
-		recorder: eventBroadcaster.NewRecorder(
-			api.EventSource{Component: "nginx-lb-controller"}),
 		lbInfo: lbInfo,
+		nginx:  nginx.NewManager(kubeClient, defaultSvc, customErrorSvc),
 	}

-	lbc.ingQueue = NewTaskQueue(lbc.syncIngress)
-	lbc.configQueue = NewTaskQueue(lbc.syncConfig)
-	lbc.ngx = nginx.NewManager(kubeClient, defaultSvc, customErrorSvc)
-
-	// Ingress watch handlers
-	pathHandlers := framework.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			addIng := obj.(*extensions.Ingress)
-			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "ADD", fmt.Sprintf("Adding ingress %s/%s", addIng.Namespace, addIng.Name))
-			lbc.ingQueue.enqueue(obj)
-		},
-		DeleteFunc: lbc.ingQueue.enqueue,
-		UpdateFunc: func(old, cur interface{}) {
-			if !reflect.DeepEqual(old, cur) {
-				glog.V(2).Infof("Ingress %v changed, syncing", cur.(*extensions.Ingress).Name)
-			}
-			lbc.ingQueue.enqueue(cur)
-		},
-	}
-
 	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc:  ingressListFunc(lbc.client, namespace),
 			WatchFunc: ingressWatchFunc(lbc.client, namespace),
 		},
-		&extensions.Ingress{}, resyncPeriod, pathHandlers)
-
-	// Config watch handlers
-	configHandlers := framework.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			lbc.configQueue.enqueue(obj)
-		},
-		DeleteFunc: lbc.configQueue.enqueue,
-		UpdateFunc: func(old, cur interface{}) {
-			if !reflect.DeepEqual(old, cur) {
-				glog.V(2).Infof("nginx rc changed, syncing")
-				lbc.configQueue.enqueue(cur)
-			}
-		},
-	}
+		&extensions.Ingress{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})

 	lbc.configLister.Store, lbc.configController = framework.NewInformer(
 		&cache.ListWatch{
-			ListFunc: func(api.ListOptions) (runtime.Object, error) {
-				switch lbInfo.DeployType.(type) {
-				case *api.ReplicationController:
-					rc, err := kubeClient.ReplicationControllers(lbInfo.PodNamespace).Get(lbInfo.ObjectName)
-					return &api.ReplicationControllerList{
-						Items: []api.ReplicationController{*rc},
-					}, err
-				case *extensions.DaemonSet:
-					ds, err := kubeClient.Extensions().DaemonSets(lbInfo.PodNamespace).Get(lbInfo.ObjectName)
-					return &extensions.DaemonSetList{
-						Items: []extensions.DaemonSet{*ds},
-					}, err
-				default:
-					return nil, errInvalidKind
-				}
-			},
-			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				switch lbInfo.DeployType.(type) {
-				case *api.ReplicationController:
-					options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": lbInfo.ObjectName})
-					return kubeClient.ReplicationControllers(lbInfo.PodNamespace).Watch(options)
-				case *extensions.DaemonSet:
-					options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": lbInfo.ObjectName})
-					return kubeClient.Extensions().DaemonSets(lbInfo.PodNamespace).Watch(options)
-				default:
-					return nil, errInvalidKind
-				}
-			},
+			ListFunc:  configListFunc(kubeClient, lbc.lbInfo.DeployType, namespace, lbInfo.ObjectName),
+			WatchFunc: configWatchFunc(kubeClient, lbc.lbInfo.DeployType, namespace, lbInfo.ObjectName),
 		},
-		&api.ReplicationController{}, resyncPeriod, configHandlers)
+		&api.ReplicationController{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
+
+	lbc.endpLister.Store, lbc.endpController = framework.NewInformer(
+		&cache.ListWatch{
+			ListFunc:  endpointsListFunc(kubeClient, namespace),
+			WatchFunc: endpointsWatchFunc(kubeClient, namespace),
+		},
+		&api.Endpoints{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
+
+	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
+		&cache.ListWatch{
+			ListFunc:  serviceListFunc(kubeClient, namespace),
+			WatchFunc: serviceWatchFunc(kubeClient, namespace),
+		},
+		&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})

 	return &lbc, nil
 }
@@ -202,117 +151,66 @@ func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
 	}
 }

-// syncIngress manages Ingress create/updates/deletes.
-func (lbc *loadBalancerController) syncIngress(key string) {
-	glog.V(2).Infof("Syncing Ingress %v", key)
-	obj, ingExists, err := lbc.ingLister.Store.GetByKey(key)
-	if err != nil {
-		lbc.ingQueue.requeue(key, err)
-		return
-	}
-
-	if !ingExists {
-		glog.Errorf("Ingress not found: %v", key)
-		return
-	}
-
-	// this means some Ingress rule changed. There is no need to reload nginx but
-	// we need to update the rules to use invoking "POST /update-ingress" with the
-	// list of Ingress rules
-	ingList := lbc.ingLister.Store.List()
-	if err := lbc.ngx.SyncIngress(ingList); err != nil {
-		lbc.ingQueue.requeue(key, err)
-		return
-	}
-
-	ing := *obj.(*extensions.Ingress)
-	if err := lbc.updateIngressStatus(ing); err != nil {
-		lbc.recorder.Eventf(&ing, api.EventTypeWarning, "Status", err.Error())
-		lbc.ingQueue.requeue(key, err)
-	}
-
-	return
-}
-
-// syncConfig manages changes in nginx configuration.
-func (lbc *loadBalancerController) syncConfig(key string) {
-	glog.Infof("Syncing nginx configuration")
-	if !lbc.ingController.HasSynced() {
-		glog.Infof("deferring sync till endpoints controller has synced")
-		time.Sleep(100 * time.Millisecond)
-	}
-
-	// we only need to sync nginx
-	if key != fmt.Sprintf("%v/%v", lbc.lbInfo.PodNamespace, lbc.lbInfo.ObjectName) {
-		glog.Warningf("skipping sync because the event is not related to a change in configuration")
-		return
-	}
-
-	obj, configExists, err := lbc.configLister.Store.GetByKey(key)
-	if err != nil {
-		lbc.configQueue.requeue(key, err)
-		return
-	}
-
-	if !configExists {
-		glog.Errorf("Configuration not found: %v", key)
-		return
-	}
-
-	glog.V(2).Infof("Syncing config %v", key)
-
-	var kindAnnotations map[string]string
-	switch obj.(type) {
-	case *api.ReplicationController:
-		rc := *obj.(*api.ReplicationController)
-		kindAnnotations = rc.Annotations
-	case *extensions.DaemonSet:
-		rc := *obj.(*extensions.DaemonSet)
-		kindAnnotations = rc.Annotations
-	}
-
-	ngxCfgAnn, _ := annotations(kindAnnotations).getNginxConfig()
-	tcpSvcAnn, _ := annotations(kindAnnotations).getTcpServices()
-	ngxConfig, err := lbc.ngx.ReadConfig(ngxCfgAnn)
-	if err != nil {
-		glog.Warningf("%v", err)
-	}
-
-	// TODO: tcp services can change (new item in the annotation list)
-	// TODO: skip get everytime
-	tcpServices := getTcpServices(lbc.client, tcpSvcAnn)
-	lbc.ngx.Reload(ngxConfig, tcpServices)
-
-	return
-}
-
-// updateIngressStatus updates the IP and annotations of a loadbalancer.
-// The annotations are parsed by kubectl describe.
-func (lbc *loadBalancerController) updateIngressStatus(ing extensions.Ingress) error {
-	ingClient := lbc.client.Extensions().Ingress(ing.Namespace)
-
-	ip := lbc.lbInfo.PodIP
-	currIng, err := ingClient.Get(ing.Name)
-	if err != nil {
-		return err
-	}
-	currIng.Status = extensions.IngressStatus{
-		LoadBalancer: api.LoadBalancerStatus{
-			Ingress: []api.LoadBalancerIngress{
-				{IP: ip},
-			},
-		},
-	}
-
-	glog.Infof("Updating loadbalancer %v/%v with IP %v", ing.Namespace, ing.Name, ip)
-	lbc.recorder.Eventf(currIng, api.EventTypeNormal, "CREATE", "ip: %v", ip)
-	return nil
+func serviceListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
+	return func(opts api.ListOptions) (runtime.Object, error) {
+		return c.Services(ns).List(opts)
+	}
+}
+
+func serviceWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
+	return func(options api.ListOptions) (watch.Interface, error) {
+		return c.Services(ns).Watch(options)
+	}
+}
+
+func configListFunc(c *client.Client, deployType runtime.Object, ns, name string) func(api.ListOptions) (runtime.Object, error) {
+	return func(api.ListOptions) (runtime.Object, error) {
+		switch deployType.(type) {
+		case *api.ReplicationController:
+			rc, err := c.ReplicationControllers(ns).Get(name)
+			return &api.ReplicationControllerList{
+				Items: []api.ReplicationController{*rc},
+			}, err
+		case *extensions.DaemonSet:
+			ds, err := c.Extensions().DaemonSets(ns).Get(name)
+			return &extensions.DaemonSetList{
+				Items: []extensions.DaemonSet{*ds},
+			}, err
+		default:
+			return nil, errInvalidKind
+		}
+	}
+}
+
+func configWatchFunc(c *client.Client, deployType runtime.Object, ns, name string) func(options api.ListOptions) (watch.Interface, error) {
+	return func(options api.ListOptions) (watch.Interface, error) {
+		switch deployType.(type) {
+		case *api.ReplicationController:
+			options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": name})
+			return c.ReplicationControllers(ns).Watch(options)
+		case *extensions.DaemonSet:
+			options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": name})
+			return c.Extensions().DaemonSets(ns).Watch(options)
+		default:
+			return nil, errInvalidKind
+		}
+	}
+}
+
+func endpointsListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
+	return func(opts api.ListOptions) (runtime.Object, error) {
+		return c.Endpoints(ns).List(opts)
+	}
+}
+
+func endpointsWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
+	return func(options api.ListOptions) (watch.Interface, error) {
+		return c.Endpoints(ns).Watch(options)
+	}
 }

 func (lbc *loadBalancerController) registerHandlers() {
 	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
-		if err := lbc.ngx.IsHealthy(); err != nil {
+		if err := lbc.nginx.IsHealthy(); err != nil {
 			w.WriteHeader(500)
 			w.Write([]byte("nginx error"))
 			return
@@ -329,6 +227,136 @@ func (lbc *loadBalancerController) registerHandlers() {
 	glog.Fatalf(fmt.Sprintf("%v", http.ListenAndServe(fmt.Sprintf(":%v", *healthzPort), nil)))
 }

+func (lbc *loadBalancerController) sync() {
+	ings := lbc.ingLister.Store.List()
+	upstreams, servers, update := lbc.updateNGINX(ings)
+	if update {
+		glog.V(2).Infof("syncing NGINX config")
+		var kindAnnotations map[string]string
+		ngxCfgAnn, _ := annotations(kindAnnotations).getNginxConfig()
+		tcpSvcAnn, _ := annotations(kindAnnotations).getTCPServices()
+		ngxConfig, err := lbc.nginx.ReadConfig(ngxCfgAnn)
+		if err != nil {
+			glog.Warningf("%v", err)
+		}
+
+		tcpServices := getTCPServices(lbc.client, tcpSvcAnn)
+		lbc.nginx.CheckAndReload(ngxConfig, upstreams, servers, tcpServices)
+	}
+}
+
+func (lbc *loadBalancerController) updateNGINX(data []interface{}) ([]nginx.Upstream, []nginx.Server, bool) {
+	pems := make(map[string]string)
+
+	upstreams := make(map[string]nginx.Upstream)
+	var servers []nginx.Server
+
+	for _, ingIf := range data {
+		ing := ingIf.(extensions.Ingress)
+
+		for _, rule := range ing.Spec.Rules {
+			if rule.IngressRuleValue.HTTP == nil {
+				continue
+			}
+
+			for _, path := range rule.HTTP.Paths {
+				name := ing.Namespace + "-" + path.Backend.ServiceName
+
+				var ups nginx.Upstream
+				if existent, ok := upstreams[name]; ok {
+					ups = existent
+				} else {
+					ups := nginx.NewUpstreamWithDefaultServer(name)
+					upstreams[name] = ups
+				}
+
+				svcKey := ing.Namespace + "/" + path.Backend.ServiceName
+				svcObj, svcExists, err := lbc.svcLister.Store.GetByKey(svcKey)
+				if err != nil {
+					glog.Infof("error getting service %v from the cache: %v", svcKey, err)
+				} else {
+					if svcExists {
+						svc := svcObj.(*api.Service)
+						if svc.Spec.ClusterIP != "None" && svc.Spec.ClusterIP != "" {
+							upsServer := nginx.UpstreamServer{Address: svc.Spec.ClusterIP, Port: path.Backend.ServicePort.String()}
+							ups.Backends = []nginx.UpstreamServer{upsServer}
+						} else if svc.Spec.ClusterIP == "None" {
+							endps, err := lbc.endpLister.GetServiceEndpoints(svc)
+							if err != nil {
+								glog.Infof("error getting endpoints for service %v from the cache: %v", svc, err)
+							} else {
+								upsServers := endpointsToUpstreamServers(endps, path.Backend.ServicePort.IntValue())
+								if len(upsServers) > 0 {
+									ups.Backends = upsServers
+								}
+							}
+						}
+					}
+				}
+
+				//upstreams[name] = append(upstreams[name], ups)
+			}
+		}
+
+		for _, rule := range ing.Spec.Rules {
+			server := nginx.Server{Name: rule.Host}
+
+			if pemFile, ok := pems[rule.Host]; ok {
+				server.SSL = true
+				server.SSLCertificate = pemFile
+				server.SSLCertificateKey = pemFile
+			}
+
+			var locations []nginx.Location
+
+			for _, path := range rule.HTTP.Paths {
+				loc := nginx.Location{Path: path.Path}
+				upsName := ing.GetName() + "-" + path.Backend.ServiceName
+
+				for _, ups := range upstreams {
+					if upsName == ups.Name {
+						loc.Upstream = ups
+					}
+				}
+
+				locations = append(locations, loc)
+			}
+
+			server.Locations = locations
+			servers = append(servers, server)
+		}
+	}
+
+	uValues := make([]nginx.Upstream, 0, len(upstreams))
+	for _, value := range upstreams {
+		sort.Sort(nginx.UpstreamServerByAddrPort(value.Backends))
+		uValues = append(uValues, value)
+	}
+	sort.Sort(nginx.UpstreamByNameServers(uValues))
+
+	sort.Sort(nginx.ServerByNamePort(servers))
+
+	return uValues, servers, true
+}
+
+func endpointsToUpstreamServers(endps api.Endpoints, servicePort int) []nginx.UpstreamServer {
+	var upsServers []nginx.UpstreamServer
+	for _, subset := range endps.Subsets {
+		for _, port := range subset.Ports {
+			if port.Port == servicePort {
+				for _, address := range subset.Addresses {
+					ups := nginx.UpstreamServer{Address: address.IP, Port: fmt.Sprintf("%v", servicePort)}
+					upsServers = append(upsServers, ups)
+				}
+				break
+			}
+		}
+	}
+
+	return upsServers
+}
+
 // Stop stops the loadbalancer controller.
 func (lbc *loadBalancerController) Stop() {
 	// Stop is invoked from the http endpoint.
@@ -339,29 +367,22 @@ func (lbc *loadBalancerController) Stop() {
 	if !lbc.shutdown {
 		close(lbc.stopCh)

 		glog.Infof("Shutting down controller queues")
-		lbc.ingQueue.shutdown()
-		lbc.configQueue.shutdown()
 		lbc.shutdown = true
 	}
 }

 // Run starts the loadbalancer controller.
 func (lbc *loadBalancerController) Run() {
-	glog.Infof("Starting nginx loadbalancer controller")
-	go lbc.ngx.Start()
+	glog.Infof("Starting NGINX loadbalancer controller")
+	go lbc.nginx.Start()
 	go lbc.registerHandlers()

 	go lbc.configController.Run(lbc.stopCh)
-	go lbc.configQueue.run(time.Second, lbc.stopCh)
-
-	// Initial nginx configuration.
-	lbc.syncConfig(lbc.lbInfo.PodNamespace + "/" + lbc.lbInfo.ObjectName)
-
-	time.Sleep(5 * time.Second)
-
 	go lbc.ingController.Run(lbc.stopCh)
-	go lbc.ingQueue.run(time.Second, lbc.stopCh)
+
+	// periodic check for changes in configuration
+	go wait.Until(lbc.sync, 5*time.Second, wait.NeverStop)

 	<-lbc.stopCh
-	glog.Infof("Shutting down nginx loadbalancer controller")
+	glog.Infof("Shutting down NGINX loadbalancer controller")
 }
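A side note on the sorting at the end of updateNGINX: Go map iteration order is randomized, so without the sort.Sort calls over upstreams, backends and servers, two syncs over identical cluster state could hand the manager differently ordered slices and defeat the change detection behind CheckAndReload. The toy example below is detached from the controller, with made-up types, and only illustrates the deterministic rendering that such a comparison relies on.

```go
// Toy illustration with made-up types; not the controller's code. It shows why
// updateNGINX sorts everything before handing it to CheckAndReload: only a
// deterministic rendering makes an equality check meaningful.
package main

import (
	"fmt"
	"reflect"
	"sort"
)

type upstream struct {
	Name     string
	Backends []string
}

// render collects upstreams out of a map (random iteration order) and sorts
// them so identical input always yields an identical slice.
func render(m map[string]upstream) []upstream {
	out := make([]upstream, 0, len(m))
	for _, u := range m {
		sort.Strings(u.Backends)
		out = append(out, u)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name })
	return out
}

func main() {
	state := map[string]upstream{
		"default-echo": {Name: "default-echo", Backends: []string{"10.0.0.2:8080", "10.0.0.1:8080"}},
		"default-web":  {Name: "default-web", Backends: []string{"10.0.1.1:80"}},
	}
	a, b := render(state), render(state)
	// With deterministic output, "unchanged" is detectable and the reload can be skipped.
	fmt.Println("configs equal, skip reload:", reflect.DeepEqual(a, b))
}
```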


@ -1,44 +0,0 @@
local ssl = require "ngx.ssl"
local ssl_base_directory = "/etc/nginx/nginx-ssl"
local server_name = ssl.server_name()
local addr, addrtyp, err = ssl.raw_server_addr()
local byte = string.byte
local cert_path = ""
ssl.clear_certs()
-- Check for SNI request.
if server_name == nil then
ngx.log(ngx.INFO, "SNI Not present - performing IP lookup")
-- Set server name as IP address.
server_name = string.format("%d.%d.%d.%d", byte(addr, 1), byte(addr, 2), byte(addr, 3), byte(addr, 4))
ngx.log(ngx.INFO, "IP Address: ", server_name)
end
-- Set certifcate paths
cert_path = ssl_base_directory .. "/" .. server_name .. ".cert"
key_path = ssl_base_directory .. "/" .. server_name .. ".key"
-- Attempt to retrieve and set certificate for request.
local f = assert(io.open(cert_path))
local cert_data = f:read("*a")
f:close()
local ok, err = ssl.set_der_cert(cert_data)
if not ok then
ngx.log(ngx.ERR, "failed to set DER cert: ", err)
return
end
-- Attempt to retrieve and set key for request.
local f = assert(io.open(key_path))
local pkey_data = f:read("*a")
f:close()
local ok, err = ssl.set_der_priv_key(pkey_data)
if not ok then
ngx.log(ngx.ERR, "failed to set DER key: ", err)
return
end


@ -21,30 +21,3 @@ function openURL(status, page)
ngx.say(res.body)
end
function openCustomErrorURL(status, page)
local httpc = http.new()
data = {}
data["code"] = status
data["format"] = ngx.var.httpAccept
local params = "/error?"..ngx.encode_args(data)
local res, err = httpc:request_uri(page, {
path = params,
method = "GET"
})
if not res then
ngx.log(ngx.ERR, err)
ngx.exit(500)
end
ngx.status = tonumber(status)
ngx.header["Content-Type"] = ngx.var.httpReturnType or "text/plain"
if ngx.var.http_cookie then
ngx.header["Cookie"] = ngx.var.http_cookie
end
ngx.say(res.body)
end


@ -1,229 +0,0 @@
local _M = {}
local cjson = require "cjson"
local trie = require "trie"
local http = require "resty.http"
local cache = require "resty.dns.cache"
local os = require "os"
local encode = cjson.encode
local decode = cjson.decode
local table_concat = table.concat
local trie_get = trie.get
local match = string.match
local gsub = string.gsub
local lower = string.lower
-- we "cache" the config local to each worker
local ingressConfig = nil
local cluster_domain = "cluster.local"
local def_backend = nil
local custom_error = nil
local dns_cache_options = nil
function get_ingressConfig(ngx)
local d = ngx.shared["ingress"]
local value, flags, stale = d:get_stale("ingressConfig")
if not value then
-- nothing we can do
return nil, "config not set"
end
ingressConfig = decode(value)
return ingressConfig, nil
end
function worker_cache_config(ngx)
local _, err = get_ingressConfig(ngx)
if err then
ngx.log(ngx.ERR, "unable to get ingressConfig: ", err)
return
end
end
function _M.content(ngx)
local host = ngx.var.host
-- strip off any port
local h = match(host, "^(.+):?")
if h then
host = h
end
host = lower(host)
local config, err = get_ingressConfig(ngx)
if err then
ngx.log(ngx.ERR, "unable to get config: ", err)
return ngx.exit(503)
end
-- this assumes we only allow exact host matches
local paths = config[host]
if not paths then
ngx.log(ngx.ERR, "No server for host "..host.." returning 404")
if custom_error then
openCustomErrorURL(404, custom_error)
return
else
openURL(404, def_backend)
return
end
end
local backend = trie_get(paths, ngx.var.uri)
if not backend then
ngx.log(ngx.ERR, "No server for host "..host.." and path "..ngx.var.uri.." returning 404")
if custom_error then
openCustomErrorURL(404, custom_error)
return
else
openURL(404, def_backend)
return
end
end
local address = backend.host
ngx.var.upstream_port = backend.port or 80
if dns_cache_options then
local dns = cache.new(dns_cache_options)
local answer, err, stale = dns:query(address, { qtype = 1 })
if err or (not answer) then
if stale then
answer = stale
else
answer = nil
end
end
if answer and answer[1] then
local ans = answer[1]
if ans.address then
address = ans.address
end
else
ngx.log(ngx.ERR, "dns failed for ", address, " with ", err, " => ", encode(answer or ""))
end
end
ngx.var.upstream_host = address
return
end
function _M.init_worker(ngx)
end
function _M.init(ngx, options)
-- ngx.log(ngx.OK, "options: "..encode(options))
def_backend = options.def_backend
custom_error = options.custom_error
-- try to create a dns cache
local resolvers = options.resolvers
if resolvers then
cache.init_cache(512)
local servers = trie.strsplit(" ", resolvers)
dns_cache_options =
{
dict = "dns_cache",
negative_ttl = nil,
max_stale = 900,
normalise_ttl = false,
resolver = {
nameservers = {servers[1]}
}
}
end
end
-- dump config. This is the raw config (including trie) for now
function _M.config(ngx)
ngx.header.content_type = "application/json"
local config = {
ingress = ingressConfig
}
local val = encode(config)
ngx.print(val)
end
function _M.update_ingress(ngx)
ngx.header.content_type = "application/json"
if ngx.req.get_method() ~= "POST" then
ngx.print(encode({
message = "only POST request"
}))
ngx.exit(400)
return
end
ngx.req.read_body()
local data = ngx.req.get_body_data()
local val = decode(data)
if not val then
ngx.log(ngx.ERR, "failed to decode body")
return
end
config = {}
for _, ingress in ipairs(val) do
local namespace = ingress.metadata.namespace
local spec = ingress.spec
-- we do not allow default ingress backends right now.
for _, rule in ipairs(spec.rules) do
local host = rule.host
local paths = config[host]
if not paths then
paths = trie.new()
config[host] = paths
end
rule.http = rule.http or { paths = {}}
for _, path in ipairs(rule.http.paths) do
local hostname = table_concat(
{
path.backend.serviceName,
namespace,
"svc",
cluster_domain
}, ".")
local backend = {
host = hostname,
port = path.backend.servicePort
}
paths:add(path.path, backend)
end
end
end
local d = ngx.shared["ingress"]
local ok, err, _ = d:set("ingressConfig", encode(config))
if not ok then
ngx.log(ngx.ERR, "Error: "..err)
local res = encode({
message = "Error updating Ingress rules: "..err
})
ngx.print(res)
return ngx.exit(500)
end
ingressConfig = config
local res = encode({
message = "Ingress rules updated"
})
ngx.print(res)
end
return _M


@ -1,2 +0,0 @@
t/servroot/
t/error.log

View file

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Hamish Forbes
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,23 +0,0 @@
OPENRESTY_PREFIX=/usr/local/openresty
PREFIX ?= /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install
TEST_FILE ?= t
.PHONY: all test leak
all: ;
install: all
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/dns
leak: all
TEST_NGINX_CHECK_LEAK=1 TEST_NGINX_NO_SHUFFLE=1 PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r $(TEST_FILE)
test: all
TEST_NGINX_NO_SHUFFLE=1 PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r $(TEST_FILE)
util/lua-releng.pl


@ -1,111 +0,0 @@
#lua-resty-dns-cache
A wrapper for [lua-resty-dns](https://github.com/openresty/lua-resty-dns) to cache responses based on record TTLs.
Uses [lua-resty-lrucache](https://github.com/openresty/lua-resty-lrucache) and [ngx.shared.DICT](https://github.com/openresty/lua-nginx-module#ngxshareddict) to provide a 2 level cache.
Can repopulate cache in the background while returning stale answers.
#Overview
```lua
lua_shared_dict dns_cache 1m;
init_by_lua '
require("resty.dns.cache").init_cache(200)
';
server {
listen 80;
server_name dns_cache;
location / {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns = DNS_Cache.new({
dict = "dns_cache",
negative_ttl = 30,
max_stale = 300,
resolver = {
nameservers = {"123.123.123.123"}
}
})
local host = ngx.req.get_uri_args()["host"] or "www.google.com"
local answer, err, stale = dns:query(host)
if err then
if stale then
ngx.header["Warning"] = "110: Response is stale"
answer = stale
ngx.log(ngx.ERR, err)
else
ngx.status = 500
ngx.say(err)
return ngx.exit(ngx.status)
end
end
local cjson = require "cjson"
ngx.say(cjson.encode(answer))
';
}
}
```
#Methods
### init_cache
`syntax: ok, err = dns_cache.init_cache(max_items?)`
Creates a global lrucache object for caching responses.
Accepts an optional `max_items` argument, defaults to 200 entries.
Calling this repeatedly will reset the LRU cache
### initted
`syntax: ok = dns_cache.initted()`
Returns `true` if LRU Cache has been initialised
### new
`syntax: ok, err = dns_cache.new(opts)`
Returns a new DNS cache instance. Returns `nil` and a string on error
Accepts a table of options, if no shared dictionary is provided only lrucache is used.
* `dict` - Name of the [ngx.shared.DICT](https://github.com/openresty/lua-nginx-module#ngxshareddict) to use for cache.
* `resolver` - Table of options passed to [lua-resty-dns](https://github.com/openresty/lua-resty-dns#new). Defaults to using Google DNS.
* `normalise_ttl` - Boolean. Reduces TTL in cached answers to account for cached time. Defaults to `true`.
* `negative_ttl` - Time in seconds to cache negative / error responses. `nil` or `false` disables caching negative responses. Defaults to `false`
* `minimise_ttl` - Boolean. Set cache TTL based on the shortest DNS TTL in all responses rather than the first response. Defaults to `false`
* `max_stale` - Number of seconds past expiry to return stale content rather than querying. Stale hits will trigger a non-blocking background query to repopulate cache.
### query
`syntax: answers, err, stale = c:query(name, opts?)`
Passes through to lua-resty-dns' [query](https://github.com/openresty/lua-resty-dns#query) method.
Returns an extra `stale` variable containing stale data if a resolver cannot be contacted.
### tcp_query
`syntax: answers, err, stale = c:tcp_query(name, opts?)`
Passes through to lua-resty-dns' [tcp_query](https://github.com/openresty/lua-resty-dns#tcp_query) method.
Returns an extra `stale` variable containing stale data if a resolver cannot be contacted.
### set_timeout
`syntax: c:set_timeout(time)`
Passes through to lua-resty-dns' [set_timeout](https://github.com/openresty/lua-resty-dns#set_timeout) method.
## Constants
lua-resty-dns' [constants](https://github.com/openresty/lua-resty-dns#constants) are accessible on the `resty.dns.cache` object too.
## TODO
* Cap'n'proto serialisation


@ -1,449 +0,0 @@
local ngx_log = ngx.log
local ngx_DEBUG = ngx.DEBUG
local ngx_ERR = ngx.ERR
local ngx_shared = ngx.shared
local ngx_time = ngx.time
local resty_resolver = require "resty.dns.resolver"
local resty_lrucache = require "resty.lrucache"
local cjson = require "cjson"
local json_encode = cjson.encode
local json_decode = cjson.decode
local tbl_concat = table.concat
local tonumber = tonumber
local _ngx_timer_at = ngx.timer.at
local ngx_worker_pid = ngx.worker.pid
local function ngx_timer_at(delay, func, ...)
local ok, err = _ngx_timer_at(delay, func, ...)
if not ok then
ngx_log(ngx_ERR, "Timer Error: ", err)
end
return ok, err
end
local debug_log = function(msg, ...)
if type(msg) == 'table' then
local ok, json = pcall(json_encode, msg)
if ok then
msg = json
else
ngx_log(ngx_ERR, json)
end
end
ngx_log(ngx_DEBUG, msg, ...)
end
local _M = {
_VERSION = '0.01',
TYPE_A = resty_resolver.TYPE_A,
TYPE_NS = resty_resolver.TYPE_NS,
TYPE_CNAME = resty_resolver.TYPE_CNAME,
TYPE_PTR = resty_resolver.TYPE_PTR,
TYPE_MX = resty_resolver.TYPE_MX,
TYPE_TXT = resty_resolver.TYPE_TXT,
TYPE_AAAA = resty_resolver.TYPE_AAAA,
TYPE_SRV = resty_resolver.TYPE_SRV,
TYPE_SPF = resty_resolver.TYPE_SPF,
CLASS_IN = resty_resolver.CLASS_IN
}
local DEBUG = false
local mt = { __index = _M }
local lru_cache_defaults = {200}
local resolver_defaults = {
nameservers = {"8.8.8.8", "8.8.4.4"}
}
-- Global lrucache instance
local lru_cache
local max_items = 200
function _M.init_cache(size)
if size then max_items = size end
local err
if DEBUG then debug_log("Initialising lru cache with ", max_items, " max items") end
lru_cache, err = resty_lrucache.new(max_items)
if not lru_cache then
return nil, err
end
return true
end
function _M.initted()
if lru_cache then return true end
return false
end
function _M.new(opts)
local self, err = { opts = opts}, nil
opts = opts or {}
-- Set defaults
if opts.normalise_ttl ~= nil then self.normalise_ttl = opts.normalise_ttl else self.normalise_ttl = true end
if opts.minimise_ttl ~= nil then self.minimise_ttl = opts.minimise_ttl else self.minimise_ttl = false end
if opts.negative_ttl ~= nil then
self.negative_ttl = tonumber(opts.negative_ttl)
else
self.negative_ttl = false
end
if opts.max_stale ~= nil then
self.max_stale = tonumber(opts.max_stale)
else
self.max_stale = 0
end
opts.resolver = opts.resolver or resolver_defaults
self.resolver, err = resty_resolver:new(opts.resolver)
if not self.resolver then
return nil, err
end
if opts.dict then
self.dict = ngx_shared[opts.dict]
end
return setmetatable(self, mt)
end
function _M.flush(self, hard)
local ok, err = self.init_cache()
if not ok then
ngx_log(ngx_ERR, err)
end
if self.dict then
if DEBUG then debug_log("Flushing dictionary") end
self.dict:flush_all()
if hard then
local flushed = self.dict:flush_expired()
if DEBUG then debug_log("Flushed ", flushed, " keys from memory") end
end
end
end
function _M._debug(flag)
DEBUG = flag
end
function _M.set_timeout(self, ...)
return self.resolver:set_timeout(...)
end
local function minimise_ttl(answer)
if DEBUG then debug_log('Minimising TTL') end
local ttl
for _, ans in ipairs(answer) do
if DEBUG then debug_log('TTL ', ans.name, ': ', ans.ttl) end
if ttl == nil or ans.ttl < ttl then
ttl = ans.ttl
end
end
return ttl
end
local function normalise_ttl(self, data)
-- Calculate time since query and subtract from answer's TTL
if self.normalise_ttl then
local now = ngx_time()
local diff = now - data.now
if DEBUG then debug_log("Normalising TTL, diff: ", diff) end
for _, answer in ipairs(data.answer) do
if DEBUG then debug_log("Old: ", answer.ttl, ", new: ", answer.ttl - diff) end
answer.ttl = answer.ttl - diff
end
data.now = now
end
return data
end
local function cache_get(self, key)
-- Try local LRU cache first
local data, lru_stale
if lru_cache then
data, lru_stale = lru_cache:get(key)
-- Set stale if should have expired
if data and data.expires <= ngx_time() then
lru_stale = data
data = nil
end
if data then
if DEBUG then
debug_log('lru_cache HIT: ', key)
debug_log(data)
end
return normalise_ttl(self, data)
elseif DEBUG then
debug_log('lru_cache MISS: ', key)
end
end
-- lru_cache miss, try shared dict
local dict = self.dict
if dict then
local data, flags, stale = dict:get_stale(key)
-- Set stale if should have expired
if data then
data = json_decode(data)
if data.expires <= ngx_time() then
stale = true
end
end
-- Dict data is stale, prefer stale LRU data
if stale and lru_stale then
if DEBUG then
debug_log('lru_cache STALE: ', key)
debug_log(lru_stale)
end
return nil, normalise_ttl(self, lru_stale)
end
-- Definitely no lru data, going to have to try shared dict
if not data then
-- Full MISS on dict, return nil
if DEBUG then debug_log('shared_dict MISS: ', key) end
return nil
end
-- Return nil and dict cache if its stale
if stale then
if DEBUG then debug_log('shared_dict STALE: ', key) end
return nil, normalise_ttl(self, data)
end
-- Fresh HIT from dict, repopulate the lru_cache
if DEBUG then debug_log('shared_dict HIT: ', key) end
if lru_cache then
local ttl = data.expires - ngx_time()
if DEBUG then debug_log('lru_cache SET: ', key, ' ', ttl) end
lru_cache:set(key, data, ttl)
end
return normalise_ttl(self, data)
elseif lru_stale then
-- Return lru stale if no dict configured
if DEBUG then
debug_log('lru_cache STALE: ', key)
debug_log(lru_stale)
end
return nil, normalise_ttl(self, lru_stale)
end
if not lru_cache or dict then
ngx_log(ngx_ERR, "No cache defined")
end
end
local function cache_set(self, key, answer, ttl)
-- Don't cache records with 0 TTL
if ttl == 0 or ttl == nil then
return
end
-- Calculate absolute expiry - used to populate lru_cache from shared_dict
local now = ngx_time()
local data = {
answer = answer,
now = now,
queried = now,
expires = now + ttl
}
-- Extend cache expiry if using stale
local real_ttl = ttl
if self.max_stale then
real_ttl = real_ttl + self.max_stale
end
-- Set lru cache
if lru_cache then
if DEBUG then debug_log('lru_cache SET: ', key, ' ', real_ttl) end
lru_cache:set(key, data, real_ttl)
end
-- Set dict cache
local dict = self.dict
if dict then
if DEBUG then debug_log('shared_dict SET: ', key, ' ', real_ttl) end
local ok, err, forcible = dict:set(key, json_encode(data), real_ttl)
if not ok then
ngx_log(ngx_ERR, 'shared_dict ERR: ', err)
end
if forcible then
ngx_log(ngx_DEBUG, 'shared_dict full')
end
end
end
local function _resolve(resolver, query_func, host, opts)
if DEBUG then debug_log('Querying: ', host) end
local answers, err = query_func(resolver, host, opts)
if not answers then
return answers, err
end
if DEBUG then debug_log(answers) end
return answers
end
local function cache_key(host, qtype)
return tbl_concat({host,'|',qtype})
end
local function get_repopulate_lock(dict, host, qtype)
local key = cache_key(host, qtype or 1) .. '|lock'
if DEBUG then debug_log("Locking '", key, "' for ", 30, "s: ", ngx_worker_pid()) end
return dict:add(key, ngx_worker_pid(), 30)
end
local function release_repopulate_lock(dict, host, qtype)
local key = cache_key(host, qtype or 1) .. '|lock'
local pid, err = dict:get(key)
if DEBUG then debug_log("Releasing '", key, "' for ", ngx_worker_pid(), " from ", pid) end
if pid == ngx_worker_pid() then
dict:delete(key)
else
ngx_log(ngx_DEBUG, "couldnt release lock")
end
end
local _query
local function _repopulate(premature, self, host, opts, tcp)
if premature then return end
if DEBUG then debug_log("Repopulating '", host, "'") end
-- Create a new resolver instance, cannot share sockets
local err
self.resolver, err = resty_resolver:new(self.opts.resolver)
if err then
ngx_log(ngx_ERR, err)
return nil
end
-- Do not use stale when repopulating
_query(self, host, opts, tcp, true)
end
local function repopulate(self, host, opts, tcp)
-- Lock, there's a window between the key expiring and the background query completing
-- during which another query could trigger duplicate repopulate jobs
local ok, err = get_repopulate_lock(self.dict, host, opts.qtype)
if ok then
if DEBUG then debug_log("Attempting to repopulate '", host, "'") end
local ok, err = ngx_timer_at(0, _repopulate, self, host, opts, tcp)
if not ok then
-- Release lock if we couldn't start the timer
release_repopulate_lock(self.dict, host, opts.qtype)
end
else
if err == "exists" then
if DEBUG then debug_log("Lock not acquired") end
return
else
ngx.log(ngx.ERR, err)
end
end
end
_query = function(self, host, opts, tcp, repopulating)
-- Build cache key
opts = opts or {}
local key = cache_key(host, opts.qtype or 1)
-- Check caches
local answer
local data, stale = cache_get(self, key)
if data then
-- Shouldn't get a cache hit when repopulating but better safe than sorry
if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
answer = data.answer
-- Don't return negative cache hits if negative_ttl is off in this instance
if not answer.errcode or self.negative_ttl then
return answer
end
end
-- No fresh cache entry, return stale if within max_stale and trigger background repopulate
if stale and not repopulating and self.max_stale > 0
and (ngx_time() - stale.expires) < self.max_stale then
if DEBUG then debug_log('max_stale ', self.max_stale) end
repopulate(self, host, opts, tcp)
if DEBUG then debug_log('Returning STALE: ', key) end
return nil, nil, stale.answer
end
-- Try to resolve
local resolver = self.resolver
local query_func = resolver.query
if tcp then
query_func = resolver.tcp_query
end
local answer, err = _resolve(resolver, query_func, host, opts)
if not answer then
-- Couldn't resolve, return potential stale response with error msg
if DEBUG then
debug_log('Resolver error ', key, ': ', err)
if stale then debug_log('Returning STALE: ', key) end
end
if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
if stale then stale = stale.answer end
return nil, err, stale
end
local ttl
-- Cache server errors for negative_cache seconds
if answer.errcode then
if self.negative_ttl then
ttl = self.negative_ttl
else
if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
return answer
end
else
-- Cache for the lowest TTL in the chain of responses...
if self.minimise_ttl then
ttl = minimise_ttl(answer)
elseif answer[1] then
-- ... or just the first one
ttl = answer[1].ttl or nil
end
end
-- Set cache
cache_set(self, key, answer, ttl)
if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
return answer
end
function _M.query(self, host, opts)
return _query(self, host, opts, false)
end
function _M.tcp_query(self, host, opts)
return _query(self, host, opts, true)
end
return _M


@ -1,233 +0,0 @@
use Test::Nginx::Socket;
use Cwd qw(cwd);
plan tests => repeat_each() * 24;
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
};
no_long_string();
run_tests();
__DATA__
=== TEST 1: Load module without errors.
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
';
}
--- config
location /sanity {
echo "OK";
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
OK
=== TEST 2: Can init cache - defaults
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
ngx.say(DNS_Cache.initted())
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
true
=== TEST 3: Can init cache - user config
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache(300)
';
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
ngx.say(DNS_Cache.initted())
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
true
=== TEST 4: Can init new instance - defaults
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache(300)
';
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new()
if dns then
ngx.say("OK")
else
ngx.say(err)
end
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
OK
=== TEST 5: Can init new instance - user config
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache(300)
';
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
negative_ttl = 10,
resolver = { nameservers = {"10.10.10.10"} }
})
if dns then
ngx.say("OK")
else
ngx.say(err)
end
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
OK
=== TEST 6: Resty DNS errors are passed through
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache(300)
';
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
resolver = { }
})
if dns then
ngx.say("OK")
else
ngx.say(err)
end
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
no nameservers specified
=== TEST 7: Can create instance with shared dict
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
ngx.say(DNS_Cache.initted())
local dns, err = DNS_Cache.new({
dict = "dns_cache"
})
if dns then
ngx.say("OK")
else
ngx.say(err)
end
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
true
OK
=== TEST 8: Can create instance with shared dict and no lru_cache
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
}
--- config
location /sanity {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
ngx.say(DNS_Cache.initted())
local dns, err = DNS_Cache.new({
dict = "dns_cache"
})
if dns then
ngx.say("OK")
else
ngx.say(err)
end
';
}
--- request
GET /sanity
--- no_error_log
[error]
--- response_body
false
OK


@ -1,195 +0,0 @@
use lib 't';
use TestDNS;
use Cwd qw(cwd);
plan tests => repeat_each() * 12;
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
};
no_long_string();
run_tests();
__DATA__
=== TEST 1: Can resolve with lru + dict
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {
nameservers = { {"127.0.0.1", "1953"} }
}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
}
--- request
GET /t
--- no_error_log
[error]
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
=== TEST 2: Can resolve with lru only
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
resolver = {
nameservers = { {"127.0.0.1", "1953"} }
}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
}
--- request
GET /t
--- no_error_log
[error]
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
=== TEST 3: Can resolve with dict only
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {
nameservers = { {"127.0.0.1", "1953"} }
}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
}
--- request
GET /t
--- no_error_log
[error]
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
=== TEST 4: Can resolve with no cache, error thrown
--- http_config eval
"$::HttpConfig"
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
resolver = {
nameservers = { {"127.0.0.1", "1953"} }
}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
}
--- request
GET /t
--- error_log
No cache defined
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]


@ -1,873 +0,0 @@
use lib 't';
use TestDNS;
use Cwd qw(cwd);
plan tests => repeat_each() * 47;
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
lua_socket_log_errors off;
};
no_long_string();
run_tests();
__DATA__
=== TEST 1: Response comes from cache on second hit
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
}
--- request
GET /t
--- error_log
lru_cache HIT
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
=== TEST 2: Response comes from dict on miss
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache() -- reset cache
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
}
--- request
GET /t
--- error_log
lru_cache MISS
shared_dict HIT
lru_cache SET
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
=== TEST 3: Stale response from lru served if resolver down
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_sleep 2;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if stale then
answer = stale
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- error_log
lru_cache MISS
lru_cache STALE
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
=== TEST 4: Stale response from dict served if resolver down
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_sleep 2;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
})
DNS_Cache.init_cache() -- reset cache
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if stale then
answer = stale
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- error_log
lru_cache MISS
shared_dict STALE
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
=== TEST 5: Stale response from lru served if resolver down, no dict
--- http_config eval
"$::HttpConfig"
. q{
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_sleep 2;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if stale then
answer = stale
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- error_log
lru_cache MISS
lru_cache STALE
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
=== TEST 6: Stale response from dict served if resolver down, no lru
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
}
--- config
location /t {
echo_location /_t;
echo_sleep 2;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if stale then
answer = stale
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- error_log
shared_dict STALE
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
=== TEST 7: TTLs are reduced
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_sleep 2;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 100}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 10 }],
}
--- request
GET /t
--- no_error_log
[error]
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":8}]
=== TEST 8: TTL reduction can be disabled
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_sleep 2;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
normalise_ttl = false,
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 100}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 10 }],
}
--- request
GET /t
--- no_error_log
[error]
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":10}]
=== TEST 9: Negative responses are not cached by default
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns._debug(true)
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
rcode => 3,
opcode => 0,
qname => 'www.google.com',
}
--- request
GET /t
--- no_error_log
SET
--- response_body
{"errcode":3,"errstr":"name error"}
=== TEST 10: Negative responses can be cached
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
negative_ttl = 10,
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
negative_ttl = 10,
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
rcode => 3,
opcode => 0,
qname => 'www.google.com',
}
--- request
GET /t
--- error_log
lru_cache HIT
--- response_body
{"errcode":3,"errstr":"name error"}
=== TEST 11: Cached negative responses are not returned by default
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
echo_location /_t;
echo_location /_t2;
}
location /_t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
negative_ttl = 10,
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns._debug(true)
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
';
}
location /_t2 {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
rcode => 3,
opcode => 0,
qname => 'www.google.com',
}
--- request
GET /t
--- error_log
lru_cache SET
lru_cache HIT
--- response_body
null
=== TEST 12: Cache TTL can be minimised
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
minimise_ttl = true,
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [
{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 },
{ name => "l.www.google.com", ipv6 => "::1", ttl => 10 },
],
}
--- request
GET /t
--- error_log
lru_cache SET: www.google.com|1 10
shared_dict SET: www.google.com|1 10
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456},{"address":"0:0:0:0:0:0:0:1","type":28,"class":1,"name":"l.www.google.com","ttl":10}]
=== TEST 13: Cache TTLs not minimised by default
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [
{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 },
{ name => "l.www.google.com", ipv6 => "::1", ttl => 10 },
],
}
--- request
GET /t
--- error_log
lru_cache SET: www.google.com|1 123456
shared_dict SET: www.google.com|1 123456
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456},{"address":"0:0:0:0:0:0:0:1","type":28,"class":1,"name":"l.www.google.com","ttl":10}]


@@ -1,275 +0,0 @@
use lib 't';
use TestDNS;
use Cwd qw(cwd);
plan tests => repeat_each() * 17;
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
lua_socket_log_errors off;
};
no_long_string();
run_tests();
__DATA__
=== TEST 1: Query is triggered when cache is expired
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}},
max_stale = 10
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
dns._debug(true)
-- Sleep beyond response TTL
ngx.sleep(1.1)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
if stale then
answer = stale
else
ngx.say(err)
end
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
ngx.sleep(0.1)
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- error_log
Returning STALE
Attempting to repopulate 'www.google.com'
Repopulating 'www.google.com'
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":1}]
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":0}]
=== TEST 2: Query is not triggered when cache expires and max_stale is disabled
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 50 },
max_stale = 0
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
dns._debug(true)
-- Sleep beyond response TTL
ngx.sleep(1.1)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
if stale then
answer = stale
else
ngx.say(err)
end
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
ngx.sleep(0.1)
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- no_error_log
Attempting to repopulate 'www.google.com'
Repopulating 'www.google.com'
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":0}]
=== TEST 3: Repopulate ignores max_stale
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 50 },
max_stale = 10,
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
-- Sleep beyond response TTL
ngx.sleep(1.1)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
if stale then
answer = stale
else
ngx.say(err)
end
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
ngx.sleep(0.1)
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- error_log
Repopulating 'www.google.com'
Querying: www.google.com
Resolver error
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":0}]
=== TEST 4: Multiple queries only trigger 1 repopulate timer
--- http_config eval
"$::HttpConfig"
. q{
lua_shared_dict dns_cache 1m;
init_by_lua '
local DNS_Cache = require("resty.dns.cache")
DNS_Cache.init_cache()
';
}
--- config
location /t {
content_by_lua '
local DNS_Cache = require("resty.dns.cache")
local dns, err = DNS_Cache.new({
dict = "dns_cache",
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 50 },
repopulate = true,
})
if not dns then
ngx.say(err)
end
dns.resolver._id = 125
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
dns._debug(true)
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
if not answer then
ngx.say(err)
end
local cjson = require"cjson"
ngx.say(cjson.encode(answer))
';
}
--- udp_listen: 1953
--- udp_reply dns
{
id => 125,
opcode => 0,
qname => 'www.google.com',
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
}
--- request
GET /t
--- no_error_log
Attempting to repopulate www.google.com
--- response_body
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":1}]


@@ -1,271 +0,0 @@
package TestDNS;
use strict;
use warnings;
use 5.010001;
use Test::Nginx::Socket::Lua -Base;
#use JSON::XS;
use constant {
TYPE_A => 1,
TYPE_TXT => 16,
TYPE_CNAME => 5,
TYPE_AAAA => 28,
CLASS_INTERNET => 1,
};
sub encode_name ($);
sub encode_ipv4 ($);
sub encode_ipv6 ($);
sub gen_dns_reply ($$);
sub Test::Base::Filter::dns {
my ($self, $code) = @_;
my $args = $self->current_arguments;
#warn "args: $args";
if (defined $args && $args ne 'tcp' && $args ne 'udp') {
die "Invalid argument to the \"dns\" filter: $args\n";
}
my $mode = $args // 'udp';
my $block = $self->current_block;
my $pointer_spec = $block->dns_pointers;
my @pointers;
if (defined $pointer_spec) {
my @loops = split /\s*,\s*/, $pointer_spec;
for my $loop (@loops) {
my @nodes = split /\s*=>\s*/, $loop;
my $prev;
for my $n (@nodes) {
if ($n !~ /^\d+$/ || $n == 0) {
die "bad name ID in the --- dns_pointers: $n\n";
}
if (!defined $prev) {
$prev = $n;
next;
}
$pointers[$prev] = $n;
}
}
}
my $input = eval $code;
if ($@) {
die "failed to evaluate code $code: $@\n";
}
if (!ref $input) {
return $input;
}
if (ref $input eq 'ARRAY') {
my @replies;
for my $t (@$input) {
push @replies, gen_dns_reply($t, $mode);
}
return \@replies;
}
if (ref $input eq 'HASH') {
return gen_dns_reply($input, $mode);
}
return $input;
}
sub gen_dns_reply ($$) {
my ($t, $mode) = @_;
my @raw_names;
push @raw_names, \($t->{qname});
my $answers = $t->{answer} // [];
if (!ref $answers) {
$answers = [$answers];
}
for my $ans (@$answers) {
push @raw_names, \($ans->{name});
if (defined $ans->{cname}) {
push @raw_names, \($ans->{cname});
}
}
for my $rname (@raw_names) {
$$rname = encode_name($$rname // "");
}
my $qname = $t->{qname};
my $s = '';
my $id = $t->{id} // 0;
$s .= pack("n", $id);
#warn "id: ", length($s), " ", encode_json([$s]);
my $qr = $t->{qr} // 1;
my $opcode = $t->{opcode} // 0;
my $aa = $t->{aa} // 0;
my $tc = $t->{tc} // 0;
my $rd = $t->{rd} // 1;
my $ra = $t->{ra} // 1;
my $rcode = $t->{rcode} // 0;
my $flags = ($qr << 15) + ($opcode << 11) + ($aa << 10) + ($tc << 9) + ($rd << 8) + ($ra << 7) + $rcode;
#warn sprintf("flags: %b", $flags);
$flags = pack("n", $flags);
$s .= $flags;
#warn "flags: ", length($flags), " ", encode_json([$flags]);
my $qdcount = $t->{qdcount} // 1;
my $ancount = $t->{ancount} // scalar @$answers;
my $nscount = 0;
my $arcount = 0;
$s .= pack("nnnn", $qdcount, $ancount, $nscount, $arcount);
#warn "qname: ", length($qname), " ", encode_json([$qname]);
$s .= $qname;
my $qs_type = $t->{qtype} // TYPE_A;
my $qs_class = $t->{qclass} // CLASS_INTERNET;
$s .= pack("nn", $qs_type, $qs_class);
for my $ans (@$answers) {
my $name = $ans->{name};
my $type = $ans->{type};
my $class = $ans->{class};
my $ttl = $ans->{ttl};
my $rdlength = $ans->{rdlength};
my $rddata = $ans->{rddata};
my $ipv4 = $ans->{ipv4};
if (defined $ipv4) {
my ($data, $len) = encode_ipv4($ipv4);
$rddata //= $data;
$rdlength //= $len;
$type //= TYPE_A;
$class //= CLASS_INTERNET;
}
my $ipv6 = $ans->{ipv6};
if (defined $ipv6) {
my ($data, $len) = encode_ipv6($ipv6);
$rddata //= $data;
$rdlength //= $len;
$type //= TYPE_AAAA;
$class //= CLASS_INTERNET;
}
my $cname = $ans->{cname};
if (defined $cname) {
$rddata //= $cname;
$rdlength //= length $rddata;
$type //= TYPE_CNAME;
$class //= CLASS_INTERNET;
}
my $txt = $ans->{txt};
if (defined $txt) {
$rddata //= $txt;
$rdlength //= length $rddata;
$type //= TYPE_TXT;
$class //= CLASS_INTERNET;
}
$type //= 0;
$class //= 0;
$ttl //= 0;
#warn "rdlength: $rdlength, rddata: ", encode_json([$rddata]), "\n";
$s .= $name . pack("nnNn", $type, $class, $ttl, $rdlength) . $rddata;
}
if ($mode eq 'tcp') {
return pack("n", length($s)) . $s;
}
return $s;
}
sub encode_ipv4 ($) {
my $txt = shift;
my @bytes = split /\./, $txt;
return pack("CCCC", @bytes), 4;
}
sub encode_ipv6 ($) {
my $txt = shift;
my @groups = split /:/, $txt;
my $nils = 0;
my $nonnils = 0;
for my $g (@groups) {
if ($g eq '') {
$nils++;
} else {
$nonnils++;
$g = hex($g);
}
}
my $total = $nils + $nonnils;
if ($total > 8 ) {
die "Invalid IPv6 address: too many groups: $total: $txt";
}
if ($nils) {
my $found = 0;
my @new_groups;
for my $g (@groups) {
if ($g eq '') {
if ($found) {
next;
}
for (1 .. 8 - $nonnils) {
push @new_groups, 0;
}
$found = 1;
} else {
push @new_groups, $g;
}
}
@groups = @new_groups;
}
if (@groups != 8) {
die "Invalid IPv6 address: $txt: @groups\n";
}
#warn "IPv6 groups: @groups";
return pack("nnnnnnnn", @groups), 16;
}
sub encode_name ($) {
my $name = shift;
$name =~ s/([^.]+)\.?/chr(length($1)) . $1/ge;
$name .= "\0";
return $name;
}
1


@@ -1,32 +0,0 @@
#!/usr/bin/env perl
use strict;
use warnings;
sub file_contains ($$);
my $version;
for my $file (map glob, qw{ *.lua lib/*.lua lib/*/*.lua lib/*/*/*.lua lib/*/*/*/*.lua lib/*/*/*/*/*.lua }) {
print "Checking use of Lua global variables in file $file ...\n";
system("luac -p -l $file | grep ETGLOBAL | grep -vE 'require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|rawget|rawset|rawlen'");
file_contains($file, "attempt to write to undeclared variable");
#system("grep -H -n -E --color '.{81}' $file");
}
sub file_contains ($$) {
my ($file, $regex) = @_;
open my $in, $file
or die "Cannot open $file for reading: $!\n";
my $content = do { local $/; <$in> };
close $in;
#print "$content";
return scalar ($content =~ /$regex/);
}
if (-d 't') {
for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) {
system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file});
}
}


@@ -1 +0,0 @@
*.t linguist-language=Text


@@ -1,10 +0,0 @@
*.swp
*.swo
*~
go
t/servroot/
reindex
nginx
ctags
tags
a.lua


@@ -1,18 +0,0 @@
OPENRESTY_PREFIX=/usr/local/openresty
PREFIX ?= /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install
.PHONY: all test install
all: ;
install: all
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/dns
$(INSTALL) lib/resty/dns/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/dns/
test: all
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t


@@ -1,404 +0,0 @@
Name
====
lua-resty-dns - Lua DNS resolver for the ngx_lua based on the cosocket API
Table of Contents
=================
* [Name](#name)
* [Status](#status)
* [Description](#description)
* [Synopsis](#synopsis)
* [Methods](#methods)
* [new](#new)
* [query](#query)
* [tcp_query](#tcp_query)
* [set_timeout](#set_timeout)
* [compress_ipv6_addr](#compress_ipv6_addr)
* [Constants](#constants)
* [TYPE_A](#type_a)
* [TYPE_NS](#type_ns)
* [TYPE_CNAME](#type_cname)
* [TYPE_PTR](#type_ptr)
* [TYPE_MX](#type_mx)
* [TYPE_TXT](#type_txt)
* [TYPE_AAAA](#type_aaaa)
* [TYPE_SRV](#type_srv)
* [TYPE_SPF](#type_spf)
* [CLASS_IN](#class_in)
* [Automatic Error Logging](#automatic-error-logging)
* [Limitations](#limitations)
* [TODO](#todo)
* [Author](#author)
* [Copyright and License](#copyright-and-license)
* [See Also](#see-also)
Status
======
This library is considered production ready.
Description
===========
This Lua library provides a DNS resolver for the ngx_lua nginx module:
http://wiki.nginx.org/HttpLuaModule
This Lua library takes advantage of ngx_lua's cosocket API, which ensures
100% nonblocking behavior.
Note that at least [ngx_lua 0.5.12](https://github.com/chaoslawful/lua-nginx-module/tags) or [ngx_openresty 1.2.1.11](http://openresty.org/#Download) is required.
The [bit library](http://bitop.luajit.org/) is also required. If you're using LuaJIT 2.0 with ngx_lua, then the `bit` library is already available by default.
Note that this library is bundled and enabled by default in the [ngx_openresty bundle](http://openresty.org/).
Synopsis
========
```lua
lua_package_path "/path/to/lua-resty-dns/lib/?.lua;;";
server {
location = /dns {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{
nameservers = {"8.8.8.8", {"8.8.4.4", 53} },
retrans = 5, -- 5 retransmissions on receive timeout
timeout = 2000, -- 2 sec
}
if not r then
ngx.say("failed to instantiate the resolver: ", err)
return
end
local answers, err = r:query("www.google.com")
if not answers then
ngx.say("failed to query the DNS server: ", err)
return
end
if answers.errcode then
ngx.say("server returned error code: ", answers.errcode,
": ", answers.errstr)
end
for i, ans in ipairs(answers) do
ngx.say(ans.name, " ", ans.address or ans.cname,
" type:", ans.type, " class:", ans.class,
" ttl:", ans.ttl)
end
';
}
}
```
[Back to TOC](#table-of-contents)
Methods
=======
[Back to TOC](#table-of-contents)
new
---
`syntax: r, err = class:new(opts)`
Creates a dns.resolver object. Returns `nil` and an error message string on failure.
It accepts an `opts` table argument. The following options are supported (a combined usage sketch follows the list):
* `nameservers`
a list of nameservers to be used. Each nameserver entry can be either a single hostname string or a table holding both the hostname string and the port number. The nameserver is picked up by a simple round-robin algorithm for each `query` method call. This option is required.
* `retrans`
the total number of times the DNS request is retransmitted when receiving the DNS response times out according to the `timeout` setting. Defaults to `5` times. Each retransmission is sent to the next nameserver picked by the round-robin algorithm.
* `timeout`
the time in milliseconds to wait for the response to a single request transmission attempt. Note that this is *not* the maximal total waiting time before giving up; that is given by the expression `timeout x retrans`. The `timeout` setting can also be changed by calling the `set_timeout` method. The default `timeout` setting is 2000 milliseconds, or 2 seconds.
* `no_recurse`
a boolean flag that controls whether to disable the "recursion desired" (RD) flag in the UDP request. Defaults to `false`.
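Putting the options together, a short usage sketch (the nameserver addresses below are placeholders, not a recommendation):

```lua
local resolver = require "resty.dns.resolver"

local r, err = resolver:new{
    nameservers = { "8.8.8.8", { "8.8.4.4", 53 } },  -- strings or {host, port} tables
    retrans = 3,        -- retransmit up to 3 times on receive timeout
    timeout = 1000,     -- 1 sec per attempt, so at most 3 sec in total
    no_recurse = false, -- keep the "recursion desired" flag set (the default)
}
if not r then
    ngx.say("failed to instantiate the resolver: ", err)
    return
end
```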
[Back to TOC](#table-of-contents)
query
-----
`syntax: answers, err = r:query(name, options?)`
Performs a standard DNS query to the nameservers specified by the `new` method,
and returns all the answer records in an array-like Lua table. In case of errors, it will
return `nil` and a string describing the error instead.
If the server returns a non-zero error code, the fields `errcode` and `errstr` will be set accordingly in the Lua table returned.
Each entry in the `answers` returned table value is also a hash-like Lua table
which usually takes some of the following fields:
* `name`
The resource record name.
* `type`
The current resource record type, possible values are `1` (`TYPE_A`), `5` (`TYPE_CNAME`), `28` (`TYPE_AAAA`), and any other values allowed by RFC 1035.
* `address`
The IPv4 or IPv6 address in its textual representation when the resource record type is `1` (`TYPE_A`) or `28` (`TYPE_AAAA`), respectively. Successive 16-bit zero groups in IPv6 addresses are not compressed by default; if you want that, call the `compress_ipv6_addr` static method on the address.
* `cname`
The (decoded) record data value for `CNAME` resource records. Only present for `CNAME` records.
* `ttl`
The time-to-live (TTL) value in seconds for the current resource record.
* `class`
The current resource record class, possible values are `1` (`CLASS_IN`) or any other values allowed by RFC 1035.
* `preference`
The preference integer number for `MX` resource records. Only present for `MX` type records.
* `exchange`
The exchange domain name for `MX` resource records. Only present for `MX` type records.
* `nsdname`
A domain-name which specifies a host which should be authoritative for the specified class and domain. Usually present for `NS` type records.
* `rdata`
The raw resource data (RDATA) for resource records that are not recognized.
* `txt`
The record value for `TXT` records. When there is only one character string in this record, this field takes a single Lua string; otherwise this field takes a Lua table holding all the strings.
* `ptrdname`
The record value for `PTR` records.
This method also takes an optional `options` argument table, which takes the following fields:
* `qtype`
The type of the question. Possible values are `1` (`TYPE_A`), `5` (`TYPE_CNAME`), `28` (`TYPE_AAAA`), or any other QTYPE value specified by RFC 1035 and RFC 3596. Defaults to `1` (`TYPE_A`).
When data truncation happens, the resolver will automatically retry using the TCP transport mode
to query the current nameserver. All TCP connections are short lived.
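For illustration only, here is a hedged sketch of a non-default query: it asks for `MX` records, checks `errcode`/`errstr`, and then reads the `preference` and `exchange` fields described above (the domain and nameserver are placeholders):

```lua
local resolver = require "resty.dns.resolver"

local r, err = resolver:new{ nameservers = { "8.8.8.8" } }
if not r then
    ngx.say("failed to instantiate the resolver: ", err)
    return
end

-- ask for MX records instead of the default TYPE_A
local answers, err = r:query("gmail.com", { qtype = r.TYPE_MX })
if not answers then
    ngx.say("failed to query: ", err)
    return
end

if answers.errcode then
    -- non-zero RCODE from the server, e.g. 3 ("name error")
    ngx.say("server returned error code: ", answers.errcode, ": ", answers.errstr)
    return
end

for _, ans in ipairs(answers) do
    -- MX answers carry "preference" and "exchange" rather than "address"
    ngx.say(ans.name, " ", ans.preference, " ", ans.exchange, " ttl:", ans.ttl)
end
```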
[Back to TOC](#table-of-contents)
tcp_query
---------
`syntax: answers, err = r:tcp_query(name, options?)`
Just like the `query` method, but enforces the TCP transport mode instead of UDP.
All TCP connections are short lived.
Here is an example:
```lua
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{
nameservers = { "8.8.8.8" }
}
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:tcp_query("www.google.com", { qtype = r.TYPE_A })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
```
[Back to TOC](#table-of-contents)
set_timeout
-----------
`syntax: r:set_timeout(time)`
Overrides the current `timeout` setting with the `time` argument (in milliseconds) for all the nameserver peers.
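A one-line usage sketch:

```lua
r:set_timeout(500)  -- from now on, wait at most 500 ms per attempt
```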
[Back to TOC](#table-of-contents)
compress_ipv6_addr
------------------
`syntax: compressed = resty.dns.resolver.compress_ipv6_addr(address)`
Compresses the successive 16-bit zero groups in the textual format of the IPv6 address.
For example,
```lua
local resolver = require "resty.dns.resolver"
local compress = resolver.compress_ipv6_addr
local new_addr = compress("FF01:0:0:0:0:0:0:101")
```
will yield `FF01::101` in the `new_addr` return value.
[Back to TOC](#table-of-contents)
Constants
=========
[Back to TOC](#table-of-contents)
TYPE_A
------
The `A` resource record type, equal to the decimal number `1`.
[Back to TOC](#table-of-contents)
TYPE_NS
-------
The `NS` resource record type, equal to the decimal number `2`.
[Back to TOC](#table-of-contents)
TYPE_CNAME
----------
The `CNAME` resource record type, equal to the decimal number `5`.
[Back to TOC](#table-of-contents)
TYPE_PTR
--------
The `PTR` resource record type, equal to the decimal number `12`.
[Back to TOC](#table-of-contents)
TYPE_MX
-------
The `MX` resource record type, equal to the decimal number `15`.
[Back to TOC](#table-of-contents)
TYPE_TXT
--------
The `TXT` resource record type, equal to the decimal number `16`.
[Back to TOC](#table-of-contents)
TYPE_AAAA
---------
`syntax: typ = r.TYPE_AAAA`
The `AAAA` resource record type, equal to the decimal number `28`.
[Back to TOC](#table-of-contents)
TYPE_SRV
---------
`syntax: typ = r.TYPE_SRV`
The `SRV` resource record type, equal to the decimal number `33`.
See RFC 2782 for details.
[Back to TOC](#table-of-contents)
TYPE_SPF
---------
`syntax: typ = r.TYPE_SPF`
The `SPF` resource record type, equal to the decimal number `99`.
See RFC 4408 for details.
[Back to TOC](#table-of-contents)
CLASS_IN
--------
`syntax: class = r.CLASS_IN`
The `Internet` resource record class, equal to the decimal number `1`.
[Back to TOC](#table-of-contents)
Automatic Error Logging
=======================
By default the underlying [ngx_lua](http://wiki.nginx.org/HttpLuaModule) module
does error logging when socket errors happen. If you are already doing proper error
handling in your own Lua code, then it is recommended to disable this automatic error logging by turning off [ngx_lua](http://wiki.nginx.org/HttpLuaModule)'s [lua_socket_log_errors](http://wiki.nginx.org/HttpLuaModule#lua_socket_log_errors) directive, that is,
```nginx
lua_socket_log_errors off;
```
[Back to TOC](#table-of-contents)
Limitations
===========
* This library cannot be used in code contexts like set_by_lua*, log_by_lua*, and
header_filter_by_lua* where the ngx_lua cosocket API is not available.
* The `resty.dns.resolver` object instance cannot be stored in a Lua variable at the Lua module level,
because it will then be shared by all the concurrent requests handled by the same nginx
worker process (see
http://wiki.nginx.org/HttpLuaModule#Data_Sharing_within_an_Nginx_Worker ) and
result in bad race conditions when concurrent requests are trying to use the same `resty.dns.resolver` instance.
You should always instantiate `resty.dns.resolver` objects in function-local
variables or in the `ngx.ctx` table. These places all have their own data copies for
each request.
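A minimal sketch of the recommended per-request pattern (the nameserver address is a placeholder):

```lua
-- BAD: stored at the Lua module level, this object would be shared by every
-- request handled by the same nginx worker:
--   local shared_r = require("resty.dns.resolver"):new{ nameservers = { "8.8.8.8" } }

-- GOOD: create a fresh, function-local object per request, e.g. inside
-- content_by_lua or access_by_lua code:
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "8.8.8.8" } }
if not r then
    ngx.say("failed to instantiate the resolver: ", err)
    return
end
```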
[Back to TOC](#table-of-contents)
TODO
====
* Concurrent (or parallel) query mode
* Better support for other resource record types like `TLSA`.
[Back to TOC](#table-of-contents)
Author
======
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
[Back to TOC](#table-of-contents)
Copyright and License
=====================
This module is licensed under the BSD license.
Copyright (C) 2012-2014, by Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[Back to TOC](#table-of-contents)
See Also
========
* the ngx_lua module: http://wiki.nginx.org/HttpLuaModule
* the [lua-resty-memcached](https://github.com/agentzh/lua-resty-memcached) library.
* the [lua-resty-redis](https://github.com/agentzh/lua-resty-redis) library.
* the [lua-resty-mysql](https://github.com/agentzh/lua-resty-mysql) library.
[Back to TOC](#table-of-contents)


@@ -1,803 +0,0 @@
-- Copyright (C) Yichun Zhang (agentzh)
-- local socket = require "socket"
local bit = require "bit"
local udp = ngx.socket.udp
local rand = math.random
local char = string.char
local byte = string.byte
local find = string.find
local gsub = string.gsub
local sub = string.sub
local format = string.format
local band = bit.band
local rshift = bit.rshift
local lshift = bit.lshift
local insert = table.insert
local concat = table.concat
local re_sub = ngx.re.sub
local tcp = ngx.socket.tcp
local log = ngx.log
local DEBUG = ngx.DEBUG
local randomseed = math.randomseed
local ngx_time = ngx.time
local setmetatable = setmetatable
local type = type
local DOT_CHAR = byte(".")
local TYPE_A = 1
local TYPE_NS = 2
local TYPE_CNAME = 5
local TYPE_PTR = 12
local TYPE_MX = 15
local TYPE_TXT = 16
local TYPE_AAAA = 28
local TYPE_SRV = 33
local TYPE_SPF = 99
local CLASS_IN = 1
local _M = {
_VERSION = '0.14',
TYPE_A = TYPE_A,
TYPE_NS = TYPE_NS,
TYPE_CNAME = TYPE_CNAME,
TYPE_PTR = TYPE_PTR,
TYPE_MX = TYPE_MX,
TYPE_TXT = TYPE_TXT,
TYPE_AAAA = TYPE_AAAA,
TYPE_SRV = TYPE_SRV,
TYPE_SPF = TYPE_SPF,
CLASS_IN = CLASS_IN,
}
local resolver_errstrs = {
"format error", -- 1
"server failure", -- 2
"name error", -- 3
"not implemented", -- 4
"refused", -- 5
}
local mt = { __index = _M }
function _M.new(class, opts)
if not opts then
return nil, "no options table specified"
end
local servers = opts.nameservers
if not servers or #servers == 0 then
return nil, "no nameservers specified"
end
local timeout = opts.timeout or 2000 -- default 2 sec
local n = #servers
local socks = {}
for i = 1, n do
local server = servers[i]
local sock, err = udp()
if not sock then
return nil, "failed to create udp socket: " .. err
end
local host, port
if type(server) == 'table' then
host = server[1]
port = server[2] or 53
else
host = server
port = 53
servers[i] = {host, port}
end
local ok, err = sock:setpeername(host, port)
if not ok then
return nil, "failed to set peer name: " .. err
end
sock:settimeout(timeout)
insert(socks, sock)
end
local tcp_sock, err = tcp()
if not tcp_sock then
return nil, "failed to create tcp socket: " .. err
end
tcp_sock:settimeout(timeout)
return setmetatable(
{ cur = rand(1, n), socks = socks,
tcp_sock = tcp_sock,
servers = servers,
retrans = opts.retrans or 5,
no_recurse = opts.no_recurse,
}, mt)
end
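-- Round-robin over the UDP sockets created in new(): return the socket at the
-- current cursor position and advance the cursor, wrapping back to the first
-- socket after the last one.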
local function pick_sock(self, socks)
local cur = self.cur
if cur == #socks then
self.cur = 1
else
self.cur = cur + 1
end
return socks[cur]
end
local function _get_cur_server(self)
local cur = self.cur
local servers = self.servers
if cur == 1 then
return servers[#servers]
end
return servers[cur - 1]
end
function _M.set_timeout(self, timeout)
local socks = self.socks
if not socks then
return nil, "not initialized"
end
for i = 1, #socks do
local sock = socks[i]
sock:settimeout(timeout)
end
local tcp_sock = self.tcp_sock
if not tcp_sock then
return nil, "not initialized"
end
tcp_sock:settimeout(timeout)
end
local function _encode_name(s)
return char(#s) .. s
end
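-- Decode a (possibly compressed) domain name starting at "pos" in the reply
-- buffer. Plain labels are copied as-is; a length byte with the two top bits
-- set (0xc0) is a compression pointer to an earlier offset (RFC 1035, 4.1.4).
-- Returns the dotted name plus the position just past the name at its
-- original (non-pointer) location.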
local function _decode_name(buf, pos)
local labels = {}
local nptrs = 0
local p = pos
while nptrs < 128 do
local fst = byte(buf, p)
if not fst then
return nil, 'truncated';
end
-- print("fst at ", p, ": ", fst)
if fst == 0 then
if nptrs == 0 then
pos = pos + 1
end
break
end
if band(fst, 0xc0) ~= 0 then
-- being a pointer
if nptrs == 0 then
pos = pos + 2
end
nptrs = nptrs + 1
local snd = byte(buf, p + 1)
if not snd then
return nil, 'truncated'
end
p = lshift(band(fst, 0x3f), 8) + snd + 1
-- print("resolving ptr ", p, ": ", byte(buf, p))
else
-- being a label
local label = sub(buf, p + 1, p + fst)
insert(labels, label)
-- print("resolved label ", label)
p = p + fst + 1
if nptrs == 0 then
pos = p
end
end
end
return concat(labels, "."), pos
end
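-- Build a raw DNS query packet as a table of string fragments: the 12-byte
-- header (ident, flags and the four section counts), then the encoded
-- question name, the query type and the IN class. When no_recurse is set the
-- "recursion desired" bit is cleared in the flags field.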
local function _build_request(qname, id, no_recurse, opts)
local qtype
if opts then
qtype = opts.qtype
end
if not qtype then
qtype = 1 -- A record
end
local ident_hi = char(rshift(id, 8))
local ident_lo = char(band(id, 0xff))
local flags
if no_recurse then
-- print("found no recurse")
flags = "\0\0"
else
flags = "\1\0"
end
local nqs = "\0\1"
local nan = "\0\0"
local nns = "\0\0"
local nar = "\0\0"
local typ = "\0" .. char(qtype)
local class = "\0\1" -- the Internet class
if byte(qname, 1) == DOT_CHAR then
return nil, "bad name"
end
local name = gsub(qname, "([^.]+)%.?", _encode_name) .. '\0'
return {
ident_hi, ident_lo, flags, nqs, nan, nns, nar,
name, typ, class
}
end
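-- Parse a raw DNS reply: check that the ident matches the query id, inspect
-- the QR and TC flags, skip the question section, then decode every answer
-- record into a Lua table (address, cname, MX, SRV, NS, TXT/SPF, PTR or raw
-- rdata). A non-zero RCODE is exposed via the "errcode"/"errstr" fields of
-- the returned answers table.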
local function parse_response(buf, id)
local n = #buf
if n < 12 then
return nil, 'truncated';
end
-- header layout: ident flags nqs nan nns nar
local ident_hi = byte(buf, 1)
local ident_lo = byte(buf, 2)
local ans_id = lshift(ident_hi, 8) + ident_lo
-- print("id: ", id, ", ans id: ", ans_id)
if ans_id ~= id then
-- identifier mismatch and throw it away
log(DEBUG, "id mismatch in the DNS reply: ", ans_id, " ~= ", id)
return nil, "id mismatch"
end
local flags_hi = byte(buf, 3)
local flags_lo = byte(buf, 4)
local flags = lshift(flags_hi, 8) + flags_lo
-- print(format("flags: 0x%x", flags))
if band(flags, 0x8000) == 0 then
return nil, format("bad QR flag in the DNS response")
end
if band(flags, 0x200) ~= 0 then
return nil, "truncated"
end
local code = band(flags, 0x7f)
-- print(format("code: %d", code))
local nqs_hi = byte(buf, 5)
local nqs_lo = byte(buf, 6)
local nqs = lshift(nqs_hi, 8) + nqs_lo
-- print("nqs: ", nqs)
if nqs ~= 1 then
return nil, format("bad number of questions in DNS response: %d", nqs)
end
local nan_hi = byte(buf, 7)
local nan_lo = byte(buf, 8)
local nan = lshift(nan_hi, 8) + nan_lo
-- print("nan: ", nan)
-- skip the question part
local ans_qname, pos = _decode_name(buf, 13)
if not ans_qname then
return nil, pos
end
-- print("qname in reply: ", ans_qname)
-- print("question: ", sub(buf, 13, pos))
if pos + 3 + nan * 12 > n then
-- print(format("%d > %d", pos + 3 + nan * 12, n))
return nil, 'truncated';
end
-- question section layout: qname qtype(2) qclass(2)
local type_hi = byte(buf, pos)
local type_lo = byte(buf, pos + 1)
local ans_type = lshift(type_hi, 8) + type_lo
-- print("ans qtype: ", ans_type)
local class_hi = byte(buf, pos + 2)
local class_lo = byte(buf, pos + 3)
local qclass = lshift(class_hi, 8) + class_lo
-- print("ans qclass: ", qclass)
if qclass ~= 1 then
return nil, format("unknown query class %d in DNS response", qclass)
end
pos = pos + 4
local answers = {}
if code ~= 0 then
answers.errcode = code
answers.errstr = resolver_errstrs[code] or "unknown"
end
for i = 1, nan do
-- print(format("ans %d: qtype:%d qclass:%d", i, qtype, qclass))
local ans = {}
insert(answers, ans)
local name
name, pos = _decode_name(buf, pos)
if not name then
return nil, pos
end
ans.name = name
-- print("name: ", name)
type_hi = byte(buf, pos)
type_lo = byte(buf, pos + 1)
local typ = lshift(type_hi, 8) + type_lo
ans.type = typ
-- print("type: ", typ)
class_hi = byte(buf, pos + 2)
class_lo = byte(buf, pos + 3)
local class = lshift(class_hi, 8) + class_lo
ans.class = class
-- print("class: ", class)
local ttl_bytes = { byte(buf, pos + 4, pos + 7) }
-- print("ttl bytes: ", concat(ttl_bytes, " "))
local ttl = lshift(ttl_bytes[1], 24) + lshift(ttl_bytes[2], 16)
+ lshift(ttl_bytes[3], 8) + ttl_bytes[4]
-- print("ttl: ", ttl)
ans.ttl = ttl
local len_hi = byte(buf, pos + 8)
local len_lo = byte(buf, pos + 9)
local len = lshift(len_hi, 8) + len_lo
-- print("record len: ", len)
pos = pos + 10
if typ == TYPE_A then
if len ~= 4 then
return nil, "bad A record value length: " .. len
end
local addr_bytes = { byte(buf, pos, pos + 3) }
local addr = concat(addr_bytes, ".")
-- print("ipv4 address: ", addr)
ans.address = addr
pos = pos + 4
elseif typ == TYPE_CNAME then
local cname, p = _decode_name(buf, pos)
if not cname then
return nil, pos
end
if p - pos ~= len then
return nil, format("bad cname record length: %d ~= %d",
p - pos, len)
end
pos = p
-- print("cname: ", cname)
ans.cname = cname
elseif typ == TYPE_AAAA then
if len ~= 16 then
return nil, "bad AAAA record value length: " .. len
end
local addr_bytes = { byte(buf, pos, pos + 15) }
local flds = {}
local comp_begin, comp_end
for i = 1, 16, 2 do
local a = addr_bytes[i]
local b = addr_bytes[i + 1]
if a == 0 then
insert(flds, format("%x", b))
else
insert(flds, format("%x%02x", a, b))
end
end
-- we do not compress the IPv6 addresses by default
-- due to performance considerations
ans.address = concat(flds, ":")
pos = pos + 16
elseif typ == TYPE_MX then
-- print("len = ", len)
if len < 3 then
return nil, "bad MX record value length: " .. len
end
local pref_hi = byte(buf, pos)
local pref_lo = byte(buf, pos + 1)
ans.preference = lshift(pref_hi, 8) + pref_lo
local host, p = _decode_name(buf, pos + 2)
if not host then
return nil, pos
end
if p - pos ~= len then
return nil, format("bad cname record length: %d ~= %d",
p - pos, len)
end
ans.exchange = host
pos = p
elseif typ == TYPE_SRV then
if len < 7 then
return nil, "bad SRV record value length: " .. len
end
local prio_hi = byte(buf, pos)
local prio_lo = byte(buf, pos + 1)
ans.priority = lshift(prio_hi, 8) + prio_lo
local weight_hi = byte(buf, pos + 2)
local weight_lo = byte(buf, pos + 3)
ans.weight = lshift(weight_hi, 8) + weight_lo
local port_hi = byte(buf, pos + 4)
local port_lo = byte(buf, pos + 5)
ans.port = lshift(port_hi, 8) + port_lo
local name, p = _decode_name(buf, pos + 6)
if not name then
return nil, pos
end
if p - pos ~= len then
return nil, format("bad srv record length: %d ~= %d",
p - pos, len)
end
ans.target = name
pos = p
elseif typ == TYPE_NS then
local name, p = _decode_name(buf, pos)
if not name then
return nil, pos
end
if p - pos ~= len then
return nil, format("bad cname record length: %d ~= %d",
p - pos, len)
end
pos = p
-- print("name: ", name)
ans.nsdname = name
elseif typ == TYPE_TXT or typ == TYPE_SPF then
local key = (typ == TYPE_TXT) and "txt" or "spf"
local slen = byte(buf, pos)
if slen + 1 > len then
-- truncate the over-run TXT record data
slen = len
end
-- print("slen: ", len)
local val = sub(buf, pos + 1, pos + slen)
local last = pos + len
pos = pos + slen + 1
if pos < last then
-- more strings to be processed
-- this code path is usually cold, so we do not
-- merge the following loop on this code path
-- with the processing logic above.
val = {val}
local idx = 2
repeat
local slen = byte(buf, pos)
if pos + slen + 1 > last then
-- truncate the over-run TXT record data
slen = last - pos - 1
end
val[idx] = sub(buf, pos + 1, pos + slen)
idx = idx + 1
pos = pos + slen + 1
until pos >= last
end
ans[key] = val
elseif typ == TYPE_PTR then
local name, p = _decode_name(buf, pos)
if not name then
return nil, pos
end
if p - pos ~= len then
return nil, format("bad cname record length: %d ~= %d",
p - pos, len)
end
pos = p
-- print("name: ", name)
ans.ptrdname = name
else
-- for unknown types, just forward the raw value
ans.rdata = sub(buf, pos, pos + len - 1)
pos = pos + len
end
end
return answers
end
local function _gen_id(self)
local id = self._id -- for regression testing
if id then
return id
end
return rand(0, 65535) -- two bytes
end
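-- Send the already-built query over TCP to the current nameserver. TCP DNS
-- messages are framed with a 2-byte big-endian length prefix, both for the
-- request being sent and for the reply read back before parsing.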
local function _tcp_query(self, query, id)
local sock = self.tcp_sock
if not sock then
return nil, "not initialized"
end
log(DEBUG, "query the TCP server due to reply truncation")
local server = _get_cur_server(self)
local ok, err = sock:connect(server[1], server[2])
if not ok then
return nil, "failed to connect to TCP server "
.. concat(server, ":") .. ": " .. err
end
query = concat(query, "")
local len = #query
local len_hi = char(rshift(len, 8))
local len_lo = char(band(len, 0xff))
local bytes, err = sock:send({len_hi, len_lo, query})
if not bytes then
return nil, "failed to send query to TCP server "
.. concat(server, ":") .. ": " .. err
end
local buf, err = sock:receive(2)
if not buf then
return nil, "failed to receive the reply length field from TCP server "
.. concat(server, ":") .. ": " .. err
end
local len_hi = byte(buf, 1)
local len_lo = byte(buf, 2)
local len = lshift(len_hi, 8) + len_lo
-- print("tcp message len: ", len)
buf, err = sock:receive(len)
if not buf then
return nil, "failed to receive the reply message body from TCP server "
.. concat(server, ":") .. ": " .. err
end
local answers, err = parse_response(buf, id)
if not answers then
return nil, "failed to parse the reply from the TCP server "
.. concat(server, ":") .. ": " .. err
end
sock:close()
return answers
end
function _M.tcp_query(self, qname, opts)
local socks = self.socks
if not socks then
return nil, "not initialized"
end
pick_sock(self, socks)
local id = _gen_id(self)
local query, err = _build_request(qname, id, self.no_recurse, opts)
if not query then
return nil, err
end
return _tcp_query(self, query, id)
end
function _M.query(self, qname, opts)
local socks = self.socks
if not socks then
return nil, "not initialized"
end
local id = _gen_id(self)
local query, err = _build_request(qname, id, self.no_recurse, opts)
if not query then
return nil, err
end
-- local cjson = require "cjson"
-- print("query: ", cjson.encode(concat(query, "")))
local retrans = self.retrans
-- print("retrans: ", retrans)
for i = 1, retrans do
local sock = pick_sock(self, socks)
local ok, err = sock:send(query)
if not ok then
local server = _get_cur_server(self)
return nil, "failed to send request to UDP server "
.. concat(server, ":") .. ": " .. err
end
local buf, err
for j = 1, 128 do
buf, err = sock:receive(4096)
if err then
break
end
if buf then
local answers
answers, err = parse_response(buf, id)
if not answers then
if err == "truncated" then
return _tcp_query(self, query, id)
end
if err ~= "id mismatch" then
return nil, err
end
-- retry receiving when err == "id mismatch"
else
return answers
end
end
end
if err ~= "timeout" or i == retrans then
local server = _get_cur_server(self)
return nil, "failed to receive reply from UDP server "
.. concat(server, ":") .. ": " .. err
end
end
-- impossible to reach here
end
function _M.compress_ipv6_addr(addr)
local addr = re_sub(addr, "^(0:)+|(:0)+$|:(0:)+", "::", "jo")
if addr == "::0" then
addr = "::"
end
return addr
end
randomseed(ngx_time())
return _M


@@ -1,271 +0,0 @@
package TestDNS;
use strict;
use warnings;
use 5.010001;
use Test::Nginx::Socket::Lua -Base;
#use JSON::XS;
use constant {
TYPE_A => 1,
TYPE_TXT => 16,
TYPE_CNAME => 5,
TYPE_AAAA => 28,
CLASS_INTERNET => 1,
};
sub encode_name ($);
sub encode_ipv4 ($);
sub encode_ipv6 ($);
sub gen_dns_reply ($$);
sub Test::Base::Filter::dns {
my ($self, $code) = @_;
my $args = $self->current_arguments;
#warn "args: $args";
if (defined $args && $args ne 'tcp' && $args ne 'udp') {
die "Invalid argument to the \"dns\" filter: $args\n";
}
my $mode = $args // 'udp';
my $block = $self->current_block;
my $pointer_spec = $block->dns_pointers;
my @pointers;
if (defined $pointer_spec) {
my @loops = split /\s*,\s*/, $pointer_spec;
for my $loop (@loops) {
my @nodes = split /\s*=>\s*/, $loop;
my $prev;
for my $n (@nodes) {
if ($n !~ /^\d+$/ || $n == 0) {
die "bad name ID in the --- dns_pointers: $n\n";
}
if (!defined $prev) {
$prev = $n;
next;
}
$pointers[$prev] = $n;
}
}
}
my $input = eval $code;
if ($@) {
die "failed to evaluate code $code: $@\n";
}
if (!ref $input) {
return $input;
}
if (ref $input eq 'ARRAY') {
my @replies;
for my $t (@$input) {
push @replies, gen_dns_reply($t, $mode);
}
return \@replies;
}
if (ref $input eq 'HASH') {
return gen_dns_reply($input, $mode);
}
return $input;
}
sub gen_dns_reply ($$) {
my ($t, $mode) = @_;
my @raw_names;
push @raw_names, \($t->{qname});
my $answers = $t->{answer} // [];
if (!ref $answers) {
$answers = [$answers];
}
for my $ans (@$answers) {
push @raw_names, \($ans->{name});
if (defined $ans->{cname}) {
push @raw_names, \($ans->{cname});
}
}
for my $rname (@raw_names) {
$$rname = encode_name($$rname // "");
}
my $qname = $t->{qname};
my $s = '';
my $id = $t->{id} // 0;
$s .= pack("n", $id);
#warn "id: ", length($s), " ", encode_json([$s]);
my $qr = $t->{qr} // 1;
my $opcode = $t->{opcode} // 0;
my $aa = $t->{aa} // 0;
my $tc = $t->{tc} // 0;
my $rd = $t->{rd} // 1;
my $ra = $t->{ra} // 1;
my $rcode = $t->{rcode} // 0;
my $flags = ($qr << 15) + ($opcode << 11) + ($aa << 10) + ($tc << 9) + ($rd << 8) + ($ra << 7) + $rcode;
#warn sprintf("flags: %b", $flags);
$flags = pack("n", $flags);
$s .= $flags;
#warn "flags: ", length($flags), " ", encode_json([$flags]);
my $qdcount = $t->{qdcount} // 1;
my $ancount = $t->{ancount} // scalar @$answers;
my $nscount = 0;
my $arcount = 0;
$s .= pack("nnnn", $qdcount, $ancount, $nscount, $arcount);
#warn "qname: ", length($qname), " ", encode_json([$qname]);
$s .= $qname;
my $qs_type = $t->{qtype} // TYPE_A;
my $qs_class = $t->{qclass} // CLASS_INTERNET;
$s .= pack("nn", $qs_type, $qs_class);
for my $ans (@$answers) {
my $name = $ans->{name};
my $type = $ans->{type};
my $class = $ans->{class};
my $ttl = $ans->{ttl};
my $rdlength = $ans->{rdlength};
my $rddata = $ans->{rddata};
my $ipv4 = $ans->{ipv4};
if (defined $ipv4) {
my ($data, $len) = encode_ipv4($ipv4);
$rddata //= $data;
$rdlength //= $len;
$type //= TYPE_A;
$class //= CLASS_INTERNET;
}
my $ipv6 = $ans->{ipv6};
if (defined $ipv6) {
my ($data, $len) = encode_ipv6($ipv6);
$rddata //= $data;
$rdlength //= $len;
$type //= TYPE_AAAA;
$class //= CLASS_INTERNET;
}
my $cname = $ans->{cname};
if (defined $cname) {
$rddata //= $cname;
$rdlength //= length $rddata;
$type //= TYPE_CNAME;
$class //= CLASS_INTERNET;
}
my $txt = $ans->{txt};
if (defined $txt) {
$rddata //= $txt;
$rdlength //= length $rddata;
$type //= TYPE_TXT;
$class //= CLASS_INTERNET;
}
$type //= 0;
$class //= 0;
$ttl //= 0;
#warn "rdlength: $rdlength, rddata: ", encode_json([$rddata]), "\n";
$s .= $name . pack("nnNn", $type, $class, $ttl, $rdlength) . $rddata;
}
if ($mode eq 'tcp') {
return pack("n", length($s)) . $s;
}
return $s;
}
sub encode_ipv4 ($) {
my $txt = shift;
my @bytes = split /\./, $txt;
return pack("CCCC", @bytes), 4;
}
sub encode_ipv6 ($) {
my $txt = shift;
my @groups = split /:/, $txt;
my $nils = 0;
my $nonnils = 0;
for my $g (@groups) {
if ($g eq '') {
$nils++;
} else {
$nonnils++;
$g = hex($g);
}
}
my $total = $nils + $nonnils;
if ($total > 8 ) {
die "Invalid IPv6 address: too many groups: $total: $txt";
}
if ($nils) {
my $found = 0;
my @new_groups;
for my $g (@groups) {
if ($g eq '') {
if ($found) {
next;
}
for (1 .. 8 - $nonnils) {
push @new_groups, 0;
}
$found = 1;
} else {
push @new_groups, $g;
}
}
@groups = @new_groups;
}
if (@groups != 8) {
die "Invalid IPv6 address: $txt: @groups\n";
}
#warn "IPv6 groups: @groups";
return pack("nnnnnnnn", @groups), 16;
}
sub encode_name ($) {
my $name = shift;
$name =~ s/([^.]+)\.?/chr(length($1)) . $1/ge;
$name .= "\0";
return $name;
}
1


@@ -1,89 +0,0 @@
local ngx_null = ngx.null
local tostring = tostring
local byte = string.byte
local gsub = string.gsub
local sort = table.sort
local pairs = pairs
local ipairs = ipairs
local concat = table.concat
local ok, new_tab = pcall(require, "table.new")
if not ok then
new_tab = function (narr, nrec) return {} end
end
local _M = {}
local metachars = {
['\t'] = '\\t',
["\\"] = "\\\\",
['"'] = '\\"',
['\r'] = '\\r',
['\n'] = '\\n',
}
local function encode_str(s)
-- XXX we will rewrite this when string.buffer is implemented
-- in LuaJIT 2.1 because string.gsub cannot be JIT compiled.
return gsub(s, '["\\\r\n\t]', metachars)
end
local function is_arr(t)
local exp = 1
for k, _ in pairs(t) do
if k ~= exp then
return nil
end
exp = exp + 1
end
return exp - 1
end
local encode
encode = function (v)
if v == nil or v == ngx_null then
return "null"
end
local typ = type(v)
if typ == 'string' then
return '"' .. encode_str(v) .. '"'
end
if typ == 'number' or typ == 'boolean' then
return tostring(v)
end
if typ == 'table' then
local n = is_arr(v)
if n then
local bits = new_tab(n, 0)
for i, elem in ipairs(v) do
bits[i] = encode(elem)
end
return "[" .. concat(bits, ",") .. "]"
end
local keys = {}
local i = 0
for key, _ in pairs(v) do
i = i + 1
keys[i] = key
end
sort(keys)
local bits = new_tab(0, i)
i = 0
for _, key in ipairs(keys) do
i = i + 1
bits[i] = encode(key) .. ":" .. encode(v[key])
end
return "{" .. concat(bits, ",") .. "}"
end
return '"<' .. typ .. '>"'
end
_M.encode = encode
return _M

File diff suppressed because it is too large


@@ -1,502 +0,0 @@
# vim:set ft= ts=4 sw=4 et:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (3 * blocks());
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/t/lib/?.lua;$pwd/lib/?.lua;;";
lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;";
};
$ENV{TEST_NGINX_RESOLVER} ||= '8.8.8.8';
no_long_string();
run_tests();
__DATA__
=== TEST 1: A records
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("www.google.com", { qtype = r.TYPE_A })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[.*?"address":"(?:\d{1,3}\.){3}\d+".*?\]$
--- no_error_log
[error]
=== TEST 2: CNAME records
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("www.yahoo.com", { qtype = r.TYPE_CNAME })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[.*?"cname":"[-_a-z0-9.]+".*?\]$
--- no_error_log
[error]
=== TEST 3: AAAA records
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("www.google.com", { qtype = r.TYPE_AAAA })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[.*?"address":"[a-fA-F0-9]*(?::[a-fA-F0-9]*)+".*?\]$
--- no_error_log
[error]
=== TEST 4: compress ipv6 addr
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local c = resolver.compress_ipv6_addr
ngx.say(c("1080:0:0:0:8:800:200C:417A"))
ngx.say(c("FF01:0:0:0:0:0:0:101"))
ngx.say(c("0:0:0:0:0:0:0:1"))
ngx.say(c("1:5:0:0:0:0:0:0"))
ngx.say(c("7:25:0:0:0:3:0:0"))
ngx.say(c("0:0:0:0:0:0:0:0"))
';
}
--- request
GET /t
--- response_body
1080::8:800:200C:417A
FF01::101
::1
1:5::
7:25::3:0:0
::
--- no_error_log
[error]
=== TEST 5: A records (TCP)
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:tcp_query("www.google.com", { qtype = r.TYPE_A })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[.*?"address":"(?:\d{1,3}\.){3}\d+".*?\]$
--- no_error_log
[error]
=== TEST 6: MX records
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("gmail.com", { qtype = r.TYPE_MX })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[\{.*?"preference":\d+,.*?"exchange":"[^"]+".*?\}\]$
--- no_error_log
[error]
=== TEST 7: NS records
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("agentzh.org", { qtype = r.TYPE_NS })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[\{.*?"nsdname":"[^"]+".*?\}\]$
--- no_error_log
[error]
=== TEST 8: TXT query (no ans)
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("agentzh.org", { qtype = r.TYPE_TXT })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body
records: {}
--- no_error_log
[error]
--- timeout: 10
=== TEST 9: TXT query (with ans)
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("gmail.com", { qtype = r.TYPE_TXT })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[\{.*?"txt":"v=spf\d+\s[^"]+".*?\}\]$
--- no_error_log
[error]
=== TEST 10: PTR query
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("4.4.8.8.in-addr.arpa", { qtype = r.TYPE_PTR })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[\{.*?"ptrdname":"google-public-dns-b\.google\.com".*?\}\]$
--- no_error_log
[error]
=== TEST 11: domains with a trailing dot
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("www.google.com.", { qtype = r.TYPE_A })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[.*?"address":"(?:\d{1,3}\.){3}\d+".*?\]$
--- no_error_log
[error]
=== TEST 12: domains with a leading dot
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query(".www.google.com", { qtype = r.TYPE_A })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body
failed to query: bad name
--- no_error_log
[error]
=== TEST 13: SRV records for XMPP
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("_xmpp-client._tcp.jabber.org", { qtype = r.TYPE_SRV })
if not ans then
ngx.say("failed to query: ", err)
return
end
local ljson = require "ljson"
ngx.say("records: ", ljson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[(?:{"class":1,"name":"_xmpp-client._tcp.jabber.org","port":\d+,"priority":\d+,"target":"[\w.]+\.jabber.org","ttl":\d+,"type":33,"weight":\d+},?)+\]$
--- no_error_log
[error]
=== TEST 14: SPF query (with ans)
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("linkedin.com", { qtype = r.TYPE_SPF })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body_like chop
^records: \[\{.*?"spf":"v=spf\d+\s[^"]+".*?\}\]$
--- no_error_log
[error]
=== TEST 15: SPF query (no ans)
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local resolver = require "resty.dns.resolver"
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
if not r then
ngx.say("failed to instantiate resolver: ", err)
return
end
local ans, err = r:query("agentzh.org", { qtype = r.TYPE_SPF })
if not ans then
ngx.say("failed to query: ", err)
return
end
local cjson = require "cjson"
ngx.say("records: ", cjson.encode(ans))
';
}
--- request
GET /t
--- response_body
records: {}
--- no_error_log
[error]
--- timeout: 10
View file

@@ -1,549 +0,0 @@
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:lj_str_new
}
{
<insert_a_suppression_name_here>
Memcheck:Param
write(buf)
fun:__write_nocancel
fun:ngx_log_error_core
fun:ngx_resolver_read_response
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:ngx_sprintf_num
fun:ngx_vslprintf
fun:ngx_log_error_core
fun:ngx_resolver_read_response
fun:ngx_epoll_process_events
fun:ngx_process_events_and_timers
fun:ngx_single_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Addr1
fun:ngx_vslprintf
fun:ngx_snprintf
fun:ngx_sock_ntop
fun:ngx_event_accept
}
{
<insert_a_suppression_name_here>
Memcheck:Param
write(buf)
fun:__write_nocancel
fun:ngx_log_error_core
fun:ngx_resolver_read_response
fun:ngx_event_process_posted
fun:ngx_process_events_and_timers
fun:ngx_single_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:ngx_sprintf_num
fun:ngx_vslprintf
fun:ngx_log_error_core
fun:ngx_resolver_read_response
fun:ngx_event_process_posted
fun:ngx_process_events_and_timers
fun:ngx_single_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:lj_str_new
fun:lua_pushlstring
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
obj:*
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:lj_str_new
fun:lua_pushlstring
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:ngx_http_lua_ndk_set_var_get
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:lj_str_new
fun:lua_getfield
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:lj_str_new
fun:lua_setfield
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:ngx_http_variables_init_vars
fun:ngx_http_block
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:ngx_conf_parse
}
{
<insert_a_suppression_name_here>
exp-sgcheck:SorG
fun:ngx_vslprintf
fun:ngx_log_error_core
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_calloc
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_malloc
fun:ngx_pcalloc
}
{
<insert_a_suppression_name_here>
Memcheck:Addr4
fun:lj_str_new
fun:lua_setfield
}
{
<insert_a_suppression_name_here>
Memcheck:Addr4
fun:lj_str_new
fun:lua_getfield
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:(below main)
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:ngx_conf_flush_files
fun:ngx_single_process_cycle
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:memcpy
fun:ngx_vslprintf
fun:ngx_log_error_core
fun:ngx_http_charset_header_filter
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:memalign
fun:posix_memalign
fun:ngx_memalign
fun:ngx_pcalloc
}
{
<insert_a_suppression_name_here>
Memcheck:Addr4
fun:lj_str_new
fun:lua_pushlstring
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:lj_str_new
fun:lj_str_fromnum
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:lj_str_new
fun:lua_pushlstring
}
{
<false_alarm_due_to_u32_alignment_in_luajit2>
Memcheck:Addr4
fun:lj_str_new
fun:lua_setfield
fun:ngx_http_lua_cache_store_code
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:lj_str_new
fun:lua_getfield
fun:ngx_http_lua_cache_load_code
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:lj_str_new
fun:lua_setfield
fun:ngx_http_lua_cache_store_code
}
{
<false_alarm_due_to_u32_alignment_in_luajit2>
Memcheck:Addr4
fun:lj_str_new
fun:lua_getfield
fun:ngx_http_lua_cache_load_code
}
{
<insert_a_suppression_name_here>
Memcheck:Param
socketcall.setsockopt(optval)
fun:setsockopt
fun:drizzle_state_connect
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_pool_cleanup_add
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_pnalloc
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:ngx_conf_flush_files
fun:ngx_single_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_pcalloc
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_malloc
fun:ngx_palloc_large
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_create_pool
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_malloc
fun:ngx_palloc
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_malloc
fun:ngx_pnalloc
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_array_push
fun:ngx_http_get_variable_index
fun:ngx_http_memc_add_variable
fun:ngx_http_memc_init
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
fun:ngx_single_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_crc32_table_init
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
fun:ngx_worker_process_init
fun:ngx_worker_process_cycle
fun:ngx_spawn_process
fun:ngx_start_worker_processes
fun:ngx_master_process_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_hash_init
fun:ngx_http_variables_init_vars
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_http_upstream_drizzle_create_srv_conf
fun:ngx_http_upstream
fun:ngx_conf_parse
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_hash_keys_array_init
fun:ngx_http_variables_add_core_vars
fun:ngx_http_core_preconfiguration
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_array_push
fun:ngx_hash_add_key
fun:ngx_http_add_variable
fun:ngx_http_echo_add_variables
fun:ngx_http_echo_handler_init
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_http_upstream_drizzle_create_srv_conf
fun:ngx_http_core_server
fun:ngx_conf_parse
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_http_upstream_drizzle_create_srv_conf
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_array_push
fun:ngx_hash_add_key
fun:ngx_http_variables_add_core_vars
fun:ngx_http_core_preconfiguration
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_hash_init
fun:ngx_http_upstream_init_main_conf
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_pcalloc
fun:ngx_http_drizzle_keepalive_init
fun:ngx_http_upstream_drizzle_init
fun:ngx_http_upstream_init_main_conf
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_palloc_large
fun:ngx_palloc
fun:ngx_hash_init
fun:ngx_http_variables_init_vars
fun:ngx_http_block
fun:ngx_conf_parse
fun:ngx_init_cycle
fun:main
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:memalign
fun:posix_memalign
fun:ngx_memalign
fun:ngx_create_pool
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:memalign
fun:posix_memalign
fun:ngx_memalign
fun:ngx_palloc_block
fun:ngx_palloc
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:index
fun:expand_dynamic_string_token
fun:_dl_map_object
fun:map_doit
fun:_dl_catch_error
fun:do_preload
fun:dl_main
fun:_dl_sysdep_start
fun:_dl_start
}
View file

@@ -1,10 +0,0 @@
*.swp
*.swo
*~
go
t/servroot/
reindex
nginx
ctags
tags
a.lua
View file

@@ -1,18 +0,0 @@
OPENRESTY_PREFIX=/usr/local/openresty
PREFIX ?= /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install
.PHONY: all test install
all: ;
install: all
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/
$(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/
test: all
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t
View file

@@ -1,376 +0,0 @@
Name
====
lua-resty-lock - Simple shm-based nonblocking lock API
Table of Contents
=================
* [Name](#name)
* [Status](#status)
* [Synopsis](#synopsis)
* [Description](#description)
* [Methods](#methods)
* [new](#new)
* [lock](#lock)
* [unlock](#unlock)
* [For Multiple Lua Light Threads](#for-multiple-lua-light-threads)
* [For Cache Locks](#for-cache-locks)
* [Prerequisites](#prerequisites)
* [Installation](#installation)
* [TODO](#todo)
* [Community](#community)
* [English Mailing List](#english-mailing-list)
* [Chinese Mailing List](#chinese-mailing-list)
* [Bugs and Patches](#bugs-and-patches)
* [Author](#author)
* [Copyright and License](#copyright-and-license)
* [See Also](#see-also)
Status
======
This library is still under early development and is considered production ready.
Synopsis
========
```lua
# nginx.conf
http {
# you do not need the following line if you are using the
# ngx_openresty bundle:
lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";
lua_shared_dict my_locks 100k;
server {
...
location = /t {
content_by_lua '
local lock = require "resty.lock"
for i = 1, 2 do
local lock = lock:new("my_locks")
local elapsed, err = lock:lock("my_key")
ngx.say("lock: ", elapsed, ", ", err)
local ok, err = lock:unlock()
if not ok then
ngx.say("failed to unlock: ", err)
end
ngx.say("unlock: ", ok)
end
';
}
}
}
```
Description
===========
This library implements a simple mutex lock in a similar way to ngx_proxy module's [proxy_cache_lock directive](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock).
Under the hood, this library uses [ngx_lua](https://github.com/chaoslawful/lua-nginx-module) module's shared memory dictionaries. The lock waiting is nonblocking because we use stepwise [ngx.sleep](https://github.com/chaoslawful/lua-nginx-module#ngxsleep) to poll the lock periodically.
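For illustration only, here is a bare-bones sketch of that shm-dict-plus-polling idea, using a hypothetical `my_locks` shared dict. This is not the library itself; the real implementation (`lib/resty/lock.lua`, shown further below) additionally handles GC-based cleanup and exponential back-off of the sleep step.
```lua
local dict = ngx.shared.my_locks   -- assumes: lua_shared_dict my_locks 100k;

local function naive_lock(key, exptime, timeout, step)
    local elapsed = 0
    while true do
        -- add() only succeeds when the key is absent, so the first caller
        -- to add it holds the lock until it is deleted or expires
        local ok, err = dict:add(key, true, exptime)
        if ok then
            return elapsed
        end
        if err ~= "exists" then
            return nil, err
        end
        if elapsed >= timeout then
            return nil, "timeout"
        end
        ngx.sleep(step)            -- nonblocking; yields the current light thread
        elapsed = elapsed + step
    end
end
```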
[Back to TOC](#table-of-contents)
Methods
=======
To load this library,
1. you need to specify this library's path in ngx_lua's [lua_package_path](https://github.com/chaoslawful/lua-nginx-module#lua_package_path) directive. For example, `lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";`.
2. you use `require` to load the library into a local Lua variable:
```lua
local lock = require "resty.lock"
```
[Back to TOC](#table-of-contents)
new
---
`syntax: obj, err = lock:new(dict_name)`
`syntax: obj, err = lock:new(dict_name, opts)`
Creates a new lock object instance by specifying the shared dictionary name (created by [lua_shared_dict](https://github.com/chaoslawful/lua-nginx-module#lua_shared_dict)) and an optional options table `opts`.
The options table accepts the following options (each of them is exercised in the sketch after this list):
* `exptime`
Specifies the expiration time (in seconds) for the lock entry in the shared memory dictionary. A time resolution of up to `0.001` seconds is supported. Defaults to 30 (seconds). Even if the invoker does not call `unlock` or the object holding the lock is not GC'd, the lock will be released after this time, so a deadlock won't happen even when the worker process holding the lock crashes.
* `timeout`
Specifies the maximal waiting time (in seconds) for the [lock](#lock) method calls on the current object instance. A time resolution of up to `0.001` seconds is supported. Defaults to 5 (seconds). This option value cannot be bigger than `exptime`. This timeout is to prevent a [lock](#lock) method call from waiting forever.
You can specify `0` to make the [lock](#lock) method return immediately without waiting if it cannot acquire the lock right away.
* `step`
Specifies the initial step (in seconds) of sleeping when waiting for the lock. Defaults to `0.001` (seconds). When the [lock](#lock) method is waiting on a busy lock, it sleeps by steps. The step size is increased by a ratio (specified by the `ratio` option) until reaching the step size limit (specified by the `max_step` option).
* `ratio`
Specifies the step increasing ratio. Defaults to `2`, that is, the step size doubles at each waiting iteration.
* `max_step`
Specifies the maximal step size (i.e., the sleep interval, in seconds) allowed. See also the `step` and `ratio` options. Defaults to 0.5 (seconds).
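For instance, a lock object using all of these options might be created like this (the shared dict `my_locks` is the one declared in the Synopsis; the option values are arbitrary):
```lua
local resty_lock = require "resty.lock"

local lock, err = resty_lock:new("my_locks", {
    exptime  = 10,    -- the lock entry auto-expires after 10 seconds
    timeout  = 1,     -- lock() gives up waiting after 1 second
    step     = 0.01,  -- the first retry sleeps 10 ms ...
    ratio    = 2,     -- ... the next 20 ms, then 40 ms, and so on ...
    max_step = 0.1,   -- ... but never more than 100 ms per step
})
if not lock then
    ngx.say("failed to create lock object: ", err)
    return
end
```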
[Back to TOC](#table-of-contents)
lock
----
`syntax: elapsed, err = obj:lock(key)`
Tries to lock a key across all the Nginx worker processes in the current Nginx server instance. Different keys are different locks.
The length of the key string must not be larger than 65535 bytes.
Returns the waiting time (in seconds) if the lock is successfully acquired. Otherwise returns `nil` and a string describing the error.
The waiting time is not taken from the wallclock, but rather comes from simply adding up all the waiting "steps". A nonzero `elapsed` return value indicates that someone else has just held this lock. But a zero return value cannot guarantee that no one else has just acquired and released the lock.
When this method is waiting on fetching the lock, no operating system threads will be blocked and the current Lua "light thread" will automatically be yielded behind the scenes.
It is strongly recommended to always call the [unlock()](#unlock) method to actively release the lock as soon as possible.
If the [unlock()](#unlock) method is never called after this method call, the lock will get released when
1. the current `resty.lock` object instance is collected automatically by the Lua GC.
2. the `exptime` for the lock entry is reached.
Common errors for this method call are
* "timeout"
: The timeout threshold specified by the `timeout` option of the [new](#new) method is exceeded.
* "locked"
: The current `resty.lock` object instance is already holding a lock (not necessarily of the same key).
Other possible errors are from ngx_lua's shared dictionary API.
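For example, a caller could tell these cases apart like this (`fail` here is the same undefined error helper used in the cache-lock example further below):
```lua
local elapsed, err = obj:lock("some_key")
if not elapsed then
    if err == "timeout" then
        -- another worker held the lock longer than our `timeout` allows
        return fail("lock is busy, giving up")
    end
    -- "locked", or an error from the shared dictionary API
    return fail("failed to acquire the lock: ", err)
end
ngx.say("waited ", elapsed, " seconds for the lock")
```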
[Back to TOC](#table-of-contents)
unlock
------
`syntax: ok, err = obj:unlock()`
Releases the lock held by the current `resty.lock` object instance.
Returns `1` on success. Returns `nil` and a string describing the error otherwise.
If you call `unlock` when no lock is currently held, the error "unlocked" will be returned.
[Back to TOC](#table-of-contents)
For Multiple Lua Light Threads
==============================
It is always a bad idea to share a single `resty.lock` object instance across multiple ngx_lua "light threads" because the object itself is stateful and is vulnerable to race conditions. It is highly recommended to always allocate a separate `resty.lock` object instance for each "light thread" that needs one.
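For instance, when spawning light threads, give each one its own instance. The following sketch assumes the `my_locks` shared dict from the Synopsis; the bundled tests follow the same pattern.
```lua
local resty_lock = require "resty.lock"

local function guarded_work(key)
    -- each light thread allocates its own resty.lock object
    local lock = resty_lock:new("my_locks")
    local elapsed, err = lock:lock(key)
    if not elapsed then
        ngx.log(ngx.ERR, "failed to lock: ", err)
        return
    end
    -- ... do the work protected by the lock here ...
    local ok, err = lock:unlock()
    if not ok then
        ngx.log(ngx.ERR, "failed to unlock: ", err)
    end
end

local t1 = ngx.thread.spawn(guarded_work, "some_key")
local t2 = ngx.thread.spawn(guarded_work, "some_key")
ngx.thread.wait(t1)
ngx.thread.wait(t2)
```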
[Back to TOC](#table-of-contents)
For Cache Locks
===============
One common use case for this library is to avoid the so-called "dog-pile effect", that is, to limit concurrent backend queries for the same key when a cache miss happens. This usage is similar to the standard ngx_proxy module's [proxy_cache_lock](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock) directive.
The basic workflow for a cache lock is as follows:
1. Check the cache for a hit with the key. If a cache miss happens, proceed to step 2.
2. Instantiate a `resty.lock` object, call the [lock](#lock) method on the key, and check the 1st return value, i.e., the lock waiting time. If it is `nil`, handle the error; otherwise proceed to step 3.
3. Check the cache again for a hit. If it is still a miss, proceed to step 4; otherwise release the lock by calling [unlock](#unlock) and then return the cached value.
4. Query the backend (the data source) for the value, put the result into the cache, and then release the lock currently held by calling [unlock](#unlock).
Below is a kinda complete code example that demonstrates the idea.
```lua
local resty_lock = require "resty.lock"
local cache = ngx.shared.my_cache
-- step 1:
local val, err = cache:get(key)
if val then
ngx.say("result: ", val)
return
end
if err then
return fail("failed to get key from shm: ", err)
end
-- cache miss!
-- step 2:
local lock = resty_lock:new("my_locks")
local elapsed, err = lock:lock(key)
if not elapsed then
return fail("failed to acquire the lock: ", err)
end
-- lock successfully acquired!
-- step 3:
-- someone might have already put the value into the cache
-- so we check it here again:
val, err = cache:get(key)
if val then
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
ngx.say("result: ", val)
return
end
--- step 4:
local val = fetch_redis(key)
if not val then
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
-- FIXME: we should handle the backend miss more carefully
-- here, like inserting a stub value into the cache.
ngx.say("no value found")
return
end
-- update the shm cache with the newly fetched value
local ok, err = cache:set(key, val, 1)
if not ok then
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
return fail("failed to update shm cache: ", err)
end
local ok, err = lock:unlock()
if not ok then
return fail("failed to unlock: ", err)
end
ngx.say("result: ", val)
```
Here we assume that we use the ngx_lua shared memory dictionary to cache the Redis query results and we have the following configurations in `nginx.conf`:
```nginx
# you may want to change the dictionary size for your cases.
lua_shared_dict my_cache 10m;
lua_shared_dict my_locks 1m;
```
The `my_cache` dictionary is for the data cache while the `my_locks` dictionary is for `resty.lock` itself.
Several important things to note in the example above:
1. You need to release the lock as soon as possible, even when some other unrelated errors happen.
2. You need to update the cache with the result obtained from the backend *before* releasing the lock so that other threads already waiting on the lock can get the cached value when they acquire the lock afterwards.
3. When the backend returns no value at all, we should handle the case carefully by inserting some stub value into the cache.
[Back to TOC](#table-of-contents)
Prerequisites
=============
* [LuaJIT](http://luajit.org) 2.0+
* [ngx_lua](https://github.com/chaoslawful/lua-nginx-module) 0.8.10+
[Back to TOC](#table-of-contents)
Installation
============
It is recommended to use the latest [ngx_openresty bundle](http://openresty.org) directly where this library
is bundled and enabled by default. At least ngx_openresty 1.4.2.9 is required. And you need to enable LuaJIT when building your ngx_openresty
bundle by passing the `--with-luajit` option to its `./configure` script. No extra Nginx configuration is required.
If you want to use this library with your own Nginx build (with ngx_lua), then you need to
ensure you are using at least ngx_lua 0.8.10. Also, you need to configure
the [lua_package_path](https://github.com/chaoslawful/lua-nginx-module#lua_package_path) directive to
add the path of your lua-resty-lock source tree to ngx_lua's Lua module search path, as in
```nginx
# nginx.conf
http {
lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";
...
}
```
and then load the library in Lua:
```lua
local lock = require "resty.lock"
```
[Back to TOC](#table-of-contents)
TODO
====
* We should simplify the current implementation when LuaJIT 2.1 gets support for `__gc` metamethod on normal Lua tables. Right now we are using an FFI cdata and a ref/unref memo table to work around this, which is rather ugly and a bit inefficient.
[Back to TOC](#table-of-contents)
Community
=========
[Back to TOC](#table-of-contents)
English Mailing List
--------------------
The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers.
[Back to TOC](#table-of-contents)
Chinese Mailing List
--------------------
The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers.
[Back to TOC](#table-of-contents)
Bugs and Patches
================
Please report bugs or submit patches by
1. creating a ticket on the [GitHub Issue Tracker](http://github.com/openresty/lua-resty-lock/issues),
1. or posting to the [OpenResty community](#community).
[Back to TOC](#table-of-contents)
Author
======
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
[Back to TOC](#table-of-contents)
Copyright and License
=====================
This module is licensed under the BSD license.
Copyright (C) 2013-2014, by Yichun "agentzh" Zhang, CloudFlare Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[Back to TOC](#table-of-contents)
See Also
========
* the ngx_lua module: https://github.com/chaoslawful/lua-nginx-module
* OpenResty: http://openresty.org
[Back to TOC](#table-of-contents)
View file

@@ -1,208 +0,0 @@
-- Copyright (C) Yichun Zhang (agentzh)
local ffi = require "ffi"
local ffi_new = ffi.new
local shared = ngx.shared
local sleep = ngx.sleep
local shdict_mt
local debug = ngx.config.debug
local setmetatable = setmetatable
local getmetatable = getmetatable
local tonumber = tonumber
local _M = { _VERSION = '0.04' }
local mt = { __index = _M }
local FREE_LIST_REF = 0
-- FIXME: we don't need this when we have __gc metamethod support on Lua
-- tables.
local memo = {}
if debug then _M.memo = memo end
local function ref_obj(key)
if key == nil then
return -1
end
local ref = memo[FREE_LIST_REF]
if ref and ref ~= 0 then
memo[FREE_LIST_REF] = memo[ref]
else
ref = #memo + 1
end
memo[ref] = key
-- print("ref key_id returned ", ref)
return ref
end
if debug then _M.ref_obj = ref_obj end
local function unref_obj(ref)
if ref >= 0 then
memo[ref] = memo[FREE_LIST_REF]
memo[FREE_LIST_REF] = ref
end
end
if debug then _M.unref_obj = unref_obj end
local function gc_lock(cdata)
local dict_id = tonumber(cdata.dict_id)
local key_id = tonumber(cdata.key_id)
-- print("key_id: ", key_id, ", key: ", memo[key_id], "dict: ",
-- type(memo[cdata.dict_id]))
if key_id > 0 then
local key = memo[key_id]
unref_obj(key_id)
local dict = memo[dict_id]
-- print("dict.delete type: ", type(dict.delete))
local ok, err = dict:delete(key)
if not ok then
ngx.log(ngx.ERR, 'failed to delete key "', key, '": ', err)
end
cdata.key_id = 0
end
unref_obj(dict_id)
end
local ctype = ffi.metatype("struct { int key_id; int dict_id; }",
{ __gc = gc_lock })
function _M.new(_, dict_name, opts)
local dict = shared[dict_name]
if not dict then
return nil, "dictionary not found"
end
local cdata = ffi_new(ctype)
cdata.key_id = 0
cdata.dict_id = ref_obj(dict)
local timeout, exptime, step, ratio, max_step
if opts then
timeout = opts.timeout
exptime = opts.exptime
step = opts.step
ratio = opts.ratio
max_step = opts.max_step
end
if not exptime then
exptime = 30
end
if timeout and timeout > exptime then
timeout = exptime
end
local self = {
cdata = cdata,
dict = dict,
timeout = timeout or 5,
exptime = exptime,
step = step or 0.001,
ratio = ratio or 2,
max_step = max_step or 0.5,
}
return setmetatable(self, mt)
end
function _M.lock(self, key)
if not key then
return nil, "nil key"
end
local dict = self.dict
local cdata = self.cdata
if cdata.key_id > 0 then
return nil, "locked"
end
local exptime = self.exptime
local ok, err = dict:add(key, true, exptime)
if ok then
cdata.key_id = ref_obj(key)
if not shdict_mt then
shdict_mt = getmetatable(dict)
end
return 0
end
if err ~= "exists" then
return nil, err
end
-- lock held by others
local step = self.step
local ratio = self.ratio
local timeout = self.timeout
local max_step = self.max_step
local elapsed = 0
while timeout > 0 do
if step > timeout then
step = timeout
end
sleep(step)
elapsed = elapsed + step
timeout = timeout - step
local ok, err = dict:add(key, true, exptime)
if ok then
cdata.key_id = ref_obj(key)
if not shdict_mt then
shdict_mt = getmetatable(dict)
end
return elapsed
end
if err ~= "exists" then
return nil, err
end
if timeout <= 0 then
break
end
step = step * ratio
if step <= 0 then
step = 0.001
end
if step > max_step then
step = max_step
end
end
return nil, "timeout"
end
function _M.unlock(self)
local dict = self.dict
local cdata = self.cdata
local key_id = tonumber(cdata.key_id)
if key_id <= 0 then
return nil, "unlocked"
end
local key = memo[key_id]
unref_obj(key_id)
local ok, err = dict:delete(key)
if not ok then
return nil, err
end
cdata.key_id = 0
return 1
end
return _M
View file

@@ -1,470 +0,0 @@
# vim:set ft= ts=4 sw=4 et:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;";
lua_shared_dict cache_locks 100k;
};
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
$ENV{TEST_NGINX_REDIS_PORT} ||= 6379;
no_long_string();
#no_diff();
run_tests();
__DATA__
=== TEST 1: lock is subject to garbage collection
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
for i = 1, 2 do
collectgarbage("collect")
local lock = lock:new("cache_locks")
local elapsed, err = lock:lock("foo")
ngx.say("lock: ", elapsed, ", ", err)
end
collectgarbage("collect")
';
}
--- request
GET /t
--- response_body
lock: 0, nil
lock: 0, nil
--- no_error_log
[error]
=== TEST 2: serial lock and unlock
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
for i = 1, 2 do
local lock = lock:new("cache_locks")
local elapsed, err = lock:lock("foo")
ngx.say("lock: ", elapsed, ", ", err)
local ok, err = lock:unlock()
if not ok then
ngx.say("failed to unlock: ", err)
end
ngx.say("unlock: ", ok)
end
';
}
--- request
GET /t
--- response_body
lock: 0, nil
unlock: 1
lock: 0, nil
unlock: 1
--- no_error_log
[error]
=== TEST 3: timed out locks
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
for i = 1, 2 do
local lock1 = lock:new("cache_locks", { timeout = 0.01 })
local lock2 = lock:new("cache_locks", { timeout = 0.01 })
local elapsed, err = lock1:lock("foo")
ngx.say("lock 1: lock: ", elapsed, ", ", err)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
local ok, err = lock1:unlock()
ngx.say("lock 1: unlock: ", ok, ", ", err)
local ok, err = lock2:unlock()
ngx.say("lock 2: unlock: ", ok, ", ", err)
end
';
}
--- request
GET /t
--- response_body
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
--- no_error_log
[error]
=== TEST 4: waited locks
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
';
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.12[6-9] nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 5: waited locks (custom step)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { step = 0.01 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
';
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.1[4-5]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 6: waited locks (custom ratio)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { ratio = 3 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
';
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.1[2]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 7: waited locks (custom max step)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks")
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { max_step = 0.05 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
';
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
sub thread: unlock: 1
main thread: lock: 0.11[2-4]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 8: lock expired by itself
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local resty_lock = require "resty.lock"
local key = "blah"
local t, err = ngx.thread.spawn(function ()
local lock = resty_lock:new("cache_locks", { exptime = 0.1 })
local elapsed, err = lock:lock(key)
ngx.say("sub thread: lock: ", elapsed, " ", err)
ngx.sleep(0.1)
-- ngx.say("sub thread: unlock: ", lock:unlock(key))
end)
local lock = resty_lock:new("cache_locks", { max_step = 0.05 })
local elapsed, err = lock:lock(key)
ngx.say("main thread: lock: ", elapsed, " ", err)
ngx.say("main thread: unlock: ", lock:unlock())
';
}
--- request
GET /t
--- response_body_like chop
^sub thread: lock: 0 nil
main thread: lock: 0.11[2-4]\d* nil
main thread: unlock: 1
$
--- no_error_log
[error]
=== TEST 9: ref & unref (1 at most)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
local memo = lock.memo
local ref = lock.ref_obj("foo")
ngx.say(#memo)
lock.unref_obj(ref)
ngx.say(#memo)
ref = lock.ref_obj("bar")
ngx.say(#memo)
lock.unref_obj(ref)
ngx.say(#memo)
';
}
--- request
GET /t
--- response_body
1
0
1
0
--- no_error_log
[error]
=== TEST 10: ref & unref (2 at most)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
local memo = lock.memo
for i = 1, 2 do
local refs = {}
refs[1] = lock.ref_obj("foo")
ngx.say(#memo)
refs[2] = lock.ref_obj("bar")
ngx.say(#memo)
lock.unref_obj(refs[1])
ngx.say(#memo)
lock.unref_obj(refs[2])
ngx.say(#memo)
end
';
}
--- request
GET /t
--- response_body
1
2
2
2
2
2
1
1
--- no_error_log
[error]
=== TEST 11: lock on a nil key
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
local lock = lock:new("cache_locks")
local elapsed, err = lock:lock(nil)
if elapsed then
ngx.say("lock: ", elapsed, ", ", err)
local ok, err = lock:unlock()
if not ok then
ngx.say("failed to unlock: ", err)
end
else
ngx.say("failed to lock: ", err)
end
';
}
--- request
GET /t
--- response_body
failed to lock: nil key
--- no_error_log
[error]
=== TEST 12: same shdict, multiple locks
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
local memo = lock.memo
local lock1 = lock:new("cache_locks", { timeout = 0.01 })
for i = 1, 3 do
lock1:lock("lock_key")
lock1:unlock()
collectgarbage("collect")
end
local lock2 = lock:new("cache_locks", { timeout = 0.01 })
local lock3 = lock:new("cache_locks", { timeout = 0.01 })
lock2:lock("lock_key")
lock3:lock("lock_key")
collectgarbage("collect")
ngx.say(#memo)
lock2:unlock()
lock3:unlock()
collectgarbage("collect")
';
}
--- request
GET /t
--- response_body
4
--- no_error_log
[error]
=== TEST 13: timed out locks (0 timeout)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lock = require "resty.lock"
for i = 1, 2 do
local lock1 = lock:new("cache_locks", { timeout = 0 })
local lock2 = lock:new("cache_locks", { timeout = 0 })
local elapsed, err = lock1:lock("foo")
ngx.say("lock 1: lock: ", elapsed, ", ", err)
local elapsed, err = lock2:lock("foo")
ngx.say("lock 2: lock: ", elapsed, ", ", err)
local ok, err = lock1:unlock()
ngx.say("lock 1: unlock: ", ok, ", ", err)
local ok, err = lock2:unlock()
ngx.say("lock 2: unlock: ", ok, ", ", err)
end
';
}
--- request
GET /t
--- response_body
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
lock 1: lock: 0, nil
lock 2: lock: nil, timeout
lock 1: unlock: 1, nil
lock 2: unlock: nil, unlocked
--- no_error_log
[error]
View file

@@ -1,53 +0,0 @@
# Valgrind suppression file for LuaJIT 2.0.
{
Optimized string compare
Memcheck:Addr4
fun:lj_str_cmp
}
{
Optimized string compare
Memcheck:Addr1
fun:lj_str_cmp
}
{
Optimized string compare
Memcheck:Addr4
fun:lj_str_new
}
{
Optimized string compare
Memcheck:Addr1
fun:lj_str_new
}
{
Optimized string compare
Memcheck:Cond
fun:lj_str_new
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_add_event
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:index
fun:expand_dynamic_string_token
fun:_dl_map_object
fun:map_doit
fun:_dl_catch_error
fun:do_preload
fun:dl_main
fun:_dl_sysdep_start
fun:_dl_start
}
View file

@@ -1 +0,0 @@
*.t linguist-language=Text
View file

@@ -1,10 +0,0 @@
*.swp
*.swo
*~
go
t/servroot/
reindex
nginx
ctags
tags
a.lua
View file

@@ -1,19 +0,0 @@
OPENRESTY_PREFIX=/usr/local/openresty
PREFIX ?= /usr/local
LUA_INCLUDE_DIR ?= $(PREFIX)/include
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
INSTALL ?= install
.PHONY: all test install
all: ;
install: all
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/lrucache
$(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/
$(INSTALL) lib/resty/lrucache/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/lrucache/
test: all
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t
View file

@@ -1,293 +0,0 @@
Name
====
lua-resty-lrucache - in-Lua LRU Cache based on LuaJIT FFI
Table of Contents
=================
* [Name](#name)
* [Status](#status)
* [Synopsis](#synopsis)
* [Description](#description)
* [Methods](#methods)
* [new](#new)
* [set](#set)
* [get](#get)
* [delete](#delete)
* [Prerequisites](#prerequisites)
* [Installation](#installation)
* [TODO](#todo)
* [Community](#community)
* [English Mailing List](#english-mailing-list)
* [Chinese Mailing List](#chinese-mailing-list)
* [Bugs and Patches](#bugs-and-patches)
* [Author](#author)
* [Copyright and License](#copyright-and-license)
* [See Also](#see-also)
Status
======
This library is still under active development and is considered production ready.
Synopsis
========
```lua
-- file myapp.lua: example "myapp" module
local _M = {}
-- alternatively: local lrucache = require "resty.lrucache.pureffi"
local lrucache = require "resty.lrucache"
-- we need to initialize the cache on the lua module level so that
-- it can be shared by all the requests served by each nginx worker process:
local c, err = lrucache.new(200) -- allow up to 200 items in the cache
if not c then
return error("failed to create the cache: " .. (err or "unknown"))
end
function _M.go()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("dog", { age = 10 }, 0.1) -- expire in 0.1 sec
c:delete("dog")
end
return _M
```
```nginx
# nginx.conf
http {
lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";
server {
listen 8080;
location = /t {
content_by_lua '
require("myapp").go()
';
}
}
}
```
Description
===========
This library implements a simple LRU cache for [OpenResty](http://openresty.org) and the [ngx_lua](https://github.com/openresty/lua-nginx-module) module.
This cache also supports expiration time.
The LRU cache resides completely in the Lua VM and is subject to Lua GC. So do not expect
it to get shared across the OS process boundary. The upside is that you can cache
arbitrary complex Lua values (like deep nested Lua tables) without the overhead of
serialization (as with `ngx_lua`'s
[shared dictionary API](https://github.com/openresty/lua-nginx-module#lua_shared_dict)).
The downside is that your cache is always limited to the current OS process
(like the current nginx worker process). It does not really make much sense to use this
library in the context of [init_by_lua](https://github.com/openresty/lua-nginx-module#init_by_lua)
because the cache will not get shared by any of the worker processes
(unless you just want to "warm up" the cache with predefined items which will get
inherited by the workers via `fork`).
There are two different implementations included in this library, in the form of
two classes: `resty.lrucache` and `resty.lrucache.pureffi`. They share exactly the same API. The only difference is that the latter
is a pure FFI implementation that also implements an FFI-based hash table
for the cache lookup while the former uses native Lua tables for it.
If the cache hit rate is relatively high, you should use the `resty.lrucache` class which is faster than `resty.lrucache.pureffi`.
But if the cache hit rate is relatively low and there can be a *lot* of
variations of keys inserted into and removed from the cache, then you should use `resty.lrucache.pureffi` instead, because
Lua tables are not good at removing keys frequently by design and you
would see the `resizetab` function call in the LuaJIT runtime being very hot in
[on-CPU flame graphs](https://github.com/openresty/stapxx#lj-lua-stacks) if
you use the `resty.lrucache` class instead of `resty.lrucache.pureffi` in this use case.
[Back to TOC](#table-of-contents)
Methods
=======
To load this library,
1. you need to specify this library's path in ngx_lua's [lua_package_path](https://github.com/openresty/lua-nginx-module#lua_package_path) directive. For example, `lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";`.
2. you use `require` to load the library into a local Lua variable:
```lua
local lrucache = require "resty.lrucache"
```
or
```lua
local lrucache = require "resty.lrucache.pureffi"
```
[Back to TOC](#table-of-contents)
new
---
`syntax: cache, err = lrucache.new(max_items [, load_factor])`
Creates a new cache instance. If failed, returns `nil` and a string describing the error.
The `max_items` argument specifies the maximal number of items held in the cache.
The `load_factor` argument designates the "load factor" of the FFI-based hash table used internally by `resty.lrucache.pureffi`;
the default value is 0.5 (i.e. 50%); if the load factor is specified, it will be clamped
to the range of `[0.1, 1]` (i.e. if the load factor is greater than 1, it will be saturated to
1; likewise, if the load factor is smaller than `0.1`, it will be clamped to `0.1`). This argument is only meaningful for `resty.lrucache.pureffi`.
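For example (a sketch; the item count and load factor below are arbitrary):
```lua
local lrucache = require "resty.lrucache"

local c, err = lrucache.new(1000)        -- hold at most 1000 items
if not c then
    error("failed to create the cache: " .. (err or "unknown"))
end

-- the pure-FFI variant additionally accepts the load factor for its
-- internal FFI-based hash table (clamped to the range [0.1, 1]):
local lrucache_ffi = require "resty.lrucache.pureffi"

local c2, err2 = lrucache_ffi.new(1000, 0.75)
if not c2 then
    error("failed to create the pureffi cache: " .. (err2 or "unknown"))
end
```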
[Back to TOC](#table-of-contents)
set
---
`syntax: cache:set(key, value, ttl)`
Sets a key with a value and an expiration time.
The `ttl` argument specifies the expiration time period. The time value is in seconds, but fractional values like `0.25` are also allowed. A nil `ttl` argument means the item never expires (which is the default).
When the cache is full, the cache will automatically evict the least recently used item.
[Back to TOC](#table-of-contents)
get
---
`syntax: data, stale_data = cache:get(key)`
Fetches a value with the key. If the key does not exist in the cache or has already expired, a `nil` value will be returned.
Starting from `v0.03`, the stale data is also returned as the second return value if available.
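A small sketch of how `set` and `get` interact with the `ttl` and the stale-data return value, reusing the cache object `c` from the example above (the 0.2-second sleep only exists to force expiry):
```lua
c:set("dog", 32, 0.1)                 -- the item expires after 0.1 seconds

ngx.say("dog: ", c:get("dog"))        -- fresh hit: 32

ngx.sleep(0.2)                        -- wait for the item to expire

local val, stale = c:get("dog")
ngx.say("fresh value: ", val)         -- nil: the item has expired
ngx.say("stale value: ", stale)       -- 32: still available as stale data
```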
[Back to TOC](#table-of-contents)
delete
------
`syntax: cache:delete(key)`
Removes an item specified by the key from the cache.
[Back to TOC](#table-of-contents)
Prerequisites
=============
* [LuaJIT](http://luajit.org) 2.0+
* [ngx_lua](https://github.com/openresty/lua-nginx-module) 0.8.10+
[Back to TOC](#table-of-contents)
Installation
============
It is recommended to use the latest [ngx_openresty bundle](http://openresty.org) directly. At least ngx_openresty 1.4.2.9 is required. And you need to enable LuaJIT when building your ngx_openresty
bundle by passing the `--with-luajit` option to its `./configure` script. No extra Nginx configuration is required.
If you want to use this library with your own Nginx build (with ngx_lua), then you need to
ensure you are using at least ngx_lua 0.8.10.
Also, you need to configure
the [lua_package_path](https://github.com/openresty/lua-nginx-module#lua_package_path) directive to
add the path of your lua-resty-lrucache source tree to ngx_lua's Lua module search path, as in
```nginx
# nginx.conf
http {
lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";
...
}
```
and then load the library in Lua:
```lua
local lrucache = require "resty.lrucache"
```
[Back to TOC](#table-of-contents)
TODO
====
* add new method `get_stale` for fetching already expired items.
* add new method `flush_all` for flushing out everything in the cache.
[Back to TOC](#table-of-contents)
Community
=========
[Back to TOC](#table-of-contents)
English Mailing List
--------------------
The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers.
[Back to TOC](#table-of-contents)
Chinese Mailing List
--------------------
The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers.
[Back to TOC](#table-of-contents)
Bugs and Patches
================
Please report bugs or submit patches by
1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-lrucache/issues),
1. or posting to the [OpenResty community](#community).
[Back to TOC](#table-of-contents)
Author
======
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
Shuxin Yang, CloudFlare Inc.
[Back to TOC](#table-of-contents)
Copyright and License
=====================
This module is licensed under the BSD license.
Copyright (C) 2014-2015, by Yichun "agentzh" Zhang, CloudFlare Inc.
Copyright (C) 2014-2015, by Shuxin Yang, CloudFlare Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[Back to TOC](#table-of-contents)
See Also
========
* the ngx_lua module: https://github.com/chaoslawful/lua-nginx-module
* OpenResty: http://openresty.org
[Back to TOC](#table-of-contents)
View file

@@ -1,229 +0,0 @@
-- Copyright (C) Yichun Zhang (agentzh)
local ffi = require "ffi"
local ffi_new = ffi.new
local ffi_sizeof = ffi.sizeof
local ffi_cast = ffi.cast
local ffi_fill = ffi.fill
local ngx_now = ngx.now
local uintptr_t = ffi.typeof("uintptr_t")
local setmetatable = setmetatable
local tonumber = tonumber
-- queue data types
--
-- this queue is a double-ended queue and the first node
-- is reserved for the queue itself.
-- the implementation is mostly borrowed from nginx's ngx_queue_t data
-- structure.
ffi.cdef[[
typedef struct lrucache_queue_s lrucache_queue_t;
struct lrucache_queue_s {
double expire; /* in seconds */
lrucache_queue_t *prev;
lrucache_queue_t *next;
};
]]
local queue_arr_type = ffi.typeof("lrucache_queue_t[?]")
local queue_ptr_type = ffi.typeof("lrucache_queue_t*")
local queue_type = ffi.typeof("lrucache_queue_t")
local NULL = ffi.null
-- queue utility functions
local function queue_insert_tail(h, x)
local last = h[0].prev
x.prev = last
last.next = x
x.next = h
h[0].prev = x
end
local function queue_init(size)
if not size then
size = 0
end
local q = ffi_new(queue_arr_type, size + 1)
ffi_fill(q, ffi_sizeof(queue_type, size + 1), 0)
if size == 0 then
q[0].prev = q
q[0].next = q
else
local prev = q[0]
for i = 1, size do
local e = q[i]
prev.next = e
e.prev = prev
prev = e
end
local last = q[size]
last.next = q
q[0].prev = last
end
return q
end
local function queue_is_empty(q)
-- print("q: ", tostring(q), "q.prev: ", tostring(q), ": ", q == q.prev)
return q == q[0].prev
end
local function queue_remove(x)
local prev = x.prev
local next = x.next
next.prev = prev
prev.next = next
-- for debugging purpose only:
x.prev = NULL
x.next = NULL
end
local function queue_insert_head(h, x)
x.next = h[0].next
x.next.prev = x
x.prev = h
h[0].next = x
end
local function queue_last(h)
return h[0].prev
end
local function queue_head(h)
return h[0].next
end
-- true module stuffs
local _M = {
_VERSION = '0.04'
}
local mt = { __index = _M }
local function ptr2num(ptr)
return tonumber(ffi_cast(uintptr_t, ptr))
end
function _M.new(size)
if size < 1 then
return nil, "size too small"
end
local self = {
keys = {},
hasht = {},
free_queue = queue_init(size),
cache_queue = queue_init(),
key2node = {},
node2key = {},
}
return setmetatable(self, mt)
end
function _M.get(self, key)
local hasht = self.hasht
local val = hasht[key]
if not val then
return nil
end
local node = self.key2node[key]
-- print(key, ": moving node ", tostring(node), " to cache queue head")
local cache_queue = self.cache_queue
queue_remove(node)
queue_insert_head(cache_queue, node)
if node.expire >= 0 and node.expire < ngx_now() then
-- print("expired: ", node.expire, " > ", ngx_now())
return nil, val
end
return val
end
function _M.delete(self, key)
self.hasht[key] = nil
local key2node = self.key2node
local node = key2node[key]
if not node then
return false
end
key2node[key] = nil
self.node2key[ptr2num(node)] = nil
queue_remove(node)
queue_insert_tail(self.free_queue, node)
return true
end
function _M.set(self, key, value, ttl)
local hasht = self.hasht
hasht[key] = value
local key2node = self.key2node
local node = key2node[key]
if not node then
local free_queue = self.free_queue
local node2key = self.node2key
if queue_is_empty(free_queue) then
-- evict the least recently used key
-- assert(not queue_is_empty(self.cache_queue))
node = queue_last(self.cache_queue)
local oldkey = node2key[ptr2num(node)]
-- print(key, ": evicting oldkey: ", oldkey, ", oldnode: ",
-- tostring(node))
if oldkey then
hasht[oldkey] = nil
key2node[oldkey] = nil
end
else
-- take a free queue node
node = queue_head(free_queue)
-- print(key, ": get a new free node: ", tostring(node))
end
node2key[ptr2num(node)] = key
key2node[key] = node
end
queue_remove(node)
queue_insert_head(self.cache_queue, node)
if ttl then
node.expire = ngx_now() + ttl
else
node.expire = -1
end
end
return _M
View file

@@ -1,534 +0,0 @@
-- Copyright (C) Yichun Zhang (agentzh)
-- Copyright (C) Shuxin Yang
--[[
This module implements a key/value cache store. We adopt LRU as our
replace/evict policy. Each key/value pair is tagged with a Time-to-Live (TTL);
from user's perspective, stale pairs are automatically removed from the cache.
Why FFI
-------
In Lua, the expression "table[key] = nil" does not *PHYSICALLY* remove the value
associated with the key; it just sets the value to nil! So the table will
keep growing with a large number of key/nil pairs, which will not be purged until
the resize() operation is called.
This "feature" is terribly ill-suited to what we need. Therefore we have to
rely on FFI to build a hash-table where any entry can be physically deleted
immediately.
Under the hood:
--------------
In concept, we introduce three data structures to implement the cache store:
1. key/value vector for storing keys and values.
2. a queue to mimic the LRU.
3. hash-table for looking up the value for a given key.
Unfortunately, efficiency and clarity usually come at each other's cost. The
data structures we are using are slightly more complicated than what we
described above.
o. Lua does not have an efficient way to store a vector of pairs. So we use
two vectors for the key/value pairs: one for keys and the other for values
(_M.key_v and _M.val_v, respectively), and the i-th key corresponds to the
i-th value.
A key/value pair is identified by the "id" field in a "node" (we shall
discuss node later)
o. The queue is nothing more than a doubly-linked list of "node" linked via
lrucache_pureffi_queue_s::{next|prev} fields.
o. The hash-table has two parts:
- _M.bucket_v[], a vector of buckets indexed by hash value, and
- each bucket, a singly-linked list of "node"s chained via the
lrucache_pureffi_queue_s::conflict field.
A key must be a string, and the hash value of a key is evaluated by:
crc32(key-cast-to-pointer) % size(_M.bucket_v).
We mandate size(_M.bucket_v) being a power-of-two in order to avoid
expensive modulo operation.
At the heart of the module is an array of "node" (of type
lrucache_pureffi_queue_s). A node:
- keeps the meta-data of its corresponding key/value pair
(embodied by the "id", and "expire" field);
- is a part of LRU queue (embodied by "prev" and "next" fields);
- is a part of hash-table (embodied by the "conflict" field).
]]
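
One detail from the design notes above is worth spelling out: because size(_M.bucket_v) is forced to be a power of two, the "% size(_M.bucket_v)" reduces to a bitwise AND, which is exactly what hash_string() below does with band(hv, self.bucket_sz - 1). A tiny Go illustration of the same trick (a hypothetical helper, for exposition only):

package main

import "fmt"

// bucketIndex mimics hash_string(): when the bucket count is a power of
// two, "hash % bucketSz" can be computed with a cheap bitwise AND.
func bucketIndex(hash uint32, bucketSz uint32) uint32 {
    return hash & (bucketSz - 1) // valid only because bucketSz is 2^k
}

func main() {
    // with 8 buckets, the low three bits of the hash select the bucket
    fmt.Println(bucketIndex(0xDEADBEEF, 8)) // 7
}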
local ffi = require "ffi"
local bit = require "bit"
local ffi_new = ffi.new
local ffi_sizeof = ffi.sizeof
local ffi_cast = ffi.cast
local ffi_fill = ffi.fill
local ngx_now = ngx.now
local uintptr_t = ffi.typeof("uintptr_t")
local c_str_t = ffi.typeof("const char*")
local int_t = ffi.typeof("int")
local int_array_t = ffi.typeof("int[?]")
local crc_tab = ffi.new("const unsigned int[256]", {
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F,
0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2,
0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9,
0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423,
0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106,
0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D,
0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950,
0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7,
0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA,
0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84,
0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB,
0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E,
0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55,
0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28,
0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F,
0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69,
0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC,
0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693,
0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D });
local setmetatable = setmetatable
local tonumber = tonumber
local tostring = tostring
local type = type
local brshift = bit.rshift
local bxor = bit.bxor
local band = bit.band
local ok, tab_new = pcall(require, "table.new")
if not ok then
tab_new = function (narr, nrec) return {} end
end
-- queue data types
--
-- this queue is a double-ended queue and the first node
-- is reserved for the queue itself.
-- the implementation is mostly borrowed from nginx's ngx_queue_t data
-- structure.
ffi.cdef[[
/* A lrucache_pureffi_queue_s node hooks together three data structures:
* o. the key/value store as embodied by the "id" (which is in essence the
* identifier of the key/value pair) and the "expire" field (which is
* metadata of the corresponding key/value pair).
* o. The LRU queue via the prev/next fields.
* o. The hash-table as embodied by the "conflict" field.
*/
typedef struct lrucache_pureffi_queue_s lrucache_pureffi_queue_t;
struct lrucache_pureffi_queue_s {
/* Each node is assigned a unique ID at construction time, and the
* ID remains immutable, regardless of whether the node is in the active
* list or the free list. The queue header is assigned ID 0. Since the
* queue header is a sentinel node, 0 denotes "invalid ID".
*
* Intuitively, we can view the "id" as the identifier of key/value
* pair.
*/
int id;
/* The bucket of the hash-table is implemented as a singly-linked list.
* The "conflict" refers to the ID of the next node in the bucket.
*/
int conflict;
double expire; /* in seconds */
lrucache_pureffi_queue_t *prev;
lrucache_pureffi_queue_t *next;
};
]]
local queue_arr_type = ffi.typeof("lrucache_pureffi_queue_t[?]")
--local queue_ptr_type = ffi.typeof("lrucache_pureffi_queue_t*")
local queue_type = ffi.typeof("lrucache_pureffi_queue_t")
local NULL = ffi.null
--========================================================================
--
-- Queue utility functions
--
--========================================================================
-- Append the element "x" to the given queue "h".
local function queue_insert_tail(h, x)
local last = h[0].prev
x.prev = last
last.next = x
x.next = h
h[0].prev = x
end
--[[
Allocate a queue with size + 1 elements. Elements are linked together in a
circular way, i.e. the last element's "next" points to the first element,
while the first element's "prev" element points to the last element.
]]
local function queue_init(size)
if not size then
size = 0
end
local q = ffi_new(queue_arr_type, size + 1)
ffi_fill(q, ffi_sizeof(queue_type, size + 1), 0)
if size == 0 then
q[0].prev = q
q[0].next = q
else
local prev = q[0]
for i = 1, size do
local e = q[i]
e.id = i
prev.next = e
e.prev = prev
prev = e
end
local last = q[size]
last.next = q
q[0].prev = last
end
return q
end
local function queue_is_empty(q)
-- print("q: ", tostring(q), "q.prev: ", tostring(q), ": ", q == q.prev)
return q == q[0].prev
end
local function queue_remove(x)
local prev = x.prev
local next = x.next
next.prev = prev
prev.next = next
-- for debugging purpose only:
x.prev = NULL
x.next = NULL
end
-- Insert the element "x" at the head of the given queue "h"
local function queue_insert_head(h, x)
x.next = h[0].next
x.next.prev = x
x.prev = h
h[0].next = x
end
local function queue_last(h)
return h[0].prev
end
local function queue_head(h)
return h[0].next
end
--========================================================================
--
-- Miscellaneous Utility Functions
--
--========================================================================
local function ptr2num(ptr)
return tonumber(ffi_cast(uintptr_t, ptr))
end
local function crc32_ptr(ptr)
local p = brshift(ptr2num(ptr), 3)
local b = band(p, 255)
local crc32 = crc_tab[b]
b = band(brshift(p, 8), 255)
crc32 = bxor(brshift(crc32, 8), crc_tab[band(bxor(crc32, b), 255)])
b = band(brshift(p, 16), 255)
crc32 = bxor(brshift(crc32, 8), crc_tab[band(bxor(crc32, b), 255)])
--b = band(brshift(p, 24), 255)
--crc32 = bxor(brshift(crc32, 8), crc_tab[band(bxor(crc32, b), 255)])
return crc32
end
--========================================================================
--
-- Implementation of "export" functions
--
--========================================================================
local _M = {
_VERSION = '0.04'
}
local mt = { __index = _M }
-- "size" specifies the maximum number of entries in the LRU queue, and the
-- "load_factor" designates the 'load factor' of the hash-table we are using
-- internally. The default value of load-factor is 0.5 (i.e. 50%); if the
-- load-factor is specified, it will be clamped to the range of [0.1, 1](i.e.
-- if load-factor is greater than 1, it will be saturated to 1, likewise,
-- if load-factor is smaller than 0.1, it will be clamped to 0.1).
function _M.new(size, load_factor)
if size < 1 then
return nil, "size too small"
end
-- Determine bucket size, which must be power of two.
local load_f = load_factor
if not load_factor then
load_f = 0.5
elseif load_factor > 1 then
load_f = 1
elseif load_factor < 0.1 then
load_f = 0.1
end
local bs_min = size / load_f
-- The bucket_sz *MUST* be a power-of-two. See the hash_string().
local bucket_sz = 1
repeat
bucket_sz = bucket_sz * 2
until bucket_sz >= bs_min
local self = {
size = size,
bucket_sz = bucket_sz,
free_queue = queue_init(size),
cache_queue = queue_init(0),
node_v = nil,
key_v = tab_new(size, 0),
val_v = tab_new(size, 0),
bucket_v = ffi_new(int_array_t, bucket_sz)
}
-- "note_v" is an array of all the nodes used in the LRU queue. Exprpession
-- node_v[i] evaluates to the element of ID "i".
self.node_v = self.free_queue
-- Allocate the array-part of the key_v, val_v, bucket_v.
--local key_v = self.key_v
--local val_v = self.val_v
--local bucket_v = self.bucket_v
ffi_fill(self.bucket_v, ffi_sizeof(int_t, bucket_sz), 0)
return setmetatable(self, mt)
end
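
_M.new above derives the bucket count by clamping the load factor to [0.1, 1] and rounding size / load_factor up to the next power of two. The same computation as a standalone Go sketch (a restatement for clarity, not controller code); its outputs line up with TESTs 5-7 in the pureffi test file further down:

package main

import "fmt"

// bucketSize mirrors _M.new: clamp the load factor to [0.1, 1]
// (defaulting to 0.5 when zero) and round size/loadFactor up to the next
// power of two, never returning less than 2.
func bucketSize(size int, loadFactor float64) int {
    lf := loadFactor
    switch {
    case lf == 0:
        lf = 0.5
    case lf > 1:
        lf = 1
    case lf < 0.1:
        lf = 0.1
    }
    min := float64(size) / lf
    b := 1
    for float64(b) < min {
        b *= 2
    }
    if b < 2 {
        b = 2
    }
    return b
}

func main() {
    fmt.Println(bucketSize(1, 0.25)) // 4
    fmt.Println(bucketSize(3, 0.05)) // 32 (load factor clamped to 0.1)
    fmt.Println(bucketSize(3, 2.1))  // 4  (load factor saturated to 1)
}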
local function hash_string(self, str)
local c_str = ffi_cast(c_str_t, str)
local hv = crc32_ptr(c_str)
hv = band(hv, self.bucket_sz - 1)
-- Hint: bucket is 0-based
return hv
end
-- Search the node associated with the key in the bucket, if found returns
-- the the id of the node, and the id of its previous node in the conflict list.
-- The "bucket_hdr_id" is the ID of the first node in the bucket
local function _find_node_in_bucket(key, key_v, node_v, bucket_hdr_id)
if bucket_hdr_id ~= 0 then
local prev = 0
local cur = bucket_hdr_id
while cur ~= 0 and key_v[cur] ~= key do
prev = cur
cur = node_v[cur].conflict
end
if cur ~= 0 then
return cur, prev
end
end
end
-- Return the node corresponding to the key/val.
local function find_key(self, key)
local key_hash = hash_string(self, key)
return _find_node_in_bucket(key, self.key_v, self.node_v,
self.bucket_v[key_hash])
end
--[[ This function tries to
1. Remove the given key and the associated value from the key/value store,
2. Remove the entry associated with the key from the hash-table.
NOTE: all queues remain intact.
If there was a node bound to the key/val, return that node; otherwise,
nil is returned.
]]
local function remove_key(self, key)
local key_v = self.key_v
local val_v = self.val_v
local node_v = self.node_v
local bucket_v = self.bucket_v
local key_hash = hash_string(self, key)
local cur, prev =
_find_node_in_bucket(key, key_v, node_v, bucket_v[key_hash])
if cur then
-- In an attempt to make key and val dead.
key_v[cur] = nil
val_v[cur] = nil
-- Remove the node from the hash table
local next_node = node_v[cur].conflict
if prev ~= 0 then
node_v[prev].conflict = next_node
else
bucket_v[key_hash] = next_node
end
node_v[cur].conflict = 0
return cur
end
end
--[[ Bind the key/val with the given node, and insert the node into the Hashtab.
NOTE: this function does not touch any queue
]]
local function insert_key(self, key, val, node)
-- Bind the key/val with the node
local node_id = node.id
self.key_v[node_id] = key
self.val_v[node_id] = val
-- Insert the node into the hash-table
local key_hash = hash_string(self, key)
local bucket_v = self.bucket_v
node.conflict = bucket_v[key_hash]
bucket_v[key_hash] = node_id
end
function _M.get(self, key)
if type(key) ~= "string" then
key = tostring(key)
end
local node_id = find_key(self, key)
if not node_id then
return nil
end
-- print(key, ": moving node ", tostring(node), " to cache queue head")
local cache_queue = self.cache_queue
local node = self.node_v + node_id
queue_remove(node)
queue_insert_head(cache_queue, node)
local expire = node.expire
if expire >= 0 and expire < ngx_now() then
-- print("expired: ", node.expire, " > ", ngx_now())
return nil, self.val_v[node_id]
end
return self.val_v[node_id]
end
function _M.delete(self, key)
if type(key) ~= "string" then
key = tostring(key)
end
local node_id = remove_key(self, key);
if not node_id then
return false
end
local node = self.node_v + node_id
queue_remove(node)
queue_insert_tail(self.free_queue, node)
return true
end
function _M.set(self, key, value, ttl)
if type(key) ~= "string" then
key = tostring(key)
end
local node_id = find_key(self, key)
local node
if not node_id then
local free_queue = self.free_queue
if queue_is_empty(free_queue) then
-- evict the least recently used key
-- assert(not queue_is_empty(self.cache_queue))
node = queue_last(self.cache_queue)
remove_key(self, self.key_v[node.id])
else
-- take a free queue node
node = queue_head(free_queue)
-- print(key, ": get a new free node: ", tostring(node))
end
-- insert the key
insert_key(self, key, value, node)
else
node = self.node_v + node_id
self.val_v[node_id] = value
end
queue_remove(node)
queue_insert_head(self.cache_queue, node)
if ttl then
node.expire = ngx_now() + ttl
else
node.expire = -1
end
end
return _M

View file

@@ -1,121 +0,0 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(1);
plan tests => repeat_each() * 13;
#no_diff();
#no_long_string();
my $pwd = cwd();
our $HttpConfig = <<"_EOC_";
lua_package_path "$pwd/lib/?.lua;$pwd/../lua-resty-core/lib/?.lua;;";
#init_by_lua '
#local v = require "jit.v"
#v.on("$Test::Nginx::Util::ErrLogFile")
#require "resty.core"
#';
_EOC_
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval
"$::HttpConfig"
. qq!
init_by_lua '
local function log(...)
ngx.log(ngx.WARN, ...)
end
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", c:get("dog"))
log("cat: ", c:get("cat"))
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", c:get("dog"))
log("cat: ", c:get("cat"))
c:delete("dog")
c:delete("cat")
log("dog: ", c:get("dog"))
log("cat: ", c:get("cat"))
';
!
--- config
location = /t {
echo ok;
}
--- request
GET /t
--- response_body
ok
--- no_error_log
[error]
--- error_log
dog: 32
cat: 56
dog: 32
cat: 56
dog: nil
cat: nil
=== TEST 2: sanity
--- http_config eval
"$::HttpConfig"
. qq!
init_by_lua '
lrucache = require "resty.lrucache"
flv_index, err = lrucache.new(200)
if not flv_index then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_meta, err = lrucache.new(200)
if not flv_meta then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_channel, err = lrucache.new(200)
if not flv_channel then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
ngx.log(ngx.WARN, "3 lrucache initialized.")
';
!
--- config
location = /t {
echo ok;
}
--- request
GET /t
--- response_body
ok
--- no_error_log
[error]
--- error_log
3 lrucache initialized.

View file

@@ -1,75 +0,0 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
#no_diff();
#no_long_string();
my $pwd = cwd();
our $HttpConfig = <<"_EOC_";
lua_package_path "$pwd/lib/?.lua;$pwd/../lua-resty-core/lib/?.lua;;";
#init_by_lua '
#local v = require "jit.v"
#v.on("$Test::Nginx::Util::ErrLogFile")
#require "resty.core"
#';
_EOC_
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
local lrucache = require "resty.lrucache.pureffi"
local c2 = lrucache.new(2)
ngx.say("dog: ", c2:get("dog"))
ngx.say("cat: ", c2:get("cat"))
c2:set("dog", 9)
c2:set("cat", "hi")
ngx.say("dog: ", c2:get("dog"))
ngx.say("cat: ", c2:get("cat"))
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: nil
cat: nil
dog: 9
cat: hi
dog: 32
cat: 56
--- no_error_log
[error]

View file

@@ -1,121 +0,0 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(1);
plan tests => repeat_each() * 13;
#no_diff();
#no_long_string();
my $pwd = cwd();
our $HttpConfig = <<"_EOC_";
lua_package_path "$pwd/lib/?.lua;$pwd/../lua-resty-core/lib/?.lua;;";
#init_by_lua '
#local v = require "jit.v"
#v.on("$Test::Nginx::Util::ErrLogFile")
#require "resty.core"
#';
_EOC_
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval
"$::HttpConfig"
. qq!
init_by_lua '
local function log(...)
ngx.log(ngx.WARN, ...)
end
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", c:get("dog"))
log("cat: ", c:get("cat"))
c:set("dog", 32)
c:set("cat", 56)
log("dog: ", c:get("dog"))
log("cat: ", c:get("cat"))
c:delete("dog")
c:delete("cat")
log("dog: ", c:get("dog"))
log("cat: ", c:get("cat"))
';
!
--- config
location = /t {
echo ok;
}
--- request
GET /t
--- response_body
ok
--- no_error_log
[error]
--- error_log
dog: 32
cat: 56
dog: 32
cat: 56
dog: nil
cat: nil
=== TEST 2: sanity
--- http_config eval
"$::HttpConfig"
. qq!
init_by_lua '
lrucache = require "resty.lrucache.pureffi"
flv_index, err = lrucache.new(200)
if not flv_index then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_meta, err = lrucache.new(200)
if not flv_meta then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
flv_channel, err = lrucache.new(200)
if not flv_channel then
ngx.log(ngx.ERR, "failed to create the cache: ", err)
return
end
ngx.log(ngx.WARN, "3 lrucache initialized.")
';
!
--- config
location = /t {
echo ok;
}
--- request
GET /t
--- response_body
ok
--- no_error_log
[error]
--- error_log
3 lrucache initialized.

View file

@@ -1,390 +0,0 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
#no_diff();
#no_long_string();
my $pwd = cwd();
our $HttpConfig = <<"_EOC_";
lua_package_path "$pwd/lib/?.lua;$pwd/../lua-resty-core/lib/?.lua;;";
#init_by_lua '
#local v = require "jit.v"
#v.on("$Test::Nginx::Util::ErrLogFile")
#require "resty.core"
#';
_EOC_
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:delete("dog")
c:delete("cat")
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: 32
cat: 56
dog: nil
cat: nil
--- no_error_log
[error]
=== TEST 2: evict existing items
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
if not c then
ngx.say("failed to init lrucace: ", err)
return
end
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("bird", 76)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
ngx.say("bird: ", c:get("bird"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
--- no_error_log
[error]
=== TEST 3: evict existing items (reordered, get should also count)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
if not c then
ngx.say("failed to init lrucace: ", err)
return
end
c:set("cat", 56)
c:set("dog", 32)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("bird", 76)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
ngx.say("bird: ", c:get("bird"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
--- no_error_log
[error]
=== TEST 4: ttl
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 0.6)
ngx.say("dog: ", c:get("dog"))
ngx.sleep(0.3)
ngx.say("dog: ", c:get("dog"))
ngx.sleep(0.31)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 32
dog: 32
dog: nil32
--- no_error_log
[error]
=== TEST 5: load factor
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1, 0.25)
ngx.say(c.bucket_sz)
';
}
--- request
GET /t
--- response_body
4
--- no_error_log
[error]
=== TEST 6: load factor clamped to 0.1
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(3, 0.05)
ngx.say(c.bucket_sz)
';
}
--- request
GET /t
--- response_body
32
--- no_error_log
[error]
=== TEST 7: load factor saturated to 1
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(3, 2.1)
ngx.say(c.bucket_sz)
';
}
--- request
GET /t
--- response_body
4
--- no_error_log
[error]
=== TEST 8: non-string keys
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local function log(...)
ngx.say(...)
end
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(2)
collectgarbage()
local tab1 = {1, 2}
local tab2 = {3, 4}
c:set(tab1, 32)
c:set(tab2, 56)
log("tab1: ", c:get(tab1))
log("tab2: ", c:get(tab2))
c:set(tab1, 32)
c:set(tab2, 56)
log("tab1: ", c:get(tab1))
log("tab2: ", c:get(tab2))
c:delete(tab1)
c:delete(tab2)
log("tab1: ", c:get(tab1))
log("tab2: ", c:get(tab2))
';
}
--- request
GET /t
--- response_body
tab1: 32
tab2: 56
tab1: 32
tab2: 56
tab1: nil
tab2: nil
--- no_error_log
[error]
=== TEST 9: replace value
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32)
ngx.say("dog: ", c:get("dog"))
c:set("dog", 33)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 32
dog: 33
--- no_error_log
[error]
=== TEST 10: replace value 2
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 1.0)
ngx.say("dog: ", c:get("dog"))
c:set("dog", 33, 0.3)
ngx.say("dog: ", c:get("dog"))
ngx.sleep(0.4)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 32
dog: 33
dog: nil33
--- no_error_log
[error]
=== TEST 11: replace value 3 (the old value has longer expire time)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 1.2)
c:set("dog", 33, 0.6)
ngx.sleep(0.2)
ngx.say("dog: ", c:get("dog"))
ngx.sleep(0.5)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 33
dog: nil33
--- no_error_log
[error]
=== TEST 12: replace value 4
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache.pureffi"
local c = lrucache.new(1)
c:set("dog", 32, 0.1)
ngx.sleep(0.2)
c:set("dog", 33)
ngx.sleep(0.2)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 33
--- no_error_log
[error]

View file

@@ -1,250 +0,0 @@
# vim:set ft= ts=4 sw=4 et fdm=marker:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (blocks() * 3);
#no_diff();
#no_long_string();
my $pwd = cwd();
our $HttpConfig = <<"_EOC_";
lua_package_path "$pwd/lib/?.lua;$pwd/../lua-resty-core/lib/?.lua;;";
#init_by_lua '
#local v = require "jit.v"
#v.on("$Test::Nginx::Util::ErrLogFile")
#require "resty.core"
#';
_EOC_
no_long_string();
run_tests();
__DATA__
=== TEST 1: sanity
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
collectgarbage()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:delete("dog")
c:delete("cat")
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: 32
cat: 56
dog: nil
cat: nil
--- no_error_log
[error]
=== TEST 2: evict existing items
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
if not c then
ngx.say("failed to init lrucace: ", err)
return
end
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("bird", 76)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
ngx.say("bird: ", c:get("bird"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
--- no_error_log
[error]
=== TEST 3: evict existing items (reordered, get should also count)
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(2)
if not c then
ngx.say("failed to init lrucace: ", err)
return
end
c:set("cat", 56)
c:set("dog", 32)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("bird", 76)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
ngx.say("bird: ", c:get("bird"))
';
}
--- request
GET /t
--- response_body
dog: 32
cat: 56
dog: nil
cat: 56
bird: 76
--- no_error_log
[error]
=== TEST 4: ttl
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(1)
c:set("dog", 32, 0.5)
ngx.say("dog: ", c:get("dog"))
ngx.sleep(0.25)
ngx.say("dog: ", c:get("dog"))
ngx.sleep(0.26)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 32
dog: 32
dog: nil32
--- no_error_log
[error]
=== TEST 5: ttl
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local lim = 5
local c = lrucache.new(lim)
local n = 1000
for i = 1, n do
c:set("dog" .. i, i)
c:delete("dog" .. i, i)
c:set("dog" .. i, i)
local cnt = 0
for k, v in pairs(c.hasht) do
cnt = cnt + 1
end
assert(cnt <= lim)
end
for i = 1, n do
local key = "dog" .. math.random(1, n)
c:get(key)
end
for i = 1, n do
local key = "dog" .. math.random(1, n)
c:get(key)
c:set("dog" .. i, i)
local cnt = 0
for k, v in pairs(c.hasht) do
cnt = cnt + 1
end
assert(cnt <= lim)
end
ngx.say("ok")
';
}
--- request
GET /t
--- response_body
ok
--- no_error_log
[error]
--- timeout: 20
=== TEST 6: replace value
--- http_config eval: $::HttpConfig
--- config
location = /t {
content_by_lua '
local lrucache = require "resty.lrucache"
local c = lrucache.new(1)
c:set("dog", 32)
ngx.say("dog: ", c:get("dog"))
c:set("dog", 33)
ngx.say("dog: ", c:get("dog"))
';
}
--- request
GET /t
--- response_body
dog: 32
dog: 33
--- no_error_log
[error]

View file

@@ -1,36 +0,0 @@
# Valgrind suppression file for LuaJIT 2.0.
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:malloc
fun:ngx_alloc
fun:ngx_event_process_init
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_add_event
}
{
<insert_a_suppression_name_here>
Memcheck:Cond
fun:index
fun:expand_dynamic_string_token
fun:_dl_map_object
fun:map_doit
fun:_dl_catch_error
fun:do_preload
fun:dl_main
fun:_dl_sysdep_start
fun:_dl_start
}
{
<insert_a_suppression_name_here>
Memcheck:Param
epoll_ctl(event)
fun:epoll_ctl
fun:ngx_epoll_init
fun:ngx_event_process_init
}

View file

@@ -63,13 +63,6 @@ func main() {
glog.Fatalf("Please specify --default-backend") glog.Fatalf("Please specify --default-backend")
} }
glog.Info("Checking if DNS is working")
ip, err := checkDNS(*defaultSvc)
if err != nil {
glog.Fatalf("Please check if the DNS addon is working properly.\n%v", err)
}
glog.Infof("IP address of '%v' service: %s", *defaultSvc, ip)
kubeClient, err := unversioned.NewInCluster() kubeClient, err := unversioned.NewInCluster()
if err != nil { if err != nil {
glog.Fatalf("failed to create client: %v", err) glog.Fatalf("failed to create client: %v", err)
@@ -82,8 +75,7 @@ func main() {
} }
defError, _ := getService(kubeClient, *customErrorSvc) defError, _ := getService(kubeClient, *customErrorSvc)
// Start loadbalancer controller lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod, defSvc, defError, *watchNamespace, lbInfo)
lbc, err := NewLoadBalancerController(kubeClient, *resyncPeriod, defSvc, defError, *watchNamespace, lbInfo)
if err != nil { if err != nil {
glog.Fatalf("%v", err) glog.Fatalf("%v", err)
} }
@@ -91,7 +83,7 @@ func main() {
lbc.Run() lbc.Run()
for { for {
glog.Infof("Handled quit, awaiting pod deletion.") glog.Infof("Handled quit, awaiting pod deletion")
time.Sleep(30 * time.Second) time.Sleep(30 * time.Second)
} }
} }

View file

@@ -1,3 +1,9 @@
{{range $name, $upstream := .upstreams}}
upstream {{$upstream.Name}} {
{{range $server := $upstream.UpstreamServers}}
server {{$server.Address}}:{{$server.Port}};{{end}}
}{{end}}
{{ $cfg := .cfg }}{{ $sslCertificates := .sslCertificates }}{{ $defErrorSvc := .defErrorSvc }}{{ $defBackend := .defBackend }} {{ $cfg := .cfg }}{{ $sslCertificates := .sslCertificates }}{{ $defErrorSvc := .defErrorSvc }}{{ $defBackend := .defBackend }}
daemon off; daemon off;
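
To make the new upstream block above concrete, here is a self-contained Go text/template sketch that renders it; the upstream/upstreamServer structs and the sample pod addresses are illustrative stand-ins rather than the controller's actual types:

package main

import (
    "os"
    "text/template"
)

// Illustrative stand-ins for the controller's upstream types; the field
// names simply follow what the template snippet above dereferences.
type upstreamServer struct {
    Address string
    Port    string
}

type upstream struct {
    Name            string
    UpstreamServers []upstreamServer
}

const upstreamTmpl = `{{range $name, $upstream := .upstreams}}
upstream {{$upstream.Name}} {
{{range $server := $upstream.UpstreamServers}}
server {{$server.Address}}:{{$server.Port}};{{end}}
}{{end}}
`

func main() {
    data := map[string]interface{}{
        "upstreams": map[string]upstream{
            "default-echoheaders-80": {
                Name: "default-echoheaders-80",
                UpstreamServers: []upstreamServer{
                    {Address: "10.2.1.5", Port: "8080"},
                    {Address: "10.2.1.6", Port: "8080"},
                },
            },
        },
    }
    t := template.Must(template.New("upstreams").Parse(upstreamTmpl))
    // Writes one "upstream default-echoheaders-80 { ... }" block to stdout.
    if err := t.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}

Each backend thus becomes a named upstream{} block, and the location blocks later in the template can proxy_pass to it by name instead of resolving endpoints in Lua at request time.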
@@ -14,16 +20,7 @@ events {
http { http {
#vhost_traffic_status_zone shared:vhost_traffic_status:10m; #vhost_traffic_status_zone shared:vhost_traffic_status:10m;
# configure cache size used in ingress.lua lua_package_path '.?.lua;./etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/lua-resty-http/lib/?.lua;';
lua_shared_dict ingress 10m;
lua_shared_dict dns_cache 15m;
lua_shared_dict ssl_certs 5m;
lua_package_path '.?.lua;./etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/lua-resty-lock/lib/?.lua;/etc/nginx/lua/vendor/lua-resty-dns/lib/?.lua;/etc/nginx/lua/vendor/lua-resty-dns-cache/lib/?.lua;/etc/nginx/lua/vendor/lua-resty-http/lib/?.lua;/etc/nginx/lua/vendor/lua-resty-lrucache/lib/?.lua;;';
init_worker_by_lua_block {
require("ingress").init_worker(ngx)
}
init_by_lua_block { init_by_lua_block {
{{ if $defErrorSvc }}{{/* only if exists a custom error service */}} {{ if $defErrorSvc }}{{/* only if exists a custom error service */}}
@@ -32,21 +29,7 @@ http {
dev_error_url = nil dev_error_url = nil
{{ end }} {{ end }}
local options = {} local options = {}
options.def_backend = "http://{{ $defBackend.ServiceName }}.{{ $defBackend.Namespace }}.svc.cluster.local:{{ $defBackend.ServicePort }}" def_backend = "http://{{ $defBackend.ServiceName }}.{{ $defBackend.Namespace }}.svc.cluster.local:{{ $defBackend.ServicePort }}"
{{ if $defErrorSvc }}{{/* only if exists a custom error service */}}options.custom_error = "http://{{ $defErrorSvc.ServiceName }}.{{ $defErrorSvc.Namespace }}.svc.cluster.local:{{ $defErrorSvc.ServicePort }}"{{ end }}
{{ if not (empty .defResolver) }}-- Custom dns resolver.
options.resolvers = "{{ .defResolver }}"
{{ end }}
require("ingress").init(ngx, options)
local certs = {}{{ range $sslCert := .sslCertificates }}{{ range $cname := $sslCert.Cname }}
certs["{{ $cname }}"] = {}
certs["{{ $cname }}"].cert = "{{ $sslCert.Cert }}"
certs["{{ $cname }}"].key = "{{ $sslCert.Key }}"
certs["{{ $cname }}"].valid = {{ $sslCert.Valid }}
{{ end }}{{ end }}
ssl_certs = certs
require("error_page") require("error_page")
} }
@@ -121,7 +104,6 @@ http {
text text/plain; text text/plain;
} }
server_name_in_redirect off; server_name_in_redirect off;
port_in_redirect off; port_in_redirect off;
@@ -221,26 +203,37 @@ http {
{{ end }}{{ end }} {{ end }}{{ end }}
location / { location / {
set $upstream_host ''; proxy_pass http://{{ $defBackend.ServiceName }}.{{ $defBackend.Namespace }}.svc.cluster.local:{{ $defBackend.ServicePort }};
set $upstream_port '';
#ssl_certificate_by_lua '
# -- TODO: waiting release 0.9.20
# -- https://github.com/openresty/lua-nginx-module/pull/608#issuecomment-165255821
# -- require("dynamic-ssl").config(ngx)
# require("ingress").content(ngx)
#';
# TODO: remove after ^^
access_by_lua_block {
require("ingress").content(ngx)
}
proxy_pass http://$upstream_host:$upstream_port$request_uri;
} }
{{ if $defErrorSvc }}{{ template "CUSTOM_ERRORS" (dict "cfg" $cfg "defErrorSvc" $defErrorSvc) }}{{ end }} {{ if $defErrorSvc }}{{ template "CUSTOM_ERRORS" (dict "cfg" $cfg "defErrorSvc" $defErrorSvc) }}{{ end }}
} }
{{ end }} {{ end }}
{{ range $server := .servers }}
server {
listen 80;
{{ if $server.SSL }}
listen 443 ssl http2;
ssl_certificate {{ $server.SSLCertificate }};
ssl_certificate_key {{ $server.SSLCertificateKey }};
{{ end }}
server_name {{ $server.Name }};
{{ if $server.SSL }}
if ($scheme = http) {
return 301 https://$host$request_uri;
}
{{ end }}
{{ range $location := $server.Locations }}
location {{ $location.Path }} {
proxy_set_header Host $host;
proxy_pass http://{{ $location.Upstream.Name }};
}{{ end }}
}{{ end }}
# default server, including healthcheck # default server, including healthcheck
server { server {
listen 8080 default_server{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }} reuseport; listen 8080 default_server{{ if $cfg.UseProxyProtocol }} proxy_protocol{{ end }} reuseport;
@@ -299,62 +292,61 @@ stream {
{{ define "CUSTOM_ERRORS" }} {{ define "CUSTOM_ERRORS" }}
location @custom_403 { location @custom_403 {
content_by_lua_block { content_by_lua_block {
openErrorURL(403, dev_error_url) openURL(403, dev_error_url)
} }
} }
location @custom_404 { location @custom_404 {
content_by_lua_block { content_by_lua_block {
openErrorURL(404, dev_error_url) openURL(404, dev_error_url)
} }
} }
location @custom_405 { location @custom_405 {
content_by_lua_block { content_by_lua_block {
openErrorURL(405, dev_error_url) openURL(405, dev_error_url)
} }
} }
location @custom_408 { location @custom_408 {
content_by_lua_block { content_by_lua_block {
openErrorURL(408, dev_error_url) openURL(408, dev_error_url)
} }
} }
location @custom_413 { location @custom_413 {
content_by_lua_block { content_by_lua_block {
openErrorURL(413, dev_error_url) openURL(413, dev_error_url)
} }
} }
location @custom_500 { location @custom_500 {
content_by_lua_block { content_by_lua_block {
openErrorURL(500, dev_error_url) openURL(500, dev_error_url)
} }
} }
location @custom_501 { location @custom_501 {
content_by_lua_block { content_by_lua_block {
openErrorURL(501, dev_error_url) openURL(501, dev_error_url)
} }
} }
location @custom_502 { location @custom_502 {
content_by_lua_block { content_by_lua_block {
openErrorURL(502, dev_error_url) openURL(502, dev_error_url)
} }
} }
location @custom_503 { location @custom_503 {
content_by_lua_block { content_by_lua_block {
openErrorURL(503, dev_error_url) openURL(503, dev_error_url)
} }
} }
location @custom_504 { location @custom_504 {
content_by_lua_block { content_by_lua_block {
openErrorURL(504, dev_error_url) openURL(504, dev_error_url)
} }
} }
{{ end }} {{ end }}

View file

@@ -52,17 +52,20 @@ func (ngx *NginxManager) Start() {
// shut down, stop accepting new connections and continue to service current requests // shut down, stop accepting new connections and continue to service current requests
// until all such requests are serviced. After that, the old worker processes exit. // until all such requests are serviced. After that, the old worker processes exit.
// http://nginx.org/en/docs/beginners_guide.html#control // http://nginx.org/en/docs/beginners_guide.html#control
func (ngx *NginxManager) Reload(cfg *nginxConfiguration, servicesL4 []Service) { func (ngx *NginxManager) CheckAndReload(cfg *nginxConfiguration, upstreams []Upstream, servers []Server, servicesL4 []Service) {
ngx.reloadLock.Lock() ngx.reloadLock.Lock()
defer ngx.reloadLock.Unlock() defer ngx.reloadLock.Unlock()
if err := ngx.writeCfg(cfg, servicesL4); err != nil { newCfg, err := ngx.writeCfg(cfg, upstreams, servers, servicesL4)
if err != nil {
glog.Errorf("Failed to write new nginx configuration. Avoiding reload: %v", err) glog.Errorf("Failed to write new nginx configuration. Avoiding reload: %v", err)
return return
} }
if err := ngx.shellOut("nginx -s reload"); err == nil { if newCfg {
glog.Info("Change in configuration detected. Reloading...") if err := ngx.shellOut("nginx -s reload"); err == nil {
glog.Info("Change in configuration detected. Reloading...")
}
} }
} }
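
CheckAndReload above only shells out to "nginx -s reload" when writeCfg reports that a new configuration was produced. As listed further down, writeCfg currently returns true whenever the template executes; a common way to make the "reload only if configuration changes" behaviour concrete is to compare the freshly rendered config with the file already on disk. A minimal sketch, with writeIfChanged as a hypothetical helper rather than the controller's code:

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
)

// writeIfChanged writes newCfg to path only when it differs from what is
// already on disk, and reports whether anything was written. A caller such
// as CheckAndReload would then issue "nginx -s reload" only on true.
func writeIfChanged(path string, newCfg []byte) (bool, error) {
    oldCfg, err := ioutil.ReadFile(path)
    if err == nil && bytes.Equal(oldCfg, newCfg) {
        return false, nil // identical content: skip the reload entirely
    }
    if err := ioutil.WriteFile(path, newCfg, 0644); err != nil {
        return false, err
    }
    return true, nil
}

func main() {
    changed, err := writeIfChanged("/tmp/nginx.conf", []byte("daemon off;\n"))
    fmt.Println(changed, err)
}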

View file

@@ -0,0 +1,92 @@
package nginx
// NGINXController Updates NGINX configuration, starts and reloads NGINX
type NGINXController struct {
resolver string
nginxConfdPath string
nginxCertsPath string
local bool
}
// IngressNGINXConfig describes an NGINX configuration
type IngressNGINXConfig struct {
Upstreams []Upstream
Servers []Server
}
// Upstream describes an NGINX upstream
type Upstream struct {
Name string
Backends []UpstreamServer
}
type UpstreamByNameServers []Upstream
func (c UpstreamByNameServers) Len() int { return len(c) }
func (c UpstreamByNameServers) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c UpstreamByNameServers) Less(i, j int) bool {
return c[i].Name < c[j].Name
}
// UpstreamServer describes a server in an NGINX upstream
type UpstreamServer struct {
Address string
Port string
}
type UpstreamServerByAddrPort []UpstreamServer
func (c UpstreamServerByAddrPort) Len() int { return len(c) }
func (c UpstreamServerByAddrPort) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c UpstreamServerByAddrPort) Less(i, j int) bool {
iName := c[i].Address
jName := c[j].Address
if iName != jName {
return iName < jName
}
iU := c[i].Port
jU := c[j].Port
return iU < jU
}
// Server describes an NGINX server
type Server struct {
Name string
Locations []Location
SSL bool
SSLCertificate string
SSLCertificateKey string
}
type ServerByNamePort []Server
func (c ServerByNamePort) Len() int { return len(c) }
func (c ServerByNamePort) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c ServerByNamePort) Less(i, j int) bool {
return c[i].Name < c[j].Name
}
// Location describes an NGINX location
type Location struct {
Path string
Upstream Upstream
}
type locByPathUpstream []Location
func (c locByPathUpstream) Len() int { return len(c) }
func (c locByPathUpstream) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c locByPathUpstream) Less(i, j int) bool {
return c[i].Path < c[j].Path
}
// NewUpstreamWithDefaultServer creates an upstream with the default server.
// proxy_pass to an upstream with the default server returns 502.
// We use it for services that have no endpoints
func NewUpstreamWithDefaultServer(name string) Upstream {
return Upstream{
Name: name,
Backends: []UpstreamServer{UpstreamServer{Address: "127.0.0.1", Port: "8181"}},
}
}
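
A short usage sketch for the new types above (the structs are copied locally so the snippet compiles on its own): the ByName/ByAddrPort sort wrappers exist so that upstreams, servers and locations are always emitted in a stable order, which means an unchanged set of endpoints renders to byte-identical configuration and never triggers a spurious reload.

package main

import (
    "fmt"
    "sort"
)

// Trimmed local copies of the types above, for a self-contained example.
type UpstreamServer struct{ Address, Port string }

type Upstream struct {
    Name     string
    Backends []UpstreamServer
}

type UpstreamByNameServers []Upstream

func (c UpstreamByNameServers) Len() int           { return len(c) }
func (c UpstreamByNameServers) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
func (c UpstreamByNameServers) Less(i, j int) bool { return c[i].Name < c[j].Name }

// NewUpstreamWithDefaultServer points an otherwise empty upstream at
// 127.0.0.1:8181 so nginx still has a (502-returning) server to proxy to.
func NewUpstreamWithDefaultServer(name string) Upstream {
    return Upstream{
        Name:     name,
        Backends: []UpstreamServer{{Address: "127.0.0.1", Port: "8181"}},
    }
}

func main() {
    ups := []Upstream{
        NewUpstreamWithDefaultServer("default-echoheaders-80"),
        NewUpstreamWithDefaultServer("default-api-8080"),
    }
    // Deterministic ordering keeps the rendered nginx.conf stable across syncs.
    sort.Sort(UpstreamByNameServers(ups))
    fmt.Println(ups[0].Name, ups[1].Name) // default-api-8080 default-echoheaders-80
}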

View file

@@ -60,10 +60,10 @@ func (ngx *NginxManager) loadTemplate() {
ngx.template = tmpl ngx.template = tmpl
} }
func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, servicesL4 []Service) error { func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, upstreams []Upstream, servers []Server, servicesL4 []Service) (bool, error) {
file, err := os.Create(ngx.ConfigFile) file, err := os.Create(ngx.ConfigFile)
if err != nil { if err != nil {
return err return false, err
} }
fromMap := structs.Map(cfg) fromMap := structs.Map(cfg)
@@ -72,6 +72,8 @@ func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, servicesL4 []Service)
conf := make(map[string]interface{}) conf := make(map[string]interface{})
conf["sslCertificates"] = ngx.sslCertificates conf["sslCertificates"] = ngx.sslCertificates
conf["upstreams"] = upstreams
conf["servers"] = servers
conf["tcpServices"] = servicesL4 conf["tcpServices"] = servicesL4
conf["defBackend"] = ngx.defBackend conf["defBackend"] = ngx.defBackend
conf["defResolver"] = ngx.defResolver conf["defResolver"] = ngx.defResolver
@@ -94,8 +96,8 @@ func (ngx *NginxManager) writeCfg(cfg *nginxConfiguration, servicesL4 []Service)
err = ngx.template.Execute(file, conf) err = ngx.template.Execute(file, conf)
if err != nil { if err != nil {
return err return false, err
} }
return nil return true, nil
} }

View file

@@ -17,7 +17,6 @@ limitations under the License.
package nginx package nginx
import ( import (
"bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@@ -28,27 +27,6 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
) )
// SyncIngress creates a GET request to nginx to indicate that is required to refresh the Ingress rules.
func (ngx *NginxManager) SyncIngress(ingList []interface{}) error {
encData, _ := json.Marshal(ingList)
req, err := http.NewRequest("POST", "http://127.0.0.1:8080/update-ingress", bytes.NewBuffer(encData))
req.Header.Set("Content-Type", "application/json")
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
body, _ := ioutil.ReadAll(res.Body)
glog.Errorf("Error: %v", string(body))
return fmt.Errorf("nginx status is unhealthy")
}
return nil
}
// IsHealthy checks if nginx is running // IsHealthy checks if nginx is running
func (ngx *NginxManager) IsHealthy() error { func (ngx *NginxManager) IsHealthy() error {
res, err := http.Get("http://127.0.0.1:8080/healthz") res, err := http.Get("http://127.0.0.1:8080/healthz")

View file

@@ -19,11 +19,9 @@ package main
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"time"
"k8s.io/contrib/ingress/controllers/nginx-third-party/nginx" "k8s.io/contrib/ingress/controllers/nginx-third-party/nginx"
@@ -33,8 +31,6 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
) )
const ( const (
@@ -48,66 +44,6 @@ var (
errInvalidKind = fmt.Errorf("Please check the field Kind, only ReplicationController or DaemonSet are allowed") errInvalidKind = fmt.Errorf("Please check the field Kind, only ReplicationController or DaemonSet are allowed")
) )
// taskQueue manages a work queue through an independent worker that
// invokes the given sync function for every work item inserted.
type taskQueue struct {
// queue is the work queue the worker polls
queue *workqueue.Type
// sync is called for each item in the queue
sync func(string)
// workerDone is closed when the worker exits
workerDone chan struct{}
}
func (t *taskQueue) run(period time.Duration, stopCh <-chan struct{}) {
wait.Until(t.worker, period, stopCh)
}
// enqueue enqueues ns/name of the given api object in the task queue.
func (t *taskQueue) enqueue(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Infof("Couldn't get key for object %+v: %v", obj, err)
return
}
t.queue.Add(key)
}
func (t *taskQueue) requeue(key string, err error) {
glog.Errorf("Requeuing %v, err %v", key, err)
t.queue.Add(key)
}
// worker processes work in the queue through sync.
func (t *taskQueue) worker() {
for {
key, quit := t.queue.Get()
if quit {
close(t.workerDone)
return
}
glog.V(2).Infof("Syncing %v", key)
t.sync(key.(string))
t.queue.Done(key)
}
}
// shutdown shuts down the work queue and waits for the worker to ACK
func (t *taskQueue) shutdown() {
t.queue.ShutDown()
<-t.workerDone
}
// NewTaskQueue creates a new task queue with the given sync function.
// The sync function is called for every element inserted into the queue.
func NewTaskQueue(syncFn func(string)) *taskQueue {
return &taskQueue{
queue: workqueue.New(),
sync: syncFn,
workerDone: make(chan struct{}),
}
}
// StoreToIngressLister makes a Store that lists Ingress. // StoreToIngressLister makes a Store that lists Ingress.
// TODO: use cache/listers post 1.1. // TODO: use cache/listers post 1.1.
type StoreToIngressLister struct { type StoreToIngressLister struct {
@@ -128,7 +64,7 @@ type StoreToConfigMapLister struct {
} }
// getLBDetails returns runtime information about the pod (name, IP) and replication // getLBDetails returns runtime information about the pod (name, IP) and replication
// controller (namespace and name) // controller or daemonset (namespace and name).
// This is required to watch for changes in annotations or configuration (ConfigMap) // This is required to watch for changes in annotations or configuration (ConfigMap)
func getLBDetails(kubeClient *unversioned.Client) (rc *lbInfo, err error) { func getLBDetails(kubeClient *unversioned.Client) (rc *lbInfo, err error) {
podIP := os.Getenv("POD_IP") podIP := os.Getenv("POD_IP")
@@ -209,7 +145,7 @@ func getServicePorts(kubeClient *unversioned.Client, ns, name string) (ports []s
return return
} }
func getTcpServices(kubeClient *unversioned.Client, tcpServices string) []nginx.Service { func getTCPServices(kubeClient *unversioned.Client, tcpServices string) []nginx.Service {
svcs := []nginx.Service{} svcs := []nginx.Service{}
for _, svc := range strings.Split(tcpServices, ",") { for _, svc := range strings.Split(tcpServices, ",") {
if svc == "" { if svc == "" {
@@ -239,13 +175,3 @@ func getTcpServices(kubeClient *unversioned.Client, tcpServices string) []nginx.
return svcs return svcs
} }
func checkDNS(name string) (*net.IPAddr, error) {
parts := strings.Split(name, "/")
if len(parts) != 2 {
glog.Fatalf("Please check the service format (namespace/name) in service %v", name)
}
host := fmt.Sprintf("%v.%v", parts[1], parts[0])
return net.ResolveIPAddr("ip4", host)
}