Improve configuration change detection (#2656)

* Use information about the configuration configmap to determine changes

* Add hashstructure dependency

* Rename queue functions

* Add test for configmap checksum
Author: Manuel Alejandro de Brito Fontes, 2018-06-21 10:50:57 -04:00 (committed by GitHub)
Parent: a6978a873b
Commit: aec40c171f
15 changed files with 564 additions and 42 deletions
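Taken together, the bullets above swap the old force-reload flag for a checksum of the ConfigMap-derived configuration: ReadConfig hashes the merged settings, the resulting checksum is carried into the generated ingress.Configuration, and Equal compares it to decide whether a backend reload is needed. A minimal sketch of that idea, using a simplified stand-in for the real config struct (only the hashstructure call and the "json" tag name are taken from the diff below; everything else is illustrative):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// Configuration is a simplified stand-in for the ConfigMap-backed settings struct.
type Configuration struct {
	EnableInfluxDB bool   `json:"enable-influxdb"`
	Checksum       string `json:"-"` // json:"-" also keeps this field out of the hash
}

// checksum mirrors what ReadConfig does after merging the ConfigMap values:
// hash the struct using its json tags and store the result as a string.
func checksum(cfg Configuration) string {
	hash, err := hashstructure.Hash(cfg, &hashstructure.HashOptions{
		TagName: "json",
	})
	if err != nil {
		return ""
	}
	return fmt.Sprintf("%v", hash)
}

func main() {
	before := checksum(Configuration{EnableInfluxDB: false})
	after := checksum(Configuration{EnableInfluxDB: true})

	// A ConfigMap edit produces a different checksum, which is what the
	// controller now compares instead of a manually managed reload flag.
	fmt.Println(before == after) // false
}
```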

Gopkg.lock (generated)

@@ -207,6 +207,12 @@
packages = ["."]
revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062"
[[projects]]
branch = "master"
name = "github.com/mitchellh/hashstructure"
packages = ["."]
revision = "2bca23e0e452137f789efbc8610126fd8b94f73b"
[[projects]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
@@ -900,6 +906,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "5feeef324f0cbac72e0234d5f649fc7c4233f4e2bb4477e454e047b5461d7569"
inputs-digest = "56ef61f651cca98e6dc7f7d25fd8dec603be3439bf91ba2e19838c5be1cbeea4"
solver-name = "gps-cdcl"
solver-version = 1


@@ -506,6 +506,9 @@ type Configuration struct {
// http://github.com/influxdata/nginx-influxdb-module/
// By default this is disabled
EnableInfluxDB bool `json:"enable-influxdb"`
// Checksum contains a checksum of the configmap configuration
Checksum string `json:"-"`
}
// NewDefault returns the default nginx configuration


@@ -22,7 +22,6 @@ import (
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/golang/glog"
@@ -155,14 +154,16 @@ func (n *NGINXController) syncIngress(interface{}) error {
TCPEndpoints: n.getStreamServices(n.cfg.TCPConfigMapName, apiv1.ProtocolTCP),
UDPEndpoints: n.getStreamServices(n.cfg.UDPConfigMapName, apiv1.ProtocolUDP),
PassthroughBackends: passUpstreams,
ConfigurationChecksum: n.store.GetBackendConfiguration().Checksum,
}
if !n.isForceReload() && n.runningConfig.Equal(&pcfg) {
if n.runningConfig.Equal(&pcfg) {
glog.V(3).Infof("No configuration change detected, skipping backend reload.")
return nil
}
if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) && !n.isForceReload() {
if n.cfg.DynamicConfigurationEnabled && n.IsDynamicConfigurationEnough(&pcfg) {
glog.Infof("Changes handled by the dynamic configuration, skipping backend reload.")
} else {
glog.Infof("Configuration changes detected, backend reload required.")
@@ -200,7 +201,6 @@ func (n *NGINXController) syncIngress(interface{}) error {
}
n.runningConfig = &pcfg
n.SetForceReload(false)
return nil
}
@@ -1048,21 +1048,6 @@ func (n *NGINXController) createServers(data []*extensions.Ingress,
return servers
}
func (n *NGINXController) isForceReload() bool {
return atomic.LoadInt32(&n.forceReload) != 0
}
// SetForceReload sets whether the backend should be reloaded regardless of
// configuration changes.
func (n *NGINXController) SetForceReload(shouldReload bool) {
if shouldReload {
atomic.StoreInt32(&n.forceReload, 1)
n.syncQueue.Enqueue(&extensions.Ingress{})
} else {
atomic.StoreInt32(&n.forceReload, 0)
}
}
// extractTLSSecretName returns the name of the Secret containing a SSL
// certificate for the given host name, or an empty string.
func extractTLSSecretName(host string, ing *extensions.Ingress,


@@ -38,7 +38,6 @@ import (
proxyproto "github.com/armon/go-proxyproto"
"github.com/eapache/channels"
apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
@@ -153,7 +152,7 @@ Error loading new template: %v
n.t = template
glog.Info("New NGINX configuration template loaded.")
n.SetForceReload(true)
n.syncQueue.EnqueueTask(task.GetDummyObject("template-change"))
}
ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
@@ -194,7 +193,7 @@ Error loading new template: %v
for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() {
glog.Infof("File %v changed. Reloading NGINX", f)
n.SetForceReload(true)
n.syncQueue.EnqueueTask(task.GetDummyObject("file-change"))
})
if err != nil {
glog.Fatalf("Error creating file watcher for %v: %v", f, err)
@@ -232,8 +231,6 @@ type NGINXController struct {
// runningConfig contains the running configuration in the Backend
runningConfig *ingress.Configuration
forceReload int32
t *ngx_template.Template
resolver []net.IP
@@ -278,7 +275,7 @@ func (n *NGINXController) Start() {
go n.syncQueue.Run(time.Second, n.stopCh)
// force initial sync
n.syncQueue.Enqueue(&extensions.Ingress{})
n.syncQueue.EnqueueTask(task.GetDummyObject("initial-sync"))
for {
select {
@@ -311,10 +308,12 @@ func (n *NGINXController) Start() {
if evt, ok := event.(store.Event); ok {
glog.V(3).Infof("Event %v received - object %v", evt.Type, evt.Obj)
if evt.Type == store.ConfigurationEvent {
n.SetForceReload(true)
// TODO: is this necessary? Consider removing this special case
n.syncQueue.EnqueueTask(task.GetDummyObject("configmap-change"))
continue
}
n.syncQueue.Enqueue(evt.Obj)
n.syncQueue.EnqueueSkippableTask(evt.Obj)
} else {
glog.Warningf("Unexpected event type received %T", event)
}


@@ -25,6 +25,7 @@ import (
"github.com/golang/glog"
"github.com/mitchellh/hashstructure"
"github.com/mitchellh/mapstructure"
"k8s.io/apimachinery/pkg/util/sets"
@@ -191,6 +192,15 @@ func ReadConfig(src map[string]string) config.Configuration {
glog.Warningf("unexpected error merging defaults: %v", err)
}
hash, err := hashstructure.Hash(to, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
glog.Warningf("unexpected error obtaining hash: %v", err)
}
to.Checksum = fmt.Sprintf("%v", hash)
return to
}
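A side effect of the TagName: "json" choice above: hashstructure treats a tag value of "-" as "ignore this field", so the Checksum field itself (tagged `json:"-"` in the config struct) never feeds back into the hash it stores. A small illustration with a made-up struct:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// settings is an illustrative struct, not the real config type.
type settings struct {
	MaxWorkerConnections int    `json:"max-worker-connections"`
	Checksum             string `json:"-"` // skipped by json encoding and by the hash
}

func main() {
	opts := &hashstructure.HashOptions{TagName: "json"}

	a, _ := hashstructure.Hash(settings{MaxWorkerConnections: 1024}, opts)
	b, _ := hashstructure.Hash(settings{MaxWorkerConnections: 1024, Checksum: "already set"}, opts)

	// Writing the checksum back into the struct does not change the value
	// that would be computed for it.
	fmt.Println(a == b) // true
}
```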


@@ -17,11 +17,13 @@ limitations under the License.
package template
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/kylelemons/godebug/pretty"
"github.com/mitchellh/hashstructure"
"k8s.io/ingress-nginx/internal/ingress/controller/config"
)
@@ -88,6 +90,14 @@ func TestMergeConfigMapToStruct(t *testing.T) {
def.NginxStatusIpv6Whitelist = []string{"::1", "2001::/16"}
def.ProxyAddOriginalUriHeader = false
hash, err := hashstructure.Hash(def, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
t.Fatalf("unexpected error obtaining hash: %v", err)
}
def.Checksum = fmt.Sprintf("%v", hash)
to := ReadConfig(conf)
if diff := pretty.Compare(to, def); diff != "" {
t.Errorf("unexpected diff: (-got +want)\n%s", diff)
@@ -107,6 +117,14 @@ func TestMergeConfigMapToStruct(t *testing.T) {
}
def = config.NewDefault()
hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
t.Fatalf("unexpected error obtaining hash: %v", err)
}
def.Checksum = fmt.Sprintf("%v", hash)
to = ReadConfig(map[string]string{})
if diff := pretty.Compare(to, def); diff != "" {
t.Errorf("unexpected diff: (-got +want)\n%s", diff)
@@ -114,6 +132,15 @@ func TestMergeConfigMapToStruct(t *testing.T) {
def = config.NewDefault()
def.WhitelistSourceRange = []string{"1.1.1.1/32"}
hash, err = hashstructure.Hash(def, &hashstructure.HashOptions{
TagName: "json",
})
if err != nil {
t.Fatalf("unexpected error obtaining hash: %v", err)
}
def.Checksum = fmt.Sprintf("%v", hash)
to = ReadConfig(map[string]string{
"whitelist-source-range": "1.1.1.1/32",
})


@@ -65,6 +65,9 @@ type Configuration struct {
// It contains information about the associated Server Name Indication (SNI).
// +optional
PassthroughBackends []*SSLPassthroughBackend `json:"passthroughBackends,omitempty"`
// ConfigurationChecksum contains the particular checksum of a Configuration object
ConfigurationChecksum string `json:"configurationChecksum,omitempty"`
}
// Backend describes one or more remote server/s (endpoints) associated with a service


@@ -104,6 +104,10 @@ func (c1 *Configuration) Equal(c2 *Configuration) bool {
}
}
if c1.ConfigurationChecksum != c2.ConfigurationChecksum {
return false
}
return true
}
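With the checksum folded into Equal, a ConfigMap edit is detected like any other configuration change, which is why the forceReload/SetForceReload machinery above could be removed. A cut-down model of the resulting behaviour (stand-in types only; the real decision lives in syncIngress):

```go
package main

import "fmt"

// Stand-ins for ingress.Configuration and the controller; only the parts
// relevant to the reload decision are modelled here.
type configuration struct {
	ConfigurationChecksum string
	// ...backends, servers and passthrough backends elided...
}

func (c *configuration) Equal(other *configuration) bool {
	// The real Equal also compares backends and servers; the checksum
	// comparison is the piece added in this change.
	return c.ConfigurationChecksum == other.ConfigurationChecksum
}

type controller struct {
	runningConfig *configuration
}

func (n *controller) sync(pcfg *configuration) {
	if n.runningConfig.Equal(pcfg) {
		fmt.Println("No configuration change detected, skipping backend reload.")
		return
	}
	fmt.Println("Configuration changes detected, backend reload required.")
	n.runningConfig = pcfg
}

func main() {
	n := &controller{runningConfig: &configuration{ConfigurationChecksum: "100"}}

	n.sync(&configuration{ConfigurationChecksum: "100"}) // unchanged ConfigMap: skipped
	n.sync(&configuration{ConfigurationChecksum: "200"}) // edited ConfigMap: reload
}
```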


@@ -22,6 +22,7 @@ import (
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@@ -50,23 +51,39 @@ type Queue struct {
// Element represents one item of the queue
type Element struct {
Key interface{}
Timestamp int64
Key interface{}
Timestamp int64
IsSkippable bool
}
// Run ...
// Run starts processing elements in the queue
func (t *Queue) Run(period time.Duration, stopCh <-chan struct{}) {
wait.Until(t.worker, period, stopCh)
}
// Enqueue enqueues ns/name of the given api object in the task queue.
func (t *Queue) Enqueue(obj interface{}) {
// EnqueueTask enqueues ns/name of the given api object in the task queue.
func (t *Queue) EnqueueTask(obj interface{}) {
t.enqueue(obj, false)
}
// EnqueueSkippableTask enqueues ns/name of the given api object in
// the task queue that can be skipped
func (t *Queue) EnqueueSkippableTask(obj interface{}) {
t.enqueue(obj, true)
}
// enqueue enqueues ns/name of the given api object in the task queue.
func (t *Queue) enqueue(obj interface{}, skippable bool) {
if t.IsShuttingDown() {
glog.Errorf("queue has been shutdown, failed to enqueue: %v", obj)
return
}
ts := time.Now().UnixNano()
if !skippable {
// make sure the timestamp is bigger than lastSync
ts = time.Now().Add(24 * time.Hour).UnixNano()
}
glog.V(3).Infof("queuing item %v", obj)
key, err := t.fn(obj)
if err != nil {
@@ -166,3 +183,10 @@ func NewCustomTaskQueue(syncFn func(interface{}) error, fn func(interface{}) (in
return q
}
// GetDummyObject returns a valid object that can be used in the Queue
func GetDummyObject(name string) *metav1.ObjectMeta {
return &metav1.ObjectMeta{
Name: name,
}
}
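The 24-hour offset above exists so that the worker's last-sync comparison can never discard a forced task; only items queued via EnqueueSkippableTask may be dropped as already synced. A rough model of that behaviour (the lastSync check is an assumption based on the comment in the diff, not a copy of the real worker):

```go
package main

import (
	"fmt"
	"time"
)

// element mirrors the Element struct above; queue is a toy stand-in whose
// worker logic is assumed for illustration.
type element struct {
	Key         string
	Timestamp   int64
	IsSkippable bool
}

type queue struct {
	lastSync int64
}

func (q *queue) enqueue(key string, skippable bool) element {
	ts := time.Now().UnixNano()
	if !skippable {
		// Stamp forced tasks in the future so a recent sync never makes
		// the worker treat them as already handled.
		ts = time.Now().Add(24 * time.Hour).UnixNano()
	}
	return element{Key: key, Timestamp: ts, IsSkippable: skippable}
}

func (q *queue) process(e element) {
	if e.Timestamp < q.lastSync {
		fmt.Printf("skipping %s: already covered by a newer sync\n", e.Key)
		return
	}
	fmt.Printf("syncing %s\n", e.Key)
	q.lastSync = time.Now().UnixNano()
}

func main() {
	q := &queue{}

	skippable := q.enqueue("ingress-update", true) // EnqueueSkippableTask
	forced := q.enqueue("template-change", false)  // EnqueueTask

	q.process(skippable) // first time: synced, lastSync recorded
	q.process(skippable) // same stale element again: skipped
	q.process(forced)    // future timestamp: always processed
}
```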


@@ -71,7 +71,7 @@ func TestEnqueueSuccess(t *testing.T) {
k: "testKey",
v: "testValue",
}
q.Enqueue(mo)
q.EnqueueSkippableTask(mo)
// wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10)
if atomic.LoadUint32(&sr) != 1 {
@@ -99,7 +99,7 @@ func TestEnqueueFailed(t *testing.T) {
q.Shutdown()
// wait for shutdown
time.Sleep(time.Millisecond * 10)
q.Enqueue(mo)
q.EnqueueSkippableTask(mo)
// wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10)
// queue is shutdown, so mockSynFn should not be executed, so the result should be 0
@@ -121,7 +121,7 @@ func TestEnqueueKeyError(t *testing.T) {
v: "testValue",
}
q.Enqueue(mo)
q.EnqueueSkippableTask(mo)
// wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10)
// key error, so the result should be 0
@@ -142,16 +142,16 @@ func TestSkipEnqueue(t *testing.T) {
k: "testKey",
v: "testValue",
}
q.Enqueue(mo)
q.Enqueue(mo)
q.Enqueue(mo)
q.Enqueue(mo)
q.EnqueueSkippableTask(mo)
q.EnqueueSkippableTask(mo)
q.EnqueueTask(mo)
q.EnqueueSkippableTask(mo)
// run queue
go q.Run(time.Second, stopCh)
// wait for 'mockSynFn'
time.Sleep(time.Millisecond * 10)
if atomic.LoadUint32(&sr) != 1 {
t.Errorf("sr should be 1, but is %d", sr)
if atomic.LoadUint32(&sr) != 2 {
t.Errorf("sr should be 2, but is %d", sr)
}
// shutdown queue before exit


@@ -7,6 +7,8 @@
{{ $proxyHeaders := .ProxySetHeaders }}
{{ $addHeaders := .AddHeaders }}
# Configuration checksum: {{ $all.Cfg.Checksum }}
# setup custom paths that do not require root access
pid /tmp/nginx.pid;
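Rendering the checksum into the file means the generated nginx.conf itself changes whenever the ConfigMap does. A toy rendering of just that line with Go's text/template (the real template is processed by the controller's template package and exposes the configuration as $all.Cfg; the checksum value here is an arbitrary example):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Only the checksum comment line from the template above.
	tmpl := template.Must(template.New("nginx").Parse(
		"# Configuration checksum: {{ .Cfg.Checksum }}\n"))

	data := struct{ Cfg struct{ Checksum string } }{}
	data.Cfg.Checksum = "16866849789794640513"

	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Output: # Configuration checksum: 16866849789794640513
}
```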

vendor/github.com/mitchellh/hashstructure/LICENSE (generated, vendored)

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/mitchellh/hashstructure/README.md (generated, vendored)

@@ -0,0 +1,65 @@
# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
hashstructure is a Go library for creating a unique hash value
for arbitrary values in Go.
This can be used to key values in a hash (for use in a map, set, etc.)
that are complex. The most common use case is comparing two values without
sending data across the network, caching values locally (de-dup), and so on.
## Features
* Hash any arbitrary Go value, including complex types.
* Tag a struct field to ignore it and not affect the hash value.
* Tag a slice type struct field to treat it as a set where ordering
doesn't affect the hash code but the field itself is still taken into
account to create the hash value.
* Optionally specify a custom hash function to optimize for speed, collision
avoidance for your data set, etc.
* Optionally hash the output of `.String()` on structs that implement fmt.Stringer,
allowing effective hashing of time.Time
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/hashstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
A quick code example is shown below:
```go
type ComplexStruct struct {
Name string
Age uint
Metadata map[string]interface{}
}
v := ComplexStruct{
Name: "mitchellh",
Age: 64,
Metadata: map[string]interface{}{
"car": true,
"location": "California",
"siblings": []string{"Bob", "John"},
},
}
hash, err := hashstructure.Hash(v, nil)
if err != nil {
panic(err)
}
fmt.Printf("%d", hash)
// Output:
// 2307517237273902113
```
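
The tag behaviour listed under Features can be sketched like this (an illustrative struct, not taken from the package's own examples):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type Release struct {
	Name    string
	UUID    string   `hash:"ignore"` // never contributes to the hash
	Mirrors []string `hash:"set"`    // element order does not matter
}

func main() {
	a, _ := hashstructure.Hash(Release{
		Name:    "v1",
		UUID:    "first",
		Mirrors: []string{"eu", "us"},
	}, nil)
	b, _ := hashstructure.Hash(Release{
		Name:    "v1",
		UUID:    "second",             // ignored
		Mirrors: []string{"us", "eu"}, // same set, different order
	}, nil)

	fmt.Println(a == b)
	// Output:
	// true
}
```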

vendor/github.com/mitchellh/hashstructure/hashstructure.go (generated, vendored)

@@ -0,0 +1,358 @@
package hashstructure
import (
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"reflect"
)
// ErrNotStringer is returned when there's an error with hash:"string"
type ErrNotStringer struct {
Field string
}
// Error implements error for ErrNotStringer
func (ens *ErrNotStringer) Error() string {
return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
}
// HashOptions are options that are available for hashing.
type HashOptions struct {
// Hasher is the hash function to use. If this isn't set, it will
// default to FNV.
Hasher hash.Hash64
// TagName is the struct tag to look at when hashing the structure.
// By default this is "hash".
TagName string
// ZeroNil is flag determining if nil pointer should be treated equal
// to a zero value of pointed type. By default this is false.
ZeroNil bool
}
// Hash returns the hash value of an arbitrary value.
//
// If opts is nil, then default options will be used. See HashOptions
// for the default values. The same *HashOptions value cannot be used
// concurrently. None of the values within a *HashOptions struct are
// safe to read/write while hashing is being done.
//
// Notes on the value:
//
// * Unexported fields on structs are ignored and do not affect the
// hash value.
//
// * Adding an exported field to a struct with the zero value will change
// the hash value.
//
// For structs, the hashing can be controlled using tags. For example:
//
// struct {
// Name string
// UUID string `hash:"ignore"`
// }
//
// The available tag values are:
//
// * "ignore" or "-" - The field will be ignored and not affect the hash code.
//
// * "set" - The field will be treated as a set, where ordering doesn't
// affect the hash code. This only works for slices.
//
// * "string" - The field will be hashed as a string, only works when the
// field implements fmt.Stringer
//
func Hash(v interface{}, opts *HashOptions) (uint64, error) {
// Create default options
if opts == nil {
opts = &HashOptions{}
}
if opts.Hasher == nil {
opts.Hasher = fnv.New64()
}
if opts.TagName == "" {
opts.TagName = "hash"
}
// Reset the hash
opts.Hasher.Reset()
// Create our walker and walk the structure
w := &walker{
h: opts.Hasher,
tag: opts.TagName,
zeronil: opts.ZeroNil,
}
return w.visit(reflect.ValueOf(v), nil)
}
type walker struct {
h hash.Hash64
tag string
zeronil bool
}
type visitOpts struct {
// Flags are a bitmask of flags to affect behavior of this visit
Flags visitFlag
// Information about the struct containing this field
Struct interface{}
StructField string
}
func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
t := reflect.TypeOf(0)
// Loop since these can be wrapped in multiple layers of pointers
// and interfaces.
for {
// If we have an interface, dereference it. We have to do this up
// here because it might be a nil in there and the check below must
// catch that.
if v.Kind() == reflect.Interface {
v = v.Elem()
continue
}
if v.Kind() == reflect.Ptr {
if w.zeronil {
t = v.Type().Elem()
}
v = reflect.Indirect(v)
continue
}
break
}
// If it is nil, treat it like a zero.
if !v.IsValid() {
v = reflect.Zero(t)
}
// Binary writing can use raw ints, we have to convert to
// a sized-int, we'll choose the largest...
switch v.Kind() {
case reflect.Int:
v = reflect.ValueOf(int64(v.Int()))
case reflect.Uint:
v = reflect.ValueOf(uint64(v.Uint()))
case reflect.Bool:
var tmp int8
if v.Bool() {
tmp = 1
}
v = reflect.ValueOf(tmp)
}
k := v.Kind()
// We can shortcut numeric values by directly binary writing them
if k >= reflect.Int && k <= reflect.Complex64 {
// A direct hash calculation
w.h.Reset()
err := binary.Write(w.h, binary.LittleEndian, v.Interface())
return w.h.Sum64(), err
}
switch k {
case reflect.Array:
var h uint64
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
h = hashUpdateOrdered(w.h, h, current)
}
return h, nil
case reflect.Map:
var includeMap IncludableMap
if opts != nil && opts.Struct != nil {
if v, ok := opts.Struct.(IncludableMap); ok {
includeMap = v
}
}
// Build the hash for the map. We do this by XOR-ing all the key
// and value hashes. This makes it deterministic despite ordering.
var h uint64
for _, k := range v.MapKeys() {
v := v.MapIndex(k)
if includeMap != nil {
incl, err := includeMap.HashIncludeMap(
opts.StructField, k.Interface(), v.Interface())
if err != nil {
return 0, err
}
if !incl {
continue
}
}
kh, err := w.visit(k, nil)
if err != nil {
return 0, err
}
vh, err := w.visit(v, nil)
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
return h, nil
case reflect.Struct:
parent := v.Interface()
var include Includable
if impl, ok := parent.(Includable); ok {
include = impl
}
t := v.Type()
h, err := w.visit(reflect.ValueOf(t.Name()), nil)
if err != nil {
return 0, err
}
l := v.NumField()
for i := 0; i < l; i++ {
if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
var f visitFlag
fieldType := t.Field(i)
if fieldType.PkgPath != "" {
// Unexported
continue
}
tag := fieldType.Tag.Get(w.tag)
if tag == "ignore" || tag == "-" {
// Ignore this field
continue
}
// if string is set, use the string value
if tag == "string" {
if impl, ok := innerV.Interface().(fmt.Stringer); ok {
innerV = reflect.ValueOf(impl.String())
} else {
return 0, &ErrNotStringer{
Field: v.Type().Field(i).Name,
}
}
}
// Check if we implement includable and check it
if include != nil {
incl, err := include.HashInclude(fieldType.Name, innerV)
if err != nil {
return 0, err
}
if !incl {
continue
}
}
switch tag {
case "set":
f |= visitFlagSet
}
kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
if err != nil {
return 0, err
}
vh, err := w.visit(innerV, &visitOpts{
Flags: f,
Struct: parent,
StructField: fieldType.Name,
})
if err != nil {
return 0, err
}
fieldHash := hashUpdateOrdered(w.h, kh, vh)
h = hashUpdateUnordered(h, fieldHash)
}
}
return h, nil
case reflect.Slice:
// We have two behaviors here. If it isn't a set, then we just
// visit all the elements. If it is a set, then we do a deterministic
// hash code.
var h uint64
var set bool
if opts != nil {
set = (opts.Flags & visitFlagSet) != 0
}
l := v.Len()
for i := 0; i < l; i++ {
current, err := w.visit(v.Index(i), nil)
if err != nil {
return 0, err
}
if set {
h = hashUpdateUnordered(h, current)
} else {
h = hashUpdateOrdered(w.h, h, current)
}
}
return h, nil
case reflect.String:
// Directly hash
w.h.Reset()
_, err := w.h.Write([]byte(v.String()))
return w.h.Sum64(), err
default:
return 0, fmt.Errorf("unknown kind to hash: %s", k)
}
}
func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
// For ordered updates, use a real hash function
h.Reset()
// We just panic if the binary writes fail because we are writing
// an int64 which should never be fail-able.
e1 := binary.Write(h, binary.LittleEndian, a)
e2 := binary.Write(h, binary.LittleEndian, b)
if e1 != nil {
panic(e1)
}
if e2 != nil {
panic(e2)
}
return h.Sum64()
}
func hashUpdateUnordered(a, b uint64) uint64 {
return a ^ b
}
// visitFlag is used as a bitmask for affecting visit behavior
type visitFlag uint
const (
visitFlagInvalid visitFlag = iota
visitFlagSet = iota << 1
)

vendor/github.com/mitchellh/hashstructure/include.go (generated, vendored)

@@ -0,0 +1,15 @@
package hashstructure
// Includable is an interface that can optionally be implemented by
// a struct. It will be called for each field in the struct to check whether
// it should be included in the hash.
type Includable interface {
HashInclude(field string, v interface{}) (bool, error)
}
// IncludableMap is an interface that can optionally be implemented by
// a struct. It will be called when a map-type field is found to ask the
// struct if the map item should be included in the hash.
type IncludableMap interface {
HashIncludeMap(field string, k, v interface{}) (bool, error)
}