Godeps
parent dba1b6005a
commit 9168a67324
969 changed files with 126310 additions and 147369 deletions
220 Godeps/Godeps.json (generated)
|
@@ -23,15 +23,30 @@
|
|||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/distribution/digest",
|
||||
"Comment": "v2.4.0-rc.1-38-gcd27f17",
|
||||
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/distribution/reference",
|
||||
"Comment": "v2.4.0-rc.1-38-gcd27f17",
|
||||
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/mount",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/parsers",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
"ImportPath": "github.com/docker/engine-api/types",
|
||||
"Comment": "v0.3.1-62-g3d72d39",
|
||||
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/go-connections/nat",
|
||||
"Comment": "v0.2.0-2-gf549a93",
|
||||
"Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/go-units",
|
||||
|
@@ -51,14 +66,15 @@
|
|||
"ImportPath": "github.com/fatih/structs",
|
||||
"Rev": "d2e1722acaab51fc7fc55686706d08bbf9e4fafb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ghodss/yaml",
|
||||
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
||||
"Comment": "v0.1-125-g82d16f7",
|
||||
"Rev": "82d16f734d6d871204a3feb1a73cb220cc92574c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/glog",
|
||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
|
@@ -73,8 +89,8 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/info/v1",
|
||||
"Comment": "v0.22.2",
|
||||
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
|
||||
"Comment": "v0.23.0",
|
||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/gofuzz",
|
||||
|
@@ -191,209 +207,229 @@
|
|||
"Rev": "a83829b6f1293c91addabc89d0571c246397bbf4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/heapster/api/v1/types",
|
||||
"Comment": "v0.19.1-44-g0991ac5",
|
||||
"Rev": "0991ac528ea24aae194e45d6dcf01896cb42cbea"
|
||||
"ImportPath": "k8s.io/heapster/metrics/api/v1/types",
|
||||
"Comment": "v1.1.0-beta1-15-gde510e4",
|
||||
"Rev": "de510e4bdcdea96722b5bde19ff0b7a142939485"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/federation/apis/federation",
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/apps",
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/authorization",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/metrics",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/policy",
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/auth/user",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/capabilities",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/cache",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/metrics",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/record",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/restclient",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/transport",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/cloudprovider",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/controller",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/conversion",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/credentialprovider",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/fieldpath",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/fields",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/healthz",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubectl",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/labels",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/master/ports",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/registry/generic",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/runtime",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/storage",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/types",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/version",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/watch",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth",
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/third_party/forked/json",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/third_party/forked/reflect",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/third_party/golang/template",
|
||||
"Comment": "v1.3.0-alpha.2-211-g9cc1ee0",
|
||||
"Rev": "9cc1ee01fd101e023330fa5b6d063397918abd2f"
|
||||
"Comment": "v1.3.0-alpha.3-473-g41b7b04",
|
||||
"Rev": "41b7b04fafecd22127a96fc51a87b2ea34418ffc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "speter.net/go/exp/math/dec/inf",
|
||||
|
|
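Each Godeps.json entry above pins an ImportPath to an exact Rev, with an optional human-readable Comment (a git-describe string). A minimal sketch of reading that file in Go; the struct name is illustrative and only covers the fields shown above:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// godepsFile mirrors the fields used in the entries above.
type godepsFile struct {
	ImportPath string
	GoVersion  string
	Deps       []struct {
		ImportPath string
		Comment    string
		Rev        string
	}
}

func main() {
	f, err := os.Open("Godeps/Godeps.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var g godepsFile
	if err := json.NewDecoder(f).Decode(&g); err != nil {
		panic(err)
	}
	for _, dep := range g.Deps {
		fmt.Printf("%s pinned to %s\n", dep.ImportPath, dep.Rev)
	}
}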
139 Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go (generated, vendored, normal file)
|
@@ -0,0 +1,139 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
|
||||
DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
|
||||
// Digest allows simple protection of hex formatted digest strings, prefixed
|
||||
// by their algorithm. Strings of type Digest have some guarantee of being in
|
||||
// the correct format and it provides quick access to the components of a
|
||||
// digest string.
|
||||
//
|
||||
// The following is an example of the contents of Digest types:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// This allows code to abstract the digest behind this type and work only in those
|
||||
// terms.
|
||||
type Digest string
|
||||
|
||||
// NewDigest returns a Digest from alg and a hash.Hash object.
|
||||
func NewDigest(alg Algorithm, h hash.Hash) Digest {
|
||||
return NewDigestFromBytes(alg, h.Sum(nil))
|
||||
}
|
||||
|
||||
// NewDigestFromBytes returns a new digest from the byte contents of p.
|
||||
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
|
||||
// functions. This is also useful for rebuilding digests from binary
|
||||
// serializations.
|
||||
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%x", alg, p))
|
||||
}
|
||||
|
||||
// NewDigestFromHex returns a Digest from alg and the hex-encoded digest.
|
||||
func NewDigestFromHex(alg, hex string) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%s", alg, hex))
|
||||
}
|
||||
|
||||
// DigestRegexp matches valid digest types.
|
||||
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
|
||||
|
||||
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
|
||||
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
|
||||
|
||||
var (
|
||||
// ErrDigestInvalidFormat is returned when the digest format is invalid.
|
||||
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
|
||||
|
||||
// ErrDigestInvalidLength is returned when the digest has an invalid length.
|
||||
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
|
||||
|
||||
// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
|
||||
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
|
||||
)
|
||||
|
||||
// ParseDigest parses s and returns the validated digest object. An error will
|
||||
// be returned if the format is invalid.
|
||||
func ParseDigest(s string) (Digest, error) {
|
||||
d := Digest(s)
|
||||
|
||||
return d, d.Validate()
|
||||
}
|
||||
|
||||
// FromReader returns the most valid digest for the underlying content using
|
||||
// the canonical digest algorithm.
|
||||
func FromReader(rd io.Reader) (Digest, error) {
|
||||
return Canonical.FromReader(rd)
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func FromBytes(p []byte) Digest {
|
||||
return Canonical.FromBytes(p)
|
||||
}
|
||||
|
||||
// Validate checks that the contents of d is a valid digest, returning an
|
||||
// error if not.
|
||||
func (d Digest) Validate() error {
|
||||
s := string(d)
|
||||
|
||||
if !DigestRegexpAnchored.MatchString(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
i := strings.Index(s, ":")
|
||||
if i < 0 {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
// case: "sha256:" with no hex.
|
||||
if i+1 == len(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
switch algorithm := Algorithm(s[:i]); algorithm {
|
||||
case SHA256, SHA384, SHA512:
|
||||
if algorithm.Size()*2 != len(s[i+1:]) {
|
||||
return ErrDigestInvalidLength
|
||||
}
|
||||
break
|
||||
default:
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Algorithm returns the algorithm portion of the digest. This will panic if
|
||||
// the underlying digest is not in a valid format.
|
||||
func (d Digest) Algorithm() Algorithm {
|
||||
return Algorithm(d[:d.sepIndex()])
|
||||
}
|
||||
|
||||
// Hex returns the hex digest portion of the digest. This will panic if the
|
||||
// underlying digest is not in a valid format.
|
||||
func (d Digest) Hex() string {
|
||||
return string(d[d.sepIndex()+1:])
|
||||
}
|
||||
|
||||
func (d Digest) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
func (d Digest) sepIndex() int {
|
||||
i := strings.Index(string(d), ":")
|
||||
|
||||
if i < 0 {
|
||||
panic("could not find ':' in digest: " + d)
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
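A minimal usage sketch of the Digest helpers in the file above, assuming the vendored package is importable as github.com/docker/distribution/digest; the input bytes are illustrative:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// FromBytes hashes with the canonical algorithm (sha256) and returns
	// an "<algorithm>:<hex>" value wrapped in the Digest type.
	d := digest.FromBytes([]byte("hello world"))
	fmt.Println(d.Algorithm(), d.Hex())

	// ParseDigest validates an untrusted digest string before use;
	// "sha256:" with no hex fails with ErrDigestInvalidFormat.
	if _, err := digest.ParseDigest("sha256:"); err != nil {
		fmt.Println("rejected:", err)
	}
}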
82 Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go (generated, vendored, normal file)
|
@@ -0,0 +1,82 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseDigest(t *testing.T) {
|
||||
for _, testcase := range []struct {
|
||||
input string
|
||||
err error
|
||||
algorithm Algorithm
|
||||
hex string
|
||||
}{
|
||||
{
|
||||
input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
|
||||
algorithm: "sha256",
|
||||
hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
|
||||
},
|
||||
{
|
||||
input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
|
||||
algorithm: "sha384",
|
||||
hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
|
||||
},
|
||||
{
|
||||
// empty hex
|
||||
input: "sha256:",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// just hex
|
||||
input: "d41d8cd98f00b204e9800998ecf8427e",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// not hex
|
||||
input: "sha256:d41d8cd98f00b204e9800m98ecf8427e",
|
||||
err: ErrDigestInvalidFormat,
|
||||
},
|
||||
{
|
||||
// too short
|
||||
input: "sha256:abcdef0123456789",
|
||||
err: ErrDigestInvalidLength,
|
||||
},
|
||||
{
|
||||
// too short (from different algorithm)
|
||||
input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
|
||||
err: ErrDigestInvalidLength,
|
||||
},
|
||||
{
|
||||
input: "foo:d41d8cd98f00b204e9800998ecf8427e",
|
||||
err: ErrDigestUnsupported,
|
||||
},
|
||||
} {
|
||||
digest, err := ParseDigest(testcase.input)
|
||||
if err != testcase.err {
|
||||
t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err)
|
||||
}
|
||||
|
||||
if testcase.err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if digest.Algorithm() != testcase.algorithm {
|
||||
t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm)
|
||||
}
|
||||
|
||||
if digest.Hex() != testcase.hex {
|
||||
t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex)
|
||||
}
|
||||
|
||||
// Parse string return value and check equality
|
||||
newParsed, err := ParseDigest(digest.String())
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err)
|
||||
}
|
||||
|
||||
if newParsed != digest {
|
||||
t.Fatalf("expected equal: %q != %q", newParsed, digest)
|
||||
}
|
||||
}
|
||||
}
|
155 Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go (generated, vendored, normal file)
|
@@ -0,0 +1,155 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Algorithm identifies an implementation of a digester by an identifier.
|
||||
// Note that this defines both the hash algorithm used and the string
|
||||
// encoding.
|
||||
type Algorithm string
|
||||
|
||||
// supported digest types
|
||||
const (
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding
|
||||
|
||||
// Canonical is the primary digest algorithm used with the distribution
|
||||
// project. Other digests may be used but this one is the primary storage
|
||||
// digest.
|
||||
Canonical = SHA256
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(stevvooe): Follow the pattern of the standard crypto package for
|
||||
// registration of digests. Effectively, we are a registerable set and
|
||||
// common symbol access.
|
||||
|
||||
// algorithms maps values to hash.Hash implementations. Other algorithms
|
||||
// may be available but they cannot be calculated by the digest package.
|
||||
algorithms = map[Algorithm]crypto.Hash{
|
||||
SHA256: crypto.SHA256,
|
||||
SHA384: crypto.SHA384,
|
||||
SHA512: crypto.SHA512,
|
||||
}
|
||||
)
|
||||
|
||||
// Available returns true if the digest type is available for use. If this
|
||||
// returns false, calling New or Hash will panic.
|
||||
func (a Algorithm) Available() bool {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// check availability of the hash, as well
|
||||
return h.Available()
|
||||
}
|
||||
|
||||
func (a Algorithm) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// Size returns number of bytes returned by the hash.
|
||||
func (a Algorithm) Size() int {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
return h.Size()
|
||||
}
|
||||
|
||||
// Set is implemented to allow use of Algorithm as a command-line flag.
|
||||
func (a *Algorithm) Set(value string) error {
|
||||
if value == "" {
|
||||
*a = Canonical
|
||||
} else {
|
||||
// just do a type conversion, support is queried with Available.
|
||||
*a = Algorithm(value)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New returns a new digester for the specified algorithm. If the algorithm
|
||||
// does not have a digester implementation, the call will panic. This can be
|
||||
// checked by calling Available before calling New.
|
||||
func (a Algorithm) New() Digester {
|
||||
return &digester{
|
||||
alg: a,
|
||||
hash: a.Hash(),
|
||||
}
|
||||
}
|
||||
|
||||
// Hash returns a new hash as used by the algorithm. If not available, the
|
||||
// method will panic. Check Algorithm.Available() before calling.
|
||||
func (a Algorithm) Hash() hash.Hash {
|
||||
if !a.Available() {
|
||||
// NOTE(stevvooe): A missing hash is usually a programming error that
|
||||
// must be resolved at compile time. We don't import in the digest
|
||||
// package to allow users to choose their hash implementation (such as
|
||||
// when using stevvooe/resumable or a hardware accelerated package).
|
||||
//
|
||||
// Applications that may want to resolve the hash at runtime should
|
||||
// call Algorithm.Available before calling Algorithm.Hash().
|
||||
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
|
||||
}
|
||||
|
||||
return algorithms[a].New()
|
||||
}
|
||||
|
||||
// FromReader returns the digest of the reader using the algorithm.
|
||||
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
|
||||
digester := a.New()
|
||||
|
||||
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return digester.Digest(), nil
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func (a Algorithm) FromBytes(p []byte) Digest {
|
||||
digester := a.New()
|
||||
|
||||
if _, err := digester.Hash().Write(p); err != nil {
|
||||
// Writes to a Hash should never fail. None of the existing
|
||||
// hash implementations in the stdlib or hashes vendored
|
||||
// here can return errors from Write. Having a panic in this
|
||||
// condition instead of having FromBytes return an error value
|
||||
// avoids unnecessary error handling paths in all callers.
|
||||
panic("write to hash function returned error: " + err.Error())
|
||||
}
|
||||
|
||||
return digester.Digest()
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Allow resolution of verifiers using the digest type and
|
||||
// this registration system.
|
||||
|
||||
// Digester calculates the digest of written data. Writes should go directly
|
||||
// to the return value of Hash, while calling Digest will return the current
|
||||
// value of the digest.
|
||||
type Digester interface {
|
||||
Hash() hash.Hash // provides direct access to underlying hash instance.
|
||||
Digest() Digest
|
||||
}
|
||||
|
||||
// digester provides a simple digester definition that embeds a hasher.
|
||||
type digester struct {
|
||||
alg Algorithm
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (d *digester) Hash() hash.Hash {
|
||||
return d.hash
|
||||
}
|
||||
|
||||
func (d *digester) Digest() Digest {
|
||||
return NewDigest(d.alg, d.hash)
|
||||
}
|
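The Algorithm and Digester types above support streaming: a sketch of hashing a file through a Digester, assuming the vendored import path; the file name is illustrative:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/docker/distribution/digest"
)

func main() {
	f, err := os.Open("layer.tar") // illustrative input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// A Digester wraps a hash.Hash: write through Hash() and read the
	// result with Digest(). Canonical is sha256 in this package.
	digester := digest.Canonical.New()
	if _, err := io.Copy(digester.Hash(), f); err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest())
}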
21 Godeps/_workspace/src/github.com/docker/distribution/digest/digester_resumable_test.go (generated, vendored, normal file)
|
@@ -0,0 +1,21 @@
|
|||
// +build !noresumabledigest
|
||||
|
||||
package digest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stevvooe/resumable"
|
||||
_ "github.com/stevvooe/resumable/sha256"
|
||||
)
|
||||
|
||||
// TestResumableDetection just ensures that the resumable capability of a hash
|
||||
// is exposed through the digester type, which is just a hash plus a Digest
|
||||
// method.
|
||||
func TestResumableDetection(t *testing.T) {
|
||||
d := Canonical.New()
|
||||
|
||||
if _, ok := d.Hash().(resumable.Hash); !ok {
|
||||
t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash())
|
||||
}
|
||||
}
|
42 Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go (generated, vendored, normal file)
|
@@ -0,0 +1,42 @@
|
|||
// Package digest provides a generalized type to opaquely represent message
|
||||
// digests and their operations within the registry. The Digest type is
|
||||
// designed to serve as a flexible identifier in a content-addressable system.
|
||||
// More importantly, it provides tools and wrappers to work with
|
||||
// hash.Hash-based digests with little effort.
|
||||
//
|
||||
// Basics
|
||||
//
|
||||
// The format of a digest is simply a string with two parts, dubbed the
|
||||
// "algorithm" and the "digest", separated by a colon:
|
||||
//
|
||||
// <algorithm>:<digest>
|
||||
//
|
||||
// An example of a sha256 digest representation follows:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// In this case, the string "sha256" is the algorithm and the hex bytes are
|
||||
// the "digest".
|
||||
//
|
||||
// Because the Digest type is simply a string, once a valid Digest is
|
||||
// obtained, comparisons are cheap, quick and simple to express with the
|
||||
// standard equality operator.
|
||||
//
|
||||
// Verification
|
||||
//
|
||||
// The main benefit of using the Digest type is simple verification against a
|
||||
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
|
||||
// interface, provides a common write sink for digest verification. After
|
||||
// writing is complete, calling the Verifier.Verified method will indicate
|
||||
// whether or not the stream of bytes matches the target digest.
|
||||
//
|
||||
// Missing Features
|
||||
//
|
||||
// In addition to the above, we intend to add the following features to this
|
||||
// package:
|
||||
//
|
||||
// 1. A Digester type that supports write sink digest calculation.
|
||||
//
|
||||
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
|
||||
//
|
||||
package digest
|
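As the package documentation above notes, a Digest is just a string, so comparing two valid digests is an ordinary equality check; a short sketch under the same import-path assumption, with illustrative input:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	want := digest.FromBytes([]byte("manifest body"))
	got := digest.FromBytes([]byte("manifest body"))

	// No special comparison helper is needed; == on the string-backed
	// type is enough once both sides are known-valid digests.
	fmt.Println(want == got)
}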
245 Godeps/_workspace/src/github.com/docker/distribution/digest/set.go (generated, vendored, normal file)
|
@@ -0,0 +1,245 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := ParseDigest(d)
|
||||
if err == ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An error will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length parameter is the minimum length; the maximum length may be the
|
||||
// entire digest value if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg Algorithm
|
||||
val string
|
||||
digest Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
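A sketch of the Set type above being used for short-prefix lookup, assuming the vendored import path; the blob contents and prefix lengths are illustrative:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	dset := digest.NewSet()
	for _, blob := range []string{"blob-a", "blob-b", "blob-c"} {
		if err := dset.Add(digest.FromBytes([]byte(blob))); err != nil {
			panic(err)
		}
	}

	// Lookup accepts a full digest string or an unambiguous hex prefix.
	full := digest.FromBytes([]byte("blob-a"))
	if d, err := dset.Lookup(full.Hex()[:12]); err == nil {
		fmt.Println("resolved short form to", d)
	}

	// ShortCodeTable reports the shortest unique prefix (at least 8
	// characters here) for every digest in the set.
	for d, short := range digest.ShortCodeTable(dset, 8) {
		fmt.Println(short, "=>", d)
	}
}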
368 Godeps/_workspace/src/github.com/docker/distribution/digest/set_test.go (generated, vendored, normal file)
|
@@ -0,0 +1,368 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertEqualDigests(t *testing.T, d1, d2 Digest) {
|
||||
if d1 != d2 {
|
||||
t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookup(t *testing.T) {
|
||||
digests := []Digest{
|
||||
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:5432111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6543111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6432111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6542111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6532111111111111111111111111111111111111111111111111111111111111",
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
dgst, err := dset.Lookup("54")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[3])
|
||||
|
||||
dgst, err = dset.Lookup("1234")
|
||||
if err == nil {
|
||||
t.Fatal("Expected ambiguous error looking up: 1234")
|
||||
}
|
||||
if err != ErrDigestAmbiguous {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dgst, err = dset.Lookup("9876")
|
||||
if err == nil {
|
||||
t.Fatal("Expected ambiguous error looking up: 9876")
|
||||
}
|
||||
if err != ErrDigestNotFound {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dgst, err = dset.Lookup("sha256:1234")
|
||||
if err == nil {
|
||||
t.Fatal("Expected ambiguous error looking up: sha256:1234")
|
||||
}
|
||||
if err != ErrDigestAmbiguous {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dgst, err = dset.Lookup("sha256:12345")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[0])
|
||||
|
||||
dgst, err = dset.Lookup("sha256:12346")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[2])
|
||||
|
||||
dgst, err = dset.Lookup("12346")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[2])
|
||||
|
||||
dgst, err = dset.Lookup("12345")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[0])
|
||||
}
|
||||
|
||||
func TestAddDuplication(t *testing.T) {
|
||||
digests := []Digest{
|
||||
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:5432111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6543111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(dset.entries) != 8 {
|
||||
t.Fatal("Invalid dset size")
|
||||
}
|
||||
|
||||
if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(dset.entries) != 8 {
|
||||
t.Fatal("Duplicate digest insert allowed")
|
||||
}
|
||||
|
||||
if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(dset.entries) != 9 {
|
||||
t.Fatal("Insert with different algorithm not allowed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
digests, err := createDigests(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
dgst, err := dset.Lookup(digests[0].String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dgst != digests[0] {
|
||||
t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst)
|
||||
}
|
||||
|
||||
if err := dset.Remove(digests[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound {
|
||||
t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAll(t *testing.T) {
|
||||
digests, err := createDigests(100)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
all := map[Digest]struct{}{}
|
||||
for _, dgst := range dset.All() {
|
||||
all[dgst] = struct{}{}
|
||||
}
|
||||
|
||||
if len(all) != len(digests) {
|
||||
t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all))
|
||||
}
|
||||
|
||||
for i, dgst := range digests {
|
||||
if _, ok := all[dgst]; !ok {
|
||||
t.Fatalf("Missing element at position %d: %s", i, dgst)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func assertEqualShort(t *testing.T, actual, expected string) {
|
||||
if actual != expected {
|
||||
t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestShortCodeTable(t *testing.T) {
|
||||
digests := []Digest{
|
||||
"sha256:1234111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234511111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:1234611111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:5432111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6543111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6432111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6542111111111111111111111111111111111111111111111111111111111111",
|
||||
"sha256:6532111111111111111111111111111111111111111111111111111111111111",
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
dump := ShortCodeTable(dset, 2)
|
||||
|
||||
if len(dump) < len(digests) {
|
||||
t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests))
|
||||
}
|
||||
assertEqualShort(t, dump[digests[0]], "12341")
|
||||
assertEqualShort(t, dump[digests[1]], "12345")
|
||||
assertEqualShort(t, dump[digests[2]], "12346")
|
||||
assertEqualShort(t, dump[digests[3]], "54")
|
||||
assertEqualShort(t, dump[digests[4]], "6543")
|
||||
assertEqualShort(t, dump[digests[5]], "64")
|
||||
assertEqualShort(t, dump[digests[6]], "6542")
|
||||
assertEqualShort(t, dump[digests[7]], "653")
|
||||
}
|
||||
|
||||
func createDigests(count int) ([]Digest, error) {
|
||||
r := rand.New(rand.NewSource(25823))
|
||||
digests := make([]Digest, count)
|
||||
for i := range digests {
|
||||
h := sha256.New()
|
||||
if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
digests[i] = NewDigest("sha256", h)
|
||||
}
|
||||
return digests, nil
|
||||
}
|
||||
|
||||
func benchAddNTable(b *testing.B, n int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
for j := range digests {
|
||||
if err = dset.Add(digests[j]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchLookupNTable(b *testing.B, n int, shortLen int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
shorts := make([]string, 0, n)
|
||||
for _, short := range ShortCodeTable(dset, shortLen) {
|
||||
shorts = append(shorts, short)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err = dset.Lookup(shorts[i%n]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchRemoveNTable(b *testing.B, n int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
b.StopTimer()
|
||||
for j := range digests {
|
||||
if err = dset.Add(digests[j]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
b.StartTimer()
|
||||
for j := range digests {
|
||||
if err = dset.Remove(digests[j]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchShortCodeNTable(b *testing.B, n int, shortLen int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ShortCodeTable(dset, shortLen)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAdd10(b *testing.B) {
|
||||
benchAddNTable(b, 10)
|
||||
}
|
||||
|
||||
func BenchmarkAdd100(b *testing.B) {
|
||||
benchAddNTable(b, 100)
|
||||
}
|
||||
|
||||
func BenchmarkAdd1000(b *testing.B) {
|
||||
benchAddNTable(b, 1000)
|
||||
}
|
||||
|
||||
func BenchmarkRemove10(b *testing.B) {
|
||||
benchRemoveNTable(b, 10)
|
||||
}
|
||||
|
||||
func BenchmarkRemove100(b *testing.B) {
|
||||
benchRemoveNTable(b, 100)
|
||||
}
|
||||
|
||||
func BenchmarkRemove1000(b *testing.B) {
|
||||
benchRemoveNTable(b, 1000)
|
||||
}
|
||||
|
||||
func BenchmarkLookup10(b *testing.B) {
|
||||
benchLookupNTable(b, 10, 12)
|
||||
}
|
||||
|
||||
func BenchmarkLookup100(b *testing.B) {
|
||||
benchLookupNTable(b, 100, 12)
|
||||
}
|
||||
|
||||
func BenchmarkLookup1000(b *testing.B) {
|
||||
benchLookupNTable(b, 1000, 12)
|
||||
}
|
||||
|
||||
func BenchmarkShortCode10(b *testing.B) {
|
||||
benchShortCodeNTable(b, 10, 12)
|
||||
}
|
||||
func BenchmarkShortCode100(b *testing.B) {
|
||||
benchShortCodeNTable(b, 100, 12)
|
||||
}
|
||||
func BenchmarkShortCode1000(b *testing.B) {
|
||||
benchShortCodeNTable(b, 1000, 12)
|
||||
}
|
44 Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go (generated, vendored, normal file)
|
@@ -0,0 +1,44 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Verifier presents a general verification interface to be used with message
|
||||
// digests and other byte stream verifications. Users instantiate a Verifier
|
||||
// from one of the various methods, write the data under test to it then check
|
||||
// the result with the Verified method.
|
||||
type Verifier interface {
|
||||
io.Writer
|
||||
|
||||
// Verified will return true if the content written to Verifier matches
|
||||
// the digest.
|
||||
Verified() bool
|
||||
}
|
||||
|
||||
// NewDigestVerifier returns a verifier that compares the written bytes
|
||||
// against a passed in digest.
|
||||
func NewDigestVerifier(d Digest) (Verifier, error) {
|
||||
if err := d.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return hashVerifier{
|
||||
hash: d.Algorithm().Hash(),
|
||||
digest: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type hashVerifier struct {
|
||||
digest Digest
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (hv hashVerifier) Write(p []byte) (n int, err error) {
|
||||
return hv.hash.Write(p)
|
||||
}
|
||||
|
||||
func (hv hashVerifier) Verified() bool {
|
||||
return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
|
||||
}
|
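A sketch of verifying a stream against an expected digest with the Verifier above, assuming the vendored import path; the payload is illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("some blob contents")
	expected := digest.FromBytes(payload)

	// A Verifier is a write sink: stream the untrusted copy into it,
	// then ask whether the written bytes matched the expected digest.
	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(verifier, bytes.NewReader(payload)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified())
}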
49 Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go (generated, vendored, normal file)
|
@@ -0,0 +1,49 @@
|
|||
package digest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDigestVerifier(t *testing.T) {
|
||||
p := make([]byte, 1<<20)
|
||||
rand.Read(p)
|
||||
digest := FromBytes(p)
|
||||
|
||||
verifier, err := NewDigestVerifier(digest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting digest verifier: %s", err)
|
||||
}
|
||||
|
||||
io.Copy(verifier, bytes.NewReader(p))
|
||||
|
||||
if !verifier.Verified() {
|
||||
t.Fatalf("bytes not verified")
|
||||
}
|
||||
}
|
||||
|
||||
// TestVerifierUnsupportedDigest ensures that unsupported digest validation is
|
||||
// flowing through verifier creation.
|
||||
func TestVerifierUnsupportedDigest(t *testing.T) {
|
||||
unsupported := Digest("bean:0123456789abcdef")
|
||||
|
||||
_, err := NewDigestVerifier(unsupported)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error when creating verifier")
|
||||
}
|
||||
|
||||
if err != ErrDigestUnsupported {
|
||||
t.Fatalf("incorrect error for unsupported digest: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for
|
||||
// DigestVerifier.
|
||||
//
|
||||
// The relevant benchmark for comparison can be run with the following
|
||||
// commands:
|
||||
//
|
||||
// go test -bench . crypto/sha1
|
||||
//
|
334 Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go (generated, vendored, normal file)
|
@@ -0,0 +1,334 @@
|
|||
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [hostname '/'] component ['/' component]*
|
||||
// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
|
||||
// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||
NameTotalLengthMax = 255
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
|
||||
ErrReferenceInvalidFormat = errors.New("invalid reference format")
|
||||
|
||||
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrTagInvalidFormat = errors.New("invalid tag format")
|
||||
|
||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
// modifiers such as a hostname, name, tag, and digest.
|
||||
type Reference interface {
|
||||
// String returns the full reference
|
||||
String() string
|
||||
}
|
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
|
||||
// working with encoding.
|
||||
type Field struct {
|
||||
reference Reference
|
||||
}
|
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field {
|
||||
return Field{reference}
|
||||
}
|
||||
|
||||
// Reference unwraps the reference type from the field to
|
||||
// return the Reference object. This object should be
|
||||
// of the appropriate type to further check for different
|
||||
// reference types.
|
||||
func (f Field) Reference() Reference {
|
||||
return f.reference
|
||||
}
|
||||
|
||||
// MarshalText serializes the field to byte text which
|
||||
// is the string of the reference.
|
||||
func (f Field) MarshalText() (p []byte, err error) {
|
||||
return []byte(f.reference.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error {
|
||||
r, err := Parse(string(p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reference = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Named is an object with a full name
|
||||
type Named interface {
|
||||
Reference
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Tagged is an object which has a tag
|
||||
type Tagged interface {
|
||||
Reference
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// NamedTagged is an object including a name and tag.
|
||||
type NamedTagged interface {
|
||||
Named
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Digested is an object which has a digest
|
||||
// by which it can be referenced.
|
||||
type Digested interface {
|
||||
Reference
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with hostname and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
name := named.Name()
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if match == nil || len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: Parse will not handle short digests.
|
||||
func Parse(s string) (Reference, error) {
|
||||
matches := ReferenceRegexp.FindStringSubmatch(s)
|
||||
if matches == nil {
|
||||
if s == "" {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
// TODO(dmcgowan): Provide more specific and helpful error
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
|
||||
if len(matches[1]) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
ref := reference{
|
||||
name: matches[1],
|
||||
tag: matches[2],
|
||||
}
|
||||
if matches[3] != "" {
|
||||
var err error
|
||||
ref.digest, err = digest.ParseDigest(matches[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r := getBestReferenceType(ref)
|
||||
if r == nil {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||
// the Named interface. The reference must have a name, otherwise an error is
|
||||
// returned.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: ParseNamed will not handle short digests.
|
||||
func ParseNamed(s string) (Named, error) {
|
||||
ref, err := Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
named, isNamed := ref.(Named)
|
||||
if !isNamed {
|
||||
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// WithName returns a named object representing the given string. If the input
|
||||
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||
func WithName(name string) (Named, error) {
|
||||
if len(name) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
if !anchoredNameRegexp.MatchString(name) {
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
return repository(name), nil
|
||||
}
|
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||
// reference incorporating both the name and the tag.
|
||||
func WithTag(name Named, tag string) (NamedTagged, error) {
|
||||
if !anchoredTagRegexp.MatchString(tag) {
|
||||
return nil, ErrTagInvalidFormat
|
||||
}
|
||||
return taggedReference{
|
||||
name: name.Name(),
|
||||
tag: tag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||
// a reference incorporating both the name and the digest.
|
||||
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||
return nil, ErrDigestInvalidFormat
|
||||
}
|
||||
return canonicalReference{
|
||||
name: name.Name(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
if ref.name == "" {
|
||||
// Allow digest only references
|
||||
if ref.digest != "" {
|
||||
return digestReference(ref.digest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ref.tag == "" {
|
||||
if ref.digest != "" {
|
||||
return canonicalReference{
|
||||
name: ref.name,
|
||||
digest: ref.digest,
|
||||
}
|
||||
}
|
||||
return repository(ref.name)
|
||||
}
|
||||
if ref.digest == "" {
|
||||
return taggedReference{
|
||||
name: ref.name,
|
||||
tag: ref.tag,
|
||||
}
|
||||
}
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
type reference struct {
|
||||
name string
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (r reference) String() string {
|
||||
return r.name + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Name() string {
|
||||
return r.name
|
||||
}
|
||||
|
||||
func (r reference) Tag() string {
|
||||
return r.tag
|
||||
}
|
||||
|
||||
func (r reference) Digest() digest.Digest {
|
||||
return r.digest
|
||||
}
|
||||
|
||||
type repository string
|
||||
|
||||
func (r repository) String() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
func (r repository) Name() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
type digestReference digest.Digest
|
||||
|
||||
func (d digestReference) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
func (d digestReference) Digest() digest.Digest {
|
||||
return digest.Digest(d)
|
||||
}
|
||||
|
||||
type taggedReference struct {
|
||||
name string
|
||||
tag string
|
||||
}
|
||||
|
||||
func (t taggedReference) String() string {
|
||||
return t.name + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
func (t taggedReference) Tag() string {
|
||||
return t.tag
|
||||
}
|
||||
|
||||
type canonicalReference struct {
|
||||
name string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (c canonicalReference) String() string {
|
||||
return c.name + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
func (c canonicalReference) Digest() digest.Digest {
|
||||
return c.digest
|
||||
}
|
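For orientation, the grammar documented at the top of this file maps onto a small API surface: Parse and ParseNamed return the most specific reference type, SplitHostname recovers the hostname, and the Tagged/Digested interfaces expose the optional parts. A minimal consumer-side sketch; the registry host and repository names are made up for illustration:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a tagged reference; ParseNamed fails if the input has no name.
	ref, err := reference.ParseNamed("registry.example.com:5000/team/app:v1.2")
	if err != nil {
		panic(err)
	}

	hostname, remoteName := reference.SplitHostname(ref)
	fmt.Println(hostname)   // registry.example.com:5000
	fmt.Println(remoteName) // team/app

	if tagged, ok := ref.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // v1.2
	}
}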
535
Godeps/_workspace/src/github.com/docker/distribution/reference/reference_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,535 @@
|
|||
package reference
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
)
|
||||
|
||||
func TestReferenceParse(t *testing.T) {
|
||||
// referenceTestcases is a unified set of testcases for
|
||||
// testing the parsing of references
|
||||
referenceTestcases := []struct {
|
||||
// input is the repository name or name component testcase
|
||||
input string
|
||||
// err is the error expected from Parse, or nil
|
||||
err error
|
||||
// repository is the string representation for the reference
|
||||
repository string
|
||||
// hostname is the hostname expected in the reference
|
||||
hostname string
|
||||
// tag is the tag for the reference
|
||||
tag string
|
||||
// digest is the digest for the reference (enforces digest reference)
|
||||
digest string
|
||||
}{
|
||||
{
|
||||
input: "test_com",
|
||||
repository: "test_com",
|
||||
},
|
||||
{
|
||||
input: "test.com:tag",
|
||||
repository: "test.com",
|
||||
tag: "tag",
|
||||
},
|
||||
{
|
||||
input: "test.com:5000",
|
||||
repository: "test.com",
|
||||
tag: "5000",
|
||||
},
|
||||
{
|
||||
input: "test.com/repo:tag",
|
||||
hostname: "test.com",
|
||||
repository: "test.com/repo",
|
||||
tag: "tag",
|
||||
},
|
||||
{
|
||||
input: "test:5000/repo",
|
||||
hostname: "test:5000",
|
||||
repository: "test:5000/repo",
|
||||
},
|
||||
{
|
||||
input: "test:5000/repo:tag",
|
||||
hostname: "test:5000",
|
||||
repository: "test:5000/repo",
|
||||
tag: "tag",
|
||||
},
|
||||
{
|
||||
input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
hostname: "test:5000",
|
||||
repository: "test:5000/repo",
|
||||
digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
},
|
||||
{
|
||||
input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
hostname: "test:5000",
|
||||
repository: "test:5000/repo",
|
||||
tag: "tag",
|
||||
digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
},
|
||||
{
|
||||
input: "test:5000/repo",
|
||||
hostname: "test:5000",
|
||||
repository: "test:5000/repo",
|
||||
},
|
||||
{
|
||||
input: "",
|
||||
err: ErrNameEmpty,
|
||||
},
|
||||
{
|
||||
input: ":justtag",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
{
|
||||
input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
{
|
||||
input: "repo@sha256:ffffffffffffffffffffffffffffffffff",
|
||||
err: digest.ErrDigestInvalidLength,
|
||||
},
|
||||
{
|
||||
input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
err: digest.ErrDigestUnsupported,
|
||||
},
|
||||
{
|
||||
input: strings.Repeat("a/", 128) + "a:tag",
|
||||
err: ErrNameTooLong,
|
||||
},
|
||||
{
|
||||
input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max",
|
||||
hostname: "a",
|
||||
repository: strings.Repeat("a/", 127) + "a",
|
||||
tag: "tag-puts-this-over-max",
|
||||
},
|
||||
{
|
||||
input: "aa/asdf$$^/aa",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
{
|
||||
input: "sub-dom1.foo.com/bar/baz/quux",
|
||||
hostname: "sub-dom1.foo.com",
|
||||
repository: "sub-dom1.foo.com/bar/baz/quux",
|
||||
},
|
||||
{
|
||||
input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag",
|
||||
hostname: "sub-dom1.foo.com",
|
||||
repository: "sub-dom1.foo.com/bar/baz/quux",
|
||||
tag: "some-long-tag",
|
||||
},
|
||||
{
|
||||
input: "b.gcr.io/test.example.com/my-app:test.example.com",
|
||||
hostname: "b.gcr.io",
|
||||
repository: "b.gcr.io/test.example.com/my-app",
|
||||
tag: "test.example.com",
|
||||
},
|
||||
{
|
||||
input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode
|
||||
hostname: "xn--n3h.com",
|
||||
repository: "xn--n3h.com/myimage",
|
||||
tag: "xn--n3h.com",
|
||||
},
|
||||
{
|
||||
input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode
|
||||
hostname: "xn--7o8h.com",
|
||||
repository: "xn--7o8h.com/myimage",
|
||||
tag: "xn--7o8h.com",
|
||||
digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
},
|
||||
{
|
||||
input: "foo_bar.com:8080",
|
||||
repository: "foo_bar.com",
|
||||
tag: "8080",
|
||||
},
|
||||
{
|
||||
input: "foo/foo_bar.com:8080",
|
||||
hostname: "foo",
|
||||
repository: "foo/foo_bar.com",
|
||||
tag: "8080",
|
||||
},
|
||||
}
|
||||
for _, testcase := range referenceTestcases {
|
||||
failf := func(format string, v ...interface{}) {
|
||||
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
repo, err := Parse(testcase.input)
|
||||
if testcase.err != nil {
|
||||
if err == nil {
|
||||
failf("missing expected error: %v", testcase.err)
|
||||
} else if testcase.err != err {
|
||||
failf("mismatched error: got %v, expected %v", err, testcase.err)
|
||||
}
|
||||
continue
|
||||
} else if err != nil {
|
||||
failf("unexpected parse error: %v", err)
|
||||
continue
|
||||
}
|
||||
if repo.String() != testcase.input {
|
||||
failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input)
|
||||
}
|
||||
|
||||
if named, ok := repo.(Named); ok {
|
||||
if named.Name() != testcase.repository {
|
||||
failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository)
|
||||
}
|
||||
hostname, _ := SplitHostname(named)
|
||||
if hostname != testcase.hostname {
|
||||
failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname)
|
||||
}
|
||||
} else if testcase.repository != "" || testcase.hostname != "" {
|
||||
failf("expected named type, got %T", repo)
|
||||
}
|
||||
|
||||
tagged, ok := repo.(Tagged)
|
||||
if testcase.tag != "" {
|
||||
if ok {
|
||||
if tagged.Tag() != testcase.tag {
|
||||
failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
|
||||
}
|
||||
} else {
|
||||
failf("expected tagged type, got %T", repo)
|
||||
}
|
||||
} else if ok {
|
||||
failf("unexpected tagged type")
|
||||
}
|
||||
|
||||
digested, ok := repo.(Digested)
|
||||
if testcase.digest != "" {
|
||||
if ok {
|
||||
if digested.Digest().String() != testcase.digest {
|
||||
failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
|
||||
}
|
||||
} else {
|
||||
failf("expected digested type, got %T", repo)
|
||||
}
|
||||
} else if ok {
|
||||
failf("unexpected digested type")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// TestWithNameFailure tests cases where WithName should fail. Cases where it
|
||||
// should succeed are covered by TestSplitHostname, below.
|
||||
func TestWithNameFailure(t *testing.T) {
|
||||
testcases := []struct {
|
||||
input string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
input: "",
|
||||
err: ErrNameEmpty,
|
||||
},
|
||||
{
|
||||
input: ":justtag",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
{
|
||||
input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
{
|
||||
input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
{
|
||||
input: strings.Repeat("a/", 128) + "a:tag",
|
||||
err: ErrNameTooLong,
|
||||
},
|
||||
{
|
||||
input: "aa/asdf$$^/aa",
|
||||
err: ErrReferenceInvalidFormat,
|
||||
},
|
||||
}
|
||||
for _, testcase := range testcases {
|
||||
failf := func(format string, v ...interface{}) {
|
||||
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
_, err := WithName(testcase.input)
|
||||
if err == nil {
|
||||
failf("no error parsing name. expected: %s", testcase.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitHostname(t *testing.T) {
|
||||
testcases := []struct {
|
||||
input string
|
||||
hostname string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
input: "test.com/foo",
|
||||
hostname: "test.com",
|
||||
name: "foo",
|
||||
},
|
||||
{
|
||||
input: "test_com/foo",
|
||||
hostname: "",
|
||||
name: "test_com/foo",
|
||||
},
|
||||
{
|
||||
input: "test:8080/foo",
|
||||
hostname: "test:8080",
|
||||
name: "foo",
|
||||
},
|
||||
{
|
||||
input: "test.com:8080/foo",
|
||||
hostname: "test.com:8080",
|
||||
name: "foo",
|
||||
},
|
||||
{
|
||||
input: "test-com:8080/foo",
|
||||
hostname: "test-com:8080",
|
||||
name: "foo",
|
||||
},
|
||||
{
|
||||
input: "xn--n3h.com:18080/foo",
|
||||
hostname: "xn--n3h.com:18080",
|
||||
name: "foo",
|
||||
},
|
||||
}
|
||||
for _, testcase := range testcases {
|
||||
failf := func(format string, v ...interface{}) {
|
||||
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
named, err := WithName(testcase.input)
|
||||
if err != nil {
|
||||
failf("error parsing name: %s", err)
|
||||
}
|
||||
hostname, name := SplitHostname(named)
|
||||
if hostname != testcase.hostname {
|
||||
failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname)
|
||||
}
|
||||
if name != testcase.name {
|
||||
failf("unexpected name: got %q, expected %q", name, testcase.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type serializationType struct {
|
||||
Description string
|
||||
Field Field
|
||||
}
|
||||
|
||||
func TestSerialization(t *testing.T) {
|
||||
testcases := []struct {
|
||||
description string
|
||||
input string
|
||||
name string
|
||||
tag string
|
||||
digest string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
description: "empty value",
|
||||
err: ErrNameEmpty,
|
||||
},
|
||||
{
|
||||
description: "just a name",
|
||||
input: "example.com:8000/named",
|
||||
name: "example.com:8000/named",
|
||||
},
|
||||
{
|
||||
description: "name with a tag",
|
||||
input: "example.com:8000/named:tagged",
|
||||
name: "example.com:8000/named",
|
||||
tag: "tagged",
|
||||
},
|
||||
{
|
||||
description: "name with digest",
|
||||
input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112",
|
||||
name: "other.com/named",
|
||||
digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112",
|
||||
},
|
||||
}
|
||||
for _, testcase := range testcases {
|
||||
failf := func(format string, v ...interface{}) {
|
||||
t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
m := map[string]string{
|
||||
"Description": testcase.description,
|
||||
"Field": testcase.input,
|
||||
}
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
failf("error marshalling: %v", err)
|
||||
}
|
||||
t := serializationType{}
|
||||
|
||||
if err := json.Unmarshal(b, &t); err != nil {
|
||||
if testcase.err == nil {
|
||||
failf("error unmarshalling: %v", err)
|
||||
}
|
||||
if err != testcase.err {
|
||||
failf("wrong error, expected %v, got %v", testcase.err, err)
|
||||
}
|
||||
|
||||
continue
|
||||
} else if testcase.err != nil {
|
||||
failf("expected error unmarshalling: %v", testcase.err)
|
||||
}
|
||||
|
||||
if t.Description != testcase.description {
|
||||
failf("wrong description, expected %q, got %q", testcase.description, t.Description)
|
||||
}
|
||||
|
||||
ref := t.Field.Reference()
|
||||
|
||||
if named, ok := ref.(Named); ok {
|
||||
if named.Name() != testcase.name {
|
||||
failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
|
||||
}
|
||||
} else if testcase.name != "" {
|
||||
failf("expected named type, got %T", ref)
|
||||
}
|
||||
|
||||
tagged, ok := ref.(Tagged)
|
||||
if testcase.tag != "" {
|
||||
if ok {
|
||||
if tagged.Tag() != testcase.tag {
|
||||
failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
|
||||
}
|
||||
} else {
|
||||
failf("expected tagged type, got %T", ref)
|
||||
}
|
||||
} else if ok {
|
||||
failf("unexpected tagged type")
|
||||
}
|
||||
|
||||
digested, ok := ref.(Digested)
|
||||
if testcase.digest != "" {
|
||||
if ok {
|
||||
if digested.Digest().String() != testcase.digest {
|
||||
failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
|
||||
}
|
||||
} else {
|
||||
failf("expected digested type, got %T", ref)
|
||||
}
|
||||
} else if ok {
|
||||
failf("unexpected digested type")
|
||||
}
|
||||
|
||||
t = serializationType{
|
||||
Description: testcase.description,
|
||||
Field: AsField(ref),
|
||||
}
|
||||
|
||||
b2, err := json.Marshal(t)
|
||||
if err != nil {
|
||||
failf("error marshing serialization type: %v", err)
|
||||
}
|
||||
|
||||
if string(b) != string(b2) {
|
||||
failf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
|
||||
}
|
||||
|
||||
// Ensure t.Field is not implementing "Reference" directly, getting
|
||||
// around the Reference type system
|
||||
var fieldInterface interface{} = t.Field
|
||||
if _, ok := fieldInterface.(Reference); ok {
|
||||
failf("field should not implement Reference interface")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithTag(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
tag string
|
||||
combined string
|
||||
}{
|
||||
{
|
||||
name: "test.com/foo",
|
||||
tag: "tag",
|
||||
combined: "test.com/foo:tag",
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
tag: "tag2",
|
||||
combined: "foo:tag2",
|
||||
},
|
||||
{
|
||||
name: "test.com:8000/foo",
|
||||
tag: "tag4",
|
||||
combined: "test.com:8000/foo:tag4",
|
||||
},
|
||||
{
|
||||
name: "test.com:8000/foo",
|
||||
tag: "TAG5",
|
||||
combined: "test.com:8000/foo:TAG5",
|
||||
},
|
||||
}
|
||||
for _, testcase := range testcases {
|
||||
failf := func(format string, v ...interface{}) {
|
||||
t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
named, err := WithName(testcase.name)
|
||||
if err != nil {
|
||||
failf("error parsing name: %s", err)
|
||||
}
|
||||
tagged, err := WithTag(named, testcase.tag)
|
||||
if err != nil {
|
||||
failf("WithTag failed: %s", err)
|
||||
}
|
||||
if tagged.String() != testcase.combined {
|
||||
failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithDigest(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
digest digest.Digest
|
||||
combined string
|
||||
}{
|
||||
{
|
||||
name: "test.com/foo",
|
||||
digest: "sha256:1234567890098765432112345667890098765",
|
||||
combined: "test.com/foo@sha256:1234567890098765432112345667890098765",
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
digest: "sha256:1234567890098765432112345667890098765",
|
||||
combined: "foo@sha256:1234567890098765432112345667890098765",
|
||||
},
|
||||
{
|
||||
name: "test.com:8000/foo",
|
||||
digest: "sha256:1234567890098765432112345667890098765",
|
||||
combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765",
|
||||
},
|
||||
}
|
||||
for _, testcase := range testcases {
|
||||
failf := func(format string, v ...interface{}) {
|
||||
t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
named, err := WithName(testcase.name)
|
||||
if err != nil {
|
||||
failf("error parsing name: %s", err)
|
||||
}
|
||||
digested, err := WithDigest(named, testcase.digest)
|
||||
if err != nil {
|
||||
failf("WithDigest failed: %s", err)
|
||||
}
|
||||
if digested.String() != testcase.combined {
|
||||
failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
|
||||
}
|
||||
}
|
||||
}
|
124
Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go
generated
vendored
Normal file
|
@@ -0,0 +1,124 @@
|
|||
package reference
|
||||
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
// alphaNumericRegexp defines the alpha numeric atom, typically a
|
||||
// component of names. This only allows lower case characters and digits.
|
||||
alphaNumericRegexp = match(`[a-z0-9]+`)
|
||||
|
||||
// separatorRegexp defines the separators allowed to be embedded in name
|
||||
// components. This allows one period, one or two underscores, and multiple
|
||||
// dashes.
|
||||
separatorRegexp = match(`(?:[._]|__|[-]*)`)
|
||||
|
||||
// nameComponentRegexp restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscores, and multiple dashes.
|
||||
nameComponentRegexp = expression(
|
||||
alphaNumericRegexp,
|
||||
optional(repeated(separatorRegexp, alphaNumericRegexp)))
|
||||
|
||||
// hostnameComponentRegexp restricts the registry hostname component of a
|
||||
// repository name to start with a component as defined by hostnameRegexp
|
||||
// and followed by an optional port.
|
||||
hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
|
||||
|
||||
// hostnameRegexp defines the structure of potential hostname components
|
||||
// that may be part of image names. This is purposely a subset of what is
|
||||
// allowed by DNS to ensure backwards compatibility with Docker image
|
||||
// names.
|
||||
hostnameRegexp = expression(
|
||||
hostnameComponentRegexp,
|
||||
optional(repeated(literal(`.`), hostnameComponentRegexp)),
|
||||
optional(literal(`:`), match(`[0-9]+`)))
|
||||
|
||||
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
|
||||
TagRegexp = match(`[\w][\w.-]{0,127}`)
|
||||
|
||||
// anchoredTagRegexp matches valid tag names, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredTagRegexp = anchored(TagRegexp)
|
||||
|
||||
// DigestRegexp matches valid digests.
|
||||
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
|
||||
|
||||
// anchoredDigestRegexp matches valid digests, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredDigestRegexp = anchored(DigestRegexp)
|
||||
|
||||
// NameRegexp is the format for the name component of references. The
|
||||
// regexp has capturing groups for the hostname and name part omitting
|
||||
// the separating forward slash from either.
|
||||
NameRegexp = expression(
|
||||
optional(hostnameRegexp, literal(`/`)),
|
||||
nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp)))
|
||||
|
||||
// anchoredNameRegexp is used to parse a name value, capturing the
|
||||
// hostname and trailing components.
|
||||
anchoredNameRegexp = anchored(
|
||||
optional(capture(hostnameRegexp), literal(`/`)),
|
||||
capture(nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp))))
|
||||
|
||||
// ReferenceRegexp is the full supported format of a reference. The regexp
|
||||
// is anchored and has capturing groups for name, tag, and digest
|
||||
// components.
|
||||
ReferenceRegexp = anchored(capture(NameRegexp),
|
||||
optional(literal(":"), capture(TagRegexp)),
|
||||
optional(literal("@"), capture(DigestRegexp)))
|
||||
)
|
||||
|
||||
// match compiles the string to a regular expression.
|
||||
var match = regexp.MustCompile
|
||||
|
||||
// literal compiles s into a literal regular expression, escaping any regexp
|
||||
// reserved characters.
|
||||
func literal(s string) *regexp.Regexp {
|
||||
re := match(regexp.QuoteMeta(s))
|
||||
|
||||
if _, complete := re.LiteralPrefix(); !complete {
|
||||
panic("must be a literal")
|
||||
}
|
||||
|
||||
return re
|
||||
}
|
||||
|
||||
// expression defines a full expression, where each regular expression must
|
||||
// follow the previous.
|
||||
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
var s string
|
||||
for _, re := range res {
|
||||
s += re.String()
|
||||
}
|
||||
|
||||
return match(s)
|
||||
}
|
||||
|
||||
// optional wraps the expression in a non-capturing group and makes the
|
||||
// production optional.
|
||||
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `?`)
|
||||
}
|
||||
|
||||
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||
// matches.
|
||||
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `+`)
|
||||
}
|
||||
|
||||
// group wraps the regexp in a non-capturing group.
|
||||
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(?:` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// capture wraps the expression in a capturing group.
|
||||
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// anchored anchors the regular expression by adding start and end delimiters.
|
||||
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`^` + expression(res...).String() + `$`)
|
||||
}
|
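ReferenceRegexp above is composed so that its three capture groups line up with the name, tag, and digest fields consumed by Parse in reference.go. A small sketch of that mapping, using one of the well-formed inputs from the tests below:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	input := "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"
	matches := reference.ReferenceRegexp.FindStringSubmatch(input)
	if matches == nil {
		panic("input did not match ReferenceRegexp")
	}
	// matches[0] is the whole input; groups 1..3 are name, tag, and digest.
	fmt.Println(matches[1]) // registry.com:8080/myapp
	fmt.Println(matches[2]) // tag2
	fmt.Println(matches[3]) // the sha256 digest component
}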
489
Godeps/_workspace/src/github.com/docker/distribution/reference/regexp_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,489 @@
|
|||
package reference
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type regexpMatch struct {
|
||||
input string
|
||||
match bool
|
||||
subs []string
|
||||
}
|
||||
|
||||
func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) {
|
||||
matches := r.FindStringSubmatch(m.input)
|
||||
if m.match && matches != nil {
|
||||
if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input {
|
||||
t.Fatalf("Bad match result %#v for %q", matches, m.input)
|
||||
}
|
||||
if len(matches) < (len(m.subs) + 1) {
|
||||
t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input)
|
||||
}
|
||||
for i := range m.subs {
|
||||
if m.subs[i] != matches[i+1] {
|
||||
t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input)
|
||||
}
|
||||
}
|
||||
} else if m.match {
|
||||
t.Errorf("Expected match for %q", m.input)
|
||||
} else if matches != nil {
|
||||
t.Errorf("Unexpected match for %q", m.input)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostRegexp(t *testing.T) {
|
||||
hostcases := []regexpMatch{
|
||||
{
|
||||
input: "test.com",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "test.com:10304",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "test.com:http",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "localhost",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "localhost:8080",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "a",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "a.b",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "ab.cd.com",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "a-b.com",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "-ab.com",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "ab-.com",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "ab.c-om",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "ab.-com",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "ab.com-",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "0101.com",
|
||||
match: true, // TODO(dmcgowan): validate whether this should be allowed
|
||||
},
|
||||
{
|
||||
input: "001a.com",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "b.gbc.io:443",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "b.gbc.io",
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "xn--n3h.com", // ☃.com in punycode
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "Asdf.com", // uppercase character
|
||||
match: true,
|
||||
},
|
||||
}
|
||||
r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`)
|
||||
for i := range hostcases {
|
||||
checkRegexp(t, r, hostcases[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFullNameRegexp(t *testing.T) {
|
||||
if anchoredNameRegexp.NumSubexp() != 2 {
|
||||
t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2",
|
||||
anchoredNameRegexp, anchoredNameRegexp.NumSubexp())
|
||||
}
|
||||
|
||||
testcases := []regexpMatch{
|
||||
{
|
||||
input: "",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "short",
|
||||
match: true,
|
||||
subs: []string{"", "short"},
|
||||
},
|
||||
{
|
||||
input: "simple/name",
|
||||
match: true,
|
||||
subs: []string{"simple", "name"},
|
||||
},
|
||||
{
|
||||
input: "library/ubuntu",
|
||||
match: true,
|
||||
subs: []string{"library", "ubuntu"},
|
||||
},
|
||||
{
|
||||
input: "docker/stevvooe/app",
|
||||
match: true,
|
||||
subs: []string{"docker", "stevvooe/app"},
|
||||
},
|
||||
{
|
||||
input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
|
||||
match: true,
|
||||
subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"},
|
||||
},
|
||||
{
|
||||
input: "aa/aa/bb/bb/bb",
|
||||
match: true,
|
||||
subs: []string{"aa", "aa/bb/bb/bb"},
|
||||
},
|
||||
{
|
||||
input: "a/a/a/a",
|
||||
match: true,
|
||||
subs: []string{"a", "a/a/a"},
|
||||
},
|
||||
{
|
||||
input: "a/a/a/a/",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "a//a/a",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "a",
|
||||
match: true,
|
||||
subs: []string{"", "a"},
|
||||
},
|
||||
{
|
||||
input: "a/aa",
|
||||
match: true,
|
||||
subs: []string{"a", "aa"},
|
||||
},
|
||||
{
|
||||
input: "a/aa/a",
|
||||
match: true,
|
||||
subs: []string{"a", "aa/a"},
|
||||
},
|
||||
{
|
||||
input: "foo.com",
|
||||
match: true,
|
||||
subs: []string{"", "foo.com"},
|
||||
},
|
||||
{
|
||||
input: "foo.com/",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo.com:8080/bar",
|
||||
match: true,
|
||||
subs: []string{"foo.com:8080", "bar"},
|
||||
},
|
||||
{
|
||||
input: "foo.com:http/bar",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo.com/bar",
|
||||
match: true,
|
||||
subs: []string{"foo.com", "bar"},
|
||||
},
|
||||
{
|
||||
input: "foo.com/bar/baz",
|
||||
match: true,
|
||||
subs: []string{"foo.com", "bar/baz"},
|
||||
},
|
||||
{
|
||||
input: "localhost:8080/bar",
|
||||
match: true,
|
||||
subs: []string{"localhost:8080", "bar"},
|
||||
},
|
||||
{
|
||||
input: "sub-dom1.foo.com/bar/baz/quux",
|
||||
match: true,
|
||||
subs: []string{"sub-dom1.foo.com", "bar/baz/quux"},
|
||||
},
|
||||
{
|
||||
input: "blog.foo.com/bar/baz",
|
||||
match: true,
|
||||
subs: []string{"blog.foo.com", "bar/baz"},
|
||||
},
|
||||
{
|
||||
input: "a^a",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "aa/asdf$$^/aa",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "asdf$$^/aa",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "aa-a/a",
|
||||
match: true,
|
||||
subs: []string{"aa-a", "a"},
|
||||
},
|
||||
{
|
||||
input: strings.Repeat("a/", 128) + "a",
|
||||
match: true,
|
||||
subs: []string{"a", strings.Repeat("a/", 127) + "a"},
|
||||
},
|
||||
{
|
||||
input: "a-/a/a/a",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo.com/a-/a/a",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "-foo/bar",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo/bar-",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo-/bar",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo/-bar",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "_foo/bar",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo_bar",
|
||||
match: true,
|
||||
subs: []string{"", "foo_bar"},
|
||||
},
|
||||
{
|
||||
input: "foo_bar.com",
|
||||
match: true,
|
||||
subs: []string{"", "foo_bar.com"},
|
||||
},
|
||||
{
|
||||
input: "foo_bar.com:8080",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo_bar.com:8080/app",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "foo.com/foo_bar",
|
||||
match: true,
|
||||
subs: []string{"foo.com", "foo_bar"},
|
||||
},
|
||||
{
|
||||
input: "____/____",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "_docker/_docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "docker_/docker_",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "b.gcr.io/test.example.com/my-app",
|
||||
match: true,
|
||||
subs: []string{"b.gcr.io", "test.example.com/my-app"},
|
||||
},
|
||||
{
|
||||
input: "xn--n3h.com/myimage", // ☃.com in punycode
|
||||
match: true,
|
||||
subs: []string{"xn--n3h.com", "myimage"},
|
||||
},
|
||||
{
|
||||
input: "xn--7o8h.com/myimage", // 🐳.com in punycode
|
||||
match: true,
|
||||
subs: []string{"xn--7o8h.com", "myimage"},
|
||||
},
|
||||
{
|
||||
input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode
|
||||
match: true,
|
||||
subs: []string{"example.com", "xn--7o8h.com/myimage"},
|
||||
},
|
||||
{
|
||||
input: "example.com/some_separator__underscore/myimage",
|
||||
match: true,
|
||||
subs: []string{"example.com", "some_separator__underscore/myimage"},
|
||||
},
|
||||
{
|
||||
input: "example.com/__underscore/myimage",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "example.com/..dots/myimage",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "example.com/.dots/myimage",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "example.com/nodouble..dots/myimage",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "example.com/nodouble..dots/myimage",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "docker./docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: ".docker/docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "docker-/docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "-docker/docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "do..cker/docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "do__cker:8080/docker",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "do__cker/docker",
|
||||
match: true,
|
||||
subs: []string{"", "do__cker/docker"},
|
||||
},
|
||||
{
|
||||
input: "b.gcr.io/test.example.com/my-app",
|
||||
match: true,
|
||||
subs: []string{"b.gcr.io", "test.example.com/my-app"},
|
||||
},
|
||||
{
|
||||
input: "registry.io/foo/project--id.module--name.ver---sion--name",
|
||||
match: true,
|
||||
subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"},
|
||||
},
|
||||
{
|
||||
input: "Asdf.com/foo/bar", // uppercase character in hostname
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
input: "Foo/FarB", // uppercase characters in remote name
|
||||
match: false,
|
||||
},
|
||||
}
|
||||
for i := range testcases {
|
||||
checkRegexp(t, anchoredNameRegexp, testcases[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestReferenceRegexp(t *testing.T) {
|
||||
if ReferenceRegexp.NumSubexp() != 3 {
|
||||
t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3",
|
||||
ReferenceRegexp, ReferenceRegexp.NumSubexp())
|
||||
}
|
||||
|
||||
testcases := []regexpMatch{
|
||||
{
|
||||
input: "registry.com:8080/myapp:tag",
|
||||
match: true,
|
||||
subs: []string{"registry.com:8080/myapp", "tag", ""},
|
||||
},
|
||||
{
|
||||
input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
|
||||
match: true,
|
||||
subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
|
||||
},
|
||||
{
|
||||
input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
|
||||
match: true,
|
||||
subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
|
||||
},
|
||||
{
|
||||
input: "registry.com:8080/myapp@sha256:badbadbadbad",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "registry.com:8080/myapp:invalid~tag",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "bad_hostname.com:8080/myapp:tag",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
// localhost is treated as the name and 8080 as the tag (no hostname component)
input: "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
|
||||
match: true,
|
||||
subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
|
||||
},
|
||||
{
|
||||
input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
|
||||
match: true,
|
||||
subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
|
||||
},
|
||||
{
|
||||
input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
// localhost will be treated as an image name without a host
|
||||
input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
|
||||
match: true,
|
||||
subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
|
||||
},
|
||||
{
|
||||
input: "registry.com:8080/myapp@bad",
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
input: "registry.com:8080/myapp@2bad",
|
||||
match: false, // TODO(dmcgowan): Support this as valid
|
||||
},
|
||||
}
|
||||
|
||||
for i := range testcases {
|
||||
checkRegexp(t, ReferenceRegexp, testcases[i])
|
||||
}
|
||||
|
||||
}
|
116
Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go
generated
vendored
|
@@ -1,116 +0,0 @@
|
|||
package filters
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Args map[string][]string
|
||||
|
||||
// Parse the argument to the filter flag. Like
|
||||
//
|
||||
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
|
||||
//
|
||||
// If prev map is provided, then it is appended to, and returned. By default a new
|
||||
// map is created.
|
||||
func ParseFlag(arg string, prev Args) (Args, error) {
|
||||
var filters Args = prev
|
||||
if prev == nil {
|
||||
filters = Args{}
|
||||
}
|
||||
if len(arg) == 0 {
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
if !strings.Contains(arg, "=") {
|
||||
return filters, ErrorBadFormat
|
||||
}
|
||||
|
||||
f := strings.SplitN(arg, "=", 2)
|
||||
name := strings.ToLower(strings.TrimSpace(f[0]))
|
||||
value := strings.TrimSpace(f[1])
|
||||
filters[name] = append(filters[name], value)
|
||||
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
var ErrorBadFormat = errors.New("bad format of filter (expected name=value)")
|
||||
|
||||
// packs the Args into a string for easy transport from client to server
|
||||
func ToParam(a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
if len(a) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(a)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// unpacks the filter Args
|
||||
func FromParam(p string) (Args, error) {
|
||||
args := Args{}
|
||||
if len(p) == 0 {
|
||||
return args, nil
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return args, nil
|
||||
}
|
||||
|
||||
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
||||
fieldValues := filters[field]
|
||||
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if sources == nil || len(sources) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
outer:
|
||||
for _, name2match := range fieldValues {
|
||||
testKV := strings.SplitN(name2match, "=", 2)
|
||||
|
||||
for k, v := range sources {
|
||||
if len(testKV) == 1 {
|
||||
if k == testKV[0] {
|
||||
continue outer
|
||||
}
|
||||
} else if k == testKV[0] && v == testKV[1] {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (filters Args) Match(field, source string) bool {
|
||||
fieldValues := filters[field]
|
||||
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, name2match := range fieldValues {
|
||||
match, err := regexp.MatchString(name2match, source)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
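For context on what this commit drops: the removed filters package encoded --filter style arguments as a JSON map of name to values, with ParseFlag/ToParam/FromParam forming a round trip and Match applying each value as a regular expression. A sketch of that pre-removal API, using only the signatures shown in the deleted file above:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/filters"
)

func main() {
	// Equivalent of: docker ps -f 'created=today' -f 'image.name=ubuntu*'
	args, err := filters.ParseFlag("created=today", nil)
	if err != nil {
		panic(err)
	}
	if args, err = filters.ParseFlag("image.name=ubuntu*", args); err != nil {
		panic(err)
	}

	encoded, _ := filters.ToParam(args) // JSON: {"created":["today"],"image.name":["ubuntu*"]}
	decoded, _ := filters.FromParam(encoded)
	fmt.Println(decoded.Match("created", "today")) // true
}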
218
Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go
generated
vendored
|
@@ -1,218 +0,0 @@
|
|||
package filters
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseArgs(t *testing.T) {
|
||||
// equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'`
|
||||
flagArgs := []string{
|
||||
"created=today",
|
||||
"image.name=ubuntu*",
|
||||
"image.name=*untu",
|
||||
}
|
||||
var (
|
||||
args = Args{}
|
||||
err error
|
||||
)
|
||||
for i := range flagArgs {
|
||||
args, err = ParseFlag(flagArgs[i], args)
|
||||
if err != nil {
|
||||
t.Errorf("failed to parse %s: %s", flagArgs[i], err)
|
||||
}
|
||||
}
|
||||
if len(args["created"]) != 1 {
|
||||
t.Errorf("failed to set this arg")
|
||||
}
|
||||
if len(args["image.name"]) != 2 {
|
||||
t.Errorf("the args should have collapsed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseArgsEdgeCase(t *testing.T) {
|
||||
var filters Args
|
||||
args, err := ParseFlag("", filters)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if args == nil || len(args) != 0 {
|
||||
t.Fatalf("Expected an empty Args (map), got %v", args)
|
||||
}
|
||||
if args, err = ParseFlag("anything", args); err == nil || err != ErrorBadFormat {
|
||||
t.Fatalf("Expected ErrorBadFormat, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToParam(t *testing.T) {
|
||||
a := Args{
|
||||
"created": []string{"today"},
|
||||
"image.name": []string{"ubuntu*", "*untu"},
|
||||
}
|
||||
|
||||
_, err := ToParam(a)
|
||||
if err != nil {
|
||||
t.Errorf("failed to marshal the filters: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromParam(t *testing.T) {
|
||||
invalids := []string{
|
||||
"anything",
|
||||
"['a','list']",
|
||||
"{'key': 'value'}",
|
||||
`{"key": "value"}`,
|
||||
}
|
||||
valids := map[string]Args{
|
||||
`{"key": ["value"]}`: {
|
||||
"key": {"value"},
|
||||
},
|
||||
`{"key": ["value1", "value2"]}`: {
|
||||
"key": {"value1", "value2"},
|
||||
},
|
||||
`{"key1": ["value1"], "key2": ["value2"]}`: {
|
||||
"key1": {"value1"},
|
||||
"key2": {"value2"},
|
||||
},
|
||||
}
|
||||
for _, invalid := range invalids {
|
||||
if _, err := FromParam(invalid); err == nil {
|
||||
t.Fatalf("Expected an error with %v, got nothing", invalid)
|
||||
}
|
||||
}
|
||||
for json, expectedArgs := range valids {
|
||||
args, err := FromParam(json)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(args) != len(expectedArgs) {
|
||||
t.Fatalf("Expected %v, go %v", expectedArgs, args)
|
||||
}
|
||||
for key, expectedValues := range expectedArgs {
|
||||
values := args[key]
|
||||
sort.Strings(values)
|
||||
sort.Strings(expectedValues)
|
||||
if len(values) != len(expectedValues) {
|
||||
t.Fatalf("Expected %v, go %v", expectedArgs, args)
|
||||
}
|
||||
for index, expectedValue := range expectedValues {
|
||||
if values[index] != expectedValue {
|
||||
t.Fatalf("Expected %v, go %v", expectedArgs, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
a := Args{}
|
||||
v, err := ToParam(a)
|
||||
if err != nil {
|
||||
t.Errorf("failed to marshal the filters: %s", err)
|
||||
}
|
||||
v1, err := FromParam(v)
|
||||
if err != nil {
|
||||
t.Errorf("%s", err)
|
||||
}
|
||||
if len(a) != len(v1) {
|
||||
t.Errorf("these should both be empty sets")
|
||||
}
|
||||
}
|
||||
|
||||
func TestArgsMatchKVList(t *testing.T) {
|
||||
// empty sources
|
||||
args := Args{
|
||||
"created": []string{"today"},
|
||||
}
|
||||
if args.MatchKVList("created", map[string]string{}) {
|
||||
t.Fatalf("Expected false for (%v,created), got true", args)
|
||||
}
|
||||
// Not empty sources
|
||||
sources := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
"key3": "value3",
|
||||
}
|
||||
matches := map[*Args]string{
|
||||
&Args{}: "field",
|
||||
&Args{
|
||||
"created": []string{"today"},
|
||||
"labels": []string{"key1"},
|
||||
}: "labels",
|
||||
&Args{
|
||||
"created": []string{"today"},
|
||||
"labels": []string{"key1=value1"},
|
||||
}: "labels",
|
||||
}
|
||||
differs := map[*Args]string{
|
||||
&Args{
|
||||
"created": []string{"today"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"today"},
|
||||
"labels": []string{"key4"},
|
||||
}: "labels",
|
||||
&Args{
|
||||
"created": []string{"today"},
|
||||
"labels": []string{"key1=value3"},
|
||||
}: "labels",
|
||||
}
|
||||
for args, field := range matches {
|
||||
if args.MatchKVList(field, sources) != true {
|
||||
t.Fatalf("Expected true for %v on %v, got false", sources, args)
|
||||
}
|
||||
}
|
||||
for args, field := range differs {
|
||||
if args.MatchKVList(field, sources) != false {
|
||||
t.Fatalf("Expected false for %v on %v, got true", sources, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArgsMatch(t *testing.T) {
|
||||
source := "today"
|
||||
matches := map[*Args]string{
|
||||
&Args{}: "field",
|
||||
&Args{
|
||||
"created": []string{"today"},
|
||||
"labels": []string{"key1"},
|
||||
}: "today",
|
||||
&Args{
|
||||
"created": []string{"to*"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"to(.*)"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"tod"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"anything", "to*"},
|
||||
}: "created",
|
||||
}
|
||||
differs := map[*Args]string{
|
||||
&Args{
|
||||
"created": []string{"tomorrow"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"to(day"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"tom(.*)"},
|
||||
}: "created",
|
||||
&Args{
|
||||
"created": []string{"today1"},
|
||||
"labels": []string{"today"},
|
||||
}: "created",
|
||||
}
|
||||
for args, field := range matches {
|
||||
if args.Match(field, source) != true {
|
||||
t.Fatalf("Expected true for %v on %v, got false", source, args)
|
||||
}
|
||||
}
|
||||
for args, field := range differs {
|
||||
if args.Match(field, source) != false {
|
||||
t.Fatalf("Expected false for %v on %v, got true", source, args)
|
||||
}
|
||||
}
|
||||
}
|
95
Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go
generated
vendored
|
@@ -1,95 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type KernelVersionInfo struct {
|
||||
Kernel int
|
||||
Major int
|
||||
Minor int
|
||||
Flavor string
|
||||
}
|
||||
|
||||
func (k *KernelVersionInfo) String() string {
|
||||
return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
|
||||
}
|
||||
|
||||
// Compare two KernelVersionInfo structs.
|
||||
// Returns -1 if a < b, 0 if a == b, 1 it a > b
|
||||
func CompareKernelVersion(a, b *KernelVersionInfo) int {
|
||||
if a.Kernel < b.Kernel {
|
||||
return -1
|
||||
} else if a.Kernel > b.Kernel {
|
||||
return 1
|
||||
}
|
||||
|
||||
if a.Major < b.Major {
|
||||
return -1
|
||||
} else if a.Major > b.Major {
|
||||
return 1
|
||||
}
|
||||
|
||||
if a.Minor < b.Minor {
|
||||
return -1
|
||||
} else if a.Minor > b.Minor {
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func GetKernelVersion() (*KernelVersionInfo, error) {
|
||||
var (
|
||||
err error
|
||||
)
|
||||
|
||||
uts, err := uname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
release := make([]byte, len(uts.Release))
|
||||
|
||||
i := 0
|
||||
for _, c := range uts.Release {
|
||||
release[i] = byte(c)
|
||||
i++
|
||||
}
|
||||
|
||||
// Remove the \x00 from the release for Atoi to parse correctly
|
||||
release = release[:bytes.IndexByte(release, 0)]
|
||||
|
||||
return ParseRelease(string(release))
|
||||
}
|
||||
|
||||
func ParseRelease(release string) (*KernelVersionInfo, error) {
|
||||
var (
|
||||
kernel, major, minor, parsed int
|
||||
flavor, partial string
|
||||
)
|
||||
|
||||
// Ignore error from Sscanf to allow an empty flavor. Instead, just
|
||||
// make sure we got all the version numbers.
|
||||
parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
|
||||
if parsed < 2 {
|
||||
return nil, errors.New("Can't parse kernel version " + release)
|
||||
}
|
||||
|
||||
// sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
|
||||
parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
|
||||
if parsed < 1 {
|
||||
flavor = partial
|
||||
}
|
||||
|
||||
return &KernelVersionInfo{
|
||||
Kernel: kernel,
|
||||
Major: major,
|
||||
Minor: minor,
|
||||
Flavor: flavor,
|
||||
}, nil
|
||||
}
|
92
Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go
generated
vendored
|
@@ -1,92 +0,0 @@
|
|||
package kernel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) {
|
||||
var (
|
||||
a *KernelVersionInfo
|
||||
)
|
||||
a, _ = ParseRelease(release)
|
||||
|
||||
if r := CompareKernelVersion(a, b); r != result {
|
||||
t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result)
|
||||
}
|
||||
if a.Flavor != b.Flavor {
|
||||
t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRelease(t *testing.T) {
|
||||
assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
|
||||
assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
|
||||
assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
|
||||
assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
|
||||
assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
|
||||
assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
|
||||
assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1)
|
||||
// Errors
|
||||
invalids := []string{
|
||||
"3",
|
||||
"a",
|
||||
"a.a",
|
||||
"a.a.a-a",
|
||||
}
|
||||
for _, invalid := range invalids {
|
||||
expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
|
||||
if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
|
||||
if r := CompareKernelVersion(a, b); r != result {
|
||||
t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareKernelVersion(t *testing.T) {
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
0)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
-1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
|
||||
1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
0)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
-1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 7, Minor: 20},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
-1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 20},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 7, Minor: 0},
|
||||
1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 20},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
1)
|
||||
assertKernelVersion(t,
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
|
||||
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 20},
|
||||
-1)
|
||||
}
|
65 Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go generated vendored
@ -1,65 +0,0 @@
package kernel

import (
	"fmt"
	"syscall"
	"unsafe"
)

type KernelVersionInfo struct {
	kvi   string
	major int
	minor int
	build int
}

func (k *KernelVersionInfo) String() string {
	return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
}

func GetKernelVersion() (*KernelVersionInfo, error) {
	var (
		h         syscall.Handle
		dwVersion uint32
		err       error
	)

	KVI := &KernelVersionInfo{"Unknown", 0, 0, 0}

	if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
		syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
		0,
		syscall.KEY_READ,
		&h); err != nil {
		return KVI, err
	}
	defer syscall.RegCloseKey(h)

	var buf [1 << 10]uint16
	var typ uint32
	n := uint32(len(buf) * 2) // api expects array of bytes, not uint16

	if err = syscall.RegQueryValueEx(h,
		syscall.StringToUTF16Ptr("BuildLabEx"),
		nil,
		&typ,
		(*byte)(unsafe.Pointer(&buf[0])),
		&n); err != nil {
		return KVI, err
	}

	KVI.kvi = syscall.UTF16ToString(buf[:])

	// Important - docker.exe MUST be manifested for this API to return
	// the correct information.
	if dwVersion, err = syscall.GetVersion(); err != nil {
		return KVI, err
	}

	KVI.major = int(dwVersion & 0xFF)
	KVI.minor = int((dwVersion & 0XFF00) >> 8)
	KVI.build = int((dwVersion & 0xFFFF0000) >> 16)

	return KVI, nil
}
16 Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go generated vendored
@ -1,16 +0,0 @@
package kernel

import (
	"syscall"
)

type Utsname syscall.Utsname

func uname() (*syscall.Utsname, error) {
	uts := &syscall.Utsname{}

	if err := syscall.Uname(uts); err != nil {
		return nil, err
	}
	return uts, nil
}
15 Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go generated vendored
@ -1,15 +0,0 @@
// +build !linux

package kernel

import (
	"errors"
)

type Utsname struct {
	Release [65]byte
}

func uname() (*Utsname, error) {
	return nil, errors.New("Kernel version detection is available only on linux")
}
@ -1,40 +0,0 @@
package operatingsystem

import (
	"bytes"
	"errors"
	"io/ioutil"
)

var (
	// file to use to detect if the daemon is running in a container
	proc1Cgroup = "/proc/1/cgroup"

	// file to check to determine Operating System
	etcOsRelease = "/etc/os-release"
)

func GetOperatingSystem() (string, error) {
	b, err := ioutil.ReadFile(etcOsRelease)
	if err != nil {
		return "", err
	}
	if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 {
		b = b[i+13:]
		return string(b[:bytes.IndexByte(b, '"')]), nil
	}
	return "", errors.New("PRETTY_NAME not found")
}

func IsContainerized() (bool, error) {
	b, err := ioutil.ReadFile(proc1Cgroup)
	if err != nil {
		return false, err
	}
	for _, line := range bytes.Split(b, []byte{'\n'}) {
		if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) {
			return true, nil
		}
	}
	return false, nil
}
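A companion sketch, not part of the diff, showing the two helpers above in use; the import path is assumed from this vendor tree and error handling is abbreviated.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/operatingsystem"
)

func main() {
	// Reads PRETTY_NAME from /etc/os-release, e.g. "Ubuntu 14.04 LTS".
	if name, err := operatingsystem.GetOperatingSystem(); err == nil {
		fmt.Println("host OS:", name)
	}
	// True when any /proc/1/cgroup entry is rooted somewhere other than "/".
	if inContainer, err := operatingsystem.IsContainerized(); err == nil && inContainer {
		fmt.Println("pid 1 appears to be running inside a container")
	}
}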
|
@ -1,124 +0,0 @@
|
|||
package operatingsystem
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetOperatingSystem(t *testing.T) {
|
||||
var (
|
||||
backup = etcOsRelease
|
||||
ubuntuTrusty = []byte(`NAME="Ubuntu"
|
||||
VERSION="14.04, Trusty Tahr"
|
||||
ID=ubuntu
|
||||
ID_LIKE=debian
|
||||
PRETTY_NAME="Ubuntu 14.04 LTS"
|
||||
VERSION_ID="14.04"
|
||||
HOME_URL="http://www.ubuntu.com/"
|
||||
SUPPORT_URL="http://help.ubuntu.com/"
|
||||
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`)
|
||||
gentoo = []byte(`NAME=Gentoo
|
||||
ID=gentoo
|
||||
PRETTY_NAME="Gentoo/Linux"
|
||||
ANSI_COLOR="1;32"
|
||||
HOME_URL="http://www.gentoo.org/"
|
||||
SUPPORT_URL="http://www.gentoo.org/main/en/support.xml"
|
||||
BUG_REPORT_URL="https://bugs.gentoo.org/"
|
||||
`)
|
||||
noPrettyName = []byte(`NAME="Ubuntu"
|
||||
VERSION="14.04, Trusty Tahr"
|
||||
ID=ubuntu
|
||||
ID_LIKE=debian
|
||||
VERSION_ID="14.04"
|
||||
HOME_URL="http://www.ubuntu.com/"
|
||||
SUPPORT_URL="http://help.ubuntu.com/"
|
||||
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`)
|
||||
)
|
||||
|
||||
dir := os.TempDir()
|
||||
etcOsRelease = filepath.Join(dir, "etcOsRelease")
|
||||
|
||||
defer func() {
|
||||
os.Remove(etcOsRelease)
|
||||
etcOsRelease = backup
|
||||
}()
|
||||
|
||||
for expect, osRelease := range map[string][]byte{
|
||||
"Ubuntu 14.04 LTS": ubuntuTrusty,
|
||||
"Gentoo/Linux": gentoo,
|
||||
"": noPrettyName,
|
||||
} {
|
||||
if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil {
|
||||
t.Fatalf("failed to write to %s: %v", etcOsRelease, err)
|
||||
}
|
||||
s, err := GetOperatingSystem()
|
||||
if s != expect {
|
||||
if expect == "" {
|
||||
t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err)
|
||||
} else {
|
||||
t.Fatalf("Expected '%s', but got '%s'. Err=%v", expect, s, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsContainerized(t *testing.T) {
|
||||
var (
|
||||
backup = proc1Cgroup
|
||||
nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/
|
||||
13:hugetlb:/
|
||||
12:net_prio:/
|
||||
11:perf_event:/
|
||||
10:bfqio:/
|
||||
9:blkio:/
|
||||
8:net_cls:/
|
||||
7:freezer:/
|
||||
6:devices:/
|
||||
5:memory:/
|
||||
4:cpuacct:/
|
||||
3:cpu:/
|
||||
2:cpuset:/
|
||||
`)
|
||||
containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
7:net_cls:/
|
||||
6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
|
||||
1:cpuset:/`)
|
||||
)
|
||||
|
||||
dir := os.TempDir()
|
||||
proc1Cgroup = filepath.Join(dir, "proc1Cgroup")
|
||||
|
||||
defer func() {
|
||||
os.Remove(proc1Cgroup)
|
||||
proc1Cgroup = backup
|
||||
}()
|
||||
|
||||
if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil {
|
||||
t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
|
||||
}
|
||||
inContainer, err := IsContainerized()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inContainer {
|
||||
t.Fatal("Wrongly assuming containerized")
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil {
|
||||
t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
|
||||
}
|
||||
inContainer, err = IsContainerized()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !inContainer {
|
||||
t.Fatal("Wrongly assuming non-containerized")
|
||||
}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
package operatingsystem
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c
|
||||
// for a similar sample
|
||||
|
||||
func GetOperatingSystem() (string, error) {
|
||||
|
||||
var h syscall.Handle
|
||||
|
||||
// Default return value
|
||||
ret := "Unknown Operating System"
|
||||
|
||||
if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
|
||||
syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
|
||||
0,
|
||||
syscall.KEY_READ,
|
||||
&h); err != nil {
|
||||
return ret, err
|
||||
}
|
||||
defer syscall.RegCloseKey(h)
|
||||
|
||||
var buf [1 << 10]uint16
|
||||
var typ uint32
|
||||
n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
|
||||
|
||||
if err := syscall.RegQueryValueEx(h,
|
||||
syscall.StringToUTF16Ptr("ProductName"),
|
||||
nil,
|
||||
&typ,
|
||||
(*byte)(unsafe.Pointer(&buf[0])),
|
||||
&n); err != nil {
|
||||
return ret, err
|
||||
}
|
||||
ret = syscall.UTF16ToString(buf[:])
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// No-op on Windows
|
||||
func IsContainerized() (bool, error) {
|
||||
return false, nil
|
||||
}
|
170 Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go generated vendored
@ -1,170 +0,0 @@
|
|||
package parsers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FIXME: Change this not to receive default value as parameter
|
||||
func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
|
||||
addr = strings.TrimSpace(addr)
|
||||
if addr == "" {
|
||||
if runtime.GOOS != "windows" {
|
||||
addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
|
||||
} else {
|
||||
// Note - defaultTCPAddr already includes tcp:// prefix
|
||||
addr = fmt.Sprintf("%s", defaultTCPAddr)
|
||||
}
|
||||
}
|
||||
addrParts := strings.Split(addr, "://")
|
||||
if len(addrParts) == 1 {
|
||||
addrParts = []string{"tcp", addrParts[0]}
|
||||
}
|
||||
|
||||
switch addrParts[0] {
|
||||
case "tcp":
|
||||
return ParseTCPAddr(addrParts[1], defaultTCPAddr)
|
||||
case "unix":
|
||||
return ParseUnixAddr(addrParts[1], defaultUnixAddr)
|
||||
case "fd":
|
||||
return addr, nil
|
||||
default:
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
}
|
||||
|
||||
func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
|
||||
addr = strings.TrimPrefix(addr, "unix://")
|
||||
if strings.Contains(addr, "://") {
|
||||
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
|
||||
}
|
||||
if addr == "" {
|
||||
addr = defaultAddr
|
||||
}
|
||||
return fmt.Sprintf("unix://%s", addr), nil
|
||||
}
|
||||
|
||||
func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
|
||||
addr = strings.TrimPrefix(addr, "tcp://")
|
||||
if strings.Contains(addr, "://") || addr == "" {
|
||||
return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
|
||||
}
|
||||
|
||||
u, err := url.Parse("tcp://" + addr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
hostParts := strings.Split(u.Host, ":")
|
||||
if len(hostParts) != 2 {
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
host := hostParts[0]
|
||||
if host == "" {
|
||||
host = defaultAddr
|
||||
}
|
||||
|
||||
p, err := strconv.Atoi(hostParts[1])
|
||||
if err != nil && p == 0 {
|
||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
||||
}
|
||||
return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
|
||||
}
|
||||
|
||||
// Get a repos name and returns the right reposName + tag|digest
|
||||
// The tag can be confusing because of a port in a repository name.
|
||||
// Ex: localhost.localdomain:5000/samalba/hipache:latest
|
||||
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
|
||||
func ParseRepositoryTag(repos string) (string, string) {
|
||||
n := strings.Index(repos, "@")
|
||||
if n >= 0 {
|
||||
parts := strings.Split(repos, "@")
|
||||
return parts[0], parts[1]
|
||||
}
|
||||
n = strings.LastIndex(repos, ":")
|
||||
if n < 0 {
|
||||
return repos, ""
|
||||
}
|
||||
if tag := repos[n+1:]; !strings.Contains(tag, "/") {
|
||||
return repos[:n], tag
|
||||
}
|
||||
return repos, ""
|
||||
}
|
||||
|
||||
func PartParser(template, data string) (map[string]string, error) {
|
||||
// ip:public:private
|
||||
var (
|
||||
templateParts = strings.Split(template, ":")
|
||||
parts = strings.Split(data, ":")
|
||||
out = make(map[string]string, len(templateParts))
|
||||
)
|
||||
if len(parts) != len(templateParts) {
|
||||
return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
|
||||
}
|
||||
|
||||
for i, t := range templateParts {
|
||||
value := ""
|
||||
if len(parts) > i {
|
||||
value = parts[i]
|
||||
}
|
||||
out[t] = value
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func ParseKeyValueOpt(opt string) (string, string, error) {
|
||||
parts := strings.SplitN(opt, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
|
||||
}
|
||||
return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
|
||||
}
|
||||
|
||||
func ParsePortRange(ports string) (uint64, uint64, error) {
|
||||
if ports == "" {
|
||||
return 0, 0, fmt.Errorf("Empty string specified for ports.")
|
||||
}
|
||||
if !strings.Contains(ports, "-") {
|
||||
start, err := strconv.ParseUint(ports, 10, 16)
|
||||
end := start
|
||||
return start, end, err
|
||||
}
|
||||
|
||||
parts := strings.Split(ports, "-")
|
||||
start, err := strconv.ParseUint(parts[0], 10, 16)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
end, err := strconv.ParseUint(parts[1], 10, 16)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if end < start {
|
||||
return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
|
||||
}
|
||||
return start, end, nil
|
||||
}
|
||||
|
||||
func ParseLink(val string) (string, string, error) {
|
||||
if val == "" {
|
||||
return "", "", fmt.Errorf("empty string specified for links")
|
||||
}
|
||||
arr := strings.Split(val, ":")
|
||||
if len(arr) > 2 {
|
||||
return "", "", fmt.Errorf("bad format for links: %s", val)
|
||||
}
|
||||
if len(arr) == 1 {
|
||||
return val, val, nil
|
||||
}
|
||||
// This is kept because we can actually get an HostConfig with links
|
||||
// from an already created container and the format is not `foo:bar`
|
||||
// but `/foo:/c1/bar`
|
||||
if strings.HasPrefix(arr[0], "/") {
|
||||
_, alias := path.Split(arr[1])
|
||||
return arr[0][1:], alias, nil
|
||||
}
|
||||
return arr[0], arr[1], nil
|
||||
}
|
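A short sketch of the parsers helpers defined above; the expected values noted in the comments mirror the test cases in parsers_test.go below, and the import path is assumed from this vendor tree.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	repo, tag := parsers.ParseRepositoryTag("localhost:5000/foo/bar:latest")
	fmt.Println(repo, tag) // localhost:5000/foo/bar latest

	start, end, _ := parsers.ParsePortRange("8000-8080")
	fmt.Println(start, end) // 8000 8080

	addr, _ := parsers.ParseHost("127.0.0.1", "/var/run/docker.sock", ":2375")
	fmt.Println(addr) // tcp://127.0.0.1:2375
}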
210 Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go generated vendored
@ -1,210 +0,0 @@
|
|||
package parsers
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseHost(t *testing.T) {
|
||||
var (
|
||||
defaultHttpHost = "127.0.0.1"
|
||||
defaultUnix = "/var/run/docker.sock"
|
||||
)
|
||||
invalids := map[string]string{
|
||||
"0.0.0.0": "Invalid bind address format: 0.0.0.0",
|
||||
"tcp://": "Invalid proto, expected tcp: ",
|
||||
"tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
|
||||
"tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
|
||||
"udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1",
|
||||
"udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375",
|
||||
}
|
||||
valids := map[string]string{
|
||||
"0.0.0.1:5555": "tcp://0.0.0.1:5555",
|
||||
"0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path",
|
||||
":6666": "tcp://127.0.0.1:6666",
|
||||
":6666/path": "tcp://127.0.0.1:6666/path",
|
||||
"tcp://:7777": "tcp://127.0.0.1:7777",
|
||||
"tcp://:7777/path": "tcp://127.0.0.1:7777/path",
|
||||
"": "unix:///var/run/docker.sock",
|
||||
"unix:///run/docker.sock": "unix:///run/docker.sock",
|
||||
"unix://": "unix:///var/run/docker.sock",
|
||||
"fd://": "fd://",
|
||||
"fd://something": "fd://something",
|
||||
}
|
||||
for invalidAddr, expectedError := range invalids {
|
||||
if addr, err := ParseHost(defaultHttpHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError {
|
||||
t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr)
|
||||
}
|
||||
}
|
||||
for validAddr, expectedAddr := range valids {
|
||||
if addr, err := ParseHost(defaultHttpHost, defaultUnix, validAddr); err != nil || addr != expectedAddr {
|
||||
t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidUnixAddrInvalid(t *testing.T) {
|
||||
if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" {
|
||||
t.Fatalf("Expected an error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRepositoryTag(t *testing.T) {
|
||||
if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag)
|
||||
}
|
||||
if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" {
|
||||
t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag)
|
||||
}
|
||||
if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" {
|
||||
t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag)
|
||||
}
|
||||
if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" {
|
||||
t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag)
|
||||
}
|
||||
if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" {
|
||||
t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortMapping(t *testing.T) {
|
||||
if _, err := PartParser("ip:public:private", "192.168.1.1:80"); err == nil {
|
||||
t.Fatalf("Expected an error, got %v", err)
|
||||
}
|
||||
data, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(data) != 3 {
|
||||
t.FailNow()
|
||||
}
|
||||
if data["ip"] != "192.168.1.1" {
|
||||
t.Fail()
|
||||
}
|
||||
if data["public"] != "80" {
|
||||
t.Fail()
|
||||
}
|
||||
if data["private"] != "8080" {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseKeyValueOpt(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
"": "Unable to parse key/value option: ",
|
||||
"key": "Unable to parse key/value option: key",
|
||||
}
|
||||
for invalid, expectedError := range invalids {
|
||||
if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err)
|
||||
}
|
||||
}
|
||||
valids := map[string][]string{
|
||||
"key=value": {"key", "value"},
|
||||
" key = value ": {"key", "value"},
|
||||
"key=value1=value2": {"key", "value1=value2"},
|
||||
" key = value1 = value2 ": {"key", "value1 = value2"},
|
||||
}
|
||||
for valid, expectedKeyValue := range valids {
|
||||
key, value, err := ParseKeyValueOpt(valid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != expectedKeyValue[0] || value != expectedKeyValue[1] {
|
||||
t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRange(t *testing.T) {
|
||||
if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 {
|
||||
t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeEmpty(t *testing.T) {
|
||||
if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." {
|
||||
t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeWithNoRange(t *testing.T) {
|
||||
start, end, err := ParsePortRange("8080")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if start != 8080 || end != 8080 {
|
||||
t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectEndRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
|
||||
if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectStartRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
|
||||
if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseLink(t *testing.T) {
|
||||
name, alias, err := ParseLink("name:alias")
|
||||
if err != nil {
|
||||
t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err)
|
||||
}
|
||||
if name != "name" {
|
||||
t.Fatalf("Link name should have been name, got %s instead", name)
|
||||
}
|
||||
if alias != "alias" {
|
||||
t.Fatalf("Link alias should have been alias, got %s instead", alias)
|
||||
}
|
||||
// short format definition
|
||||
name, alias, err = ParseLink("name")
|
||||
if err != nil {
|
||||
t.Fatalf("Expected not to error out on a valid name only format but got: %v", err)
|
||||
}
|
||||
if name != "name" {
|
||||
t.Fatalf("Link name should have been name, got %s instead", name)
|
||||
}
|
||||
if alias != "name" {
|
||||
t.Fatalf("Link alias should have been name, got %s instead", alias)
|
||||
}
|
||||
// empty string link definition is not allowed
|
||||
if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") {
|
||||
t.Fatalf("Expected error 'empty string specified for links' but got: %v", err)
|
||||
}
|
||||
// more than two colons are not allowed
|
||||
if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") {
|
||||
t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err)
|
||||
}
|
||||
}
|
22 Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go generated vendored Normal file
@ -0,0 +1,22 @@
package types

// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	// Email is an optional value associated with the username.
	// This field is deprecated and will be removed in a later
	// version of docker.
	Email string `json:"email,omitempty"`

	ServerAddress string `json:"serveraddress,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry
	RegistryToken string `json:"registrytoken,omitempty"`
}
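A sketch of how an AuthConfig commonly becomes the base64-encoded RegistryAuth string used by the image options later in this file; the JSON-then-URL-safe-base64 encoding is an assumption about the client transport, not something stated in this diff.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	auth := types.AuthConfig{Username: "someuser", Password: "secret"}
	buf, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}
	// Assumed to be the string carried in fields like ImagePullOptions.RegistryAuth.
	fmt.Println(base64.URLEncoding.EncodeToString(buf))
}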
23 Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go generated vendored Normal file
@ -0,0 +1,23 @@
package blkiodev

import "fmt"

// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
	Path   string
	Weight uint16
}

func (w *WeightDevice) String() string {
	return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}

// ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct {
	Path string
	Rate uint64
}

func (t *ThrottleDevice) String() string {
	return fmt.Sprintf("%s:%d", t.Path, t.Rate)
}
231 Godeps/_workspace/src/github.com/docker/engine-api/types/client.go generated vendored Normal file
@ -0,0 +1,231 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/docker/engine-api/types/container"
|
||||
"github.com/docker/engine-api/types/filters"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// ContainerAttachOptions holds parameters to attach to a container.
|
||||
type ContainerAttachOptions struct {
|
||||
Stream bool
|
||||
Stdin bool
|
||||
Stdout bool
|
||||
Stderr bool
|
||||
DetachKeys string
|
||||
}
|
||||
|
||||
// ContainerCommitOptions holds parameters to commit changes into a container.
|
||||
type ContainerCommitOptions struct {
|
||||
Reference string
|
||||
Comment string
|
||||
Author string
|
||||
Changes []string
|
||||
Pause bool
|
||||
Config *container.Config
|
||||
}
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
type ContainerExecInspect struct {
|
||||
ExecID string
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
}
|
||||
|
||||
// ContainerListOptions holds parameters to list containers with.
|
||||
type ContainerListOptions struct {
|
||||
Quiet bool
|
||||
Size bool
|
||||
All bool
|
||||
Latest bool
|
||||
Since string
|
||||
Before string
|
||||
Limit int
|
||||
Filter filters.Args
|
||||
}
|
||||
|
||||
// ContainerLogsOptions holds parameters to filter logs with.
|
||||
type ContainerLogsOptions struct {
|
||||
ShowStdout bool
|
||||
ShowStderr bool
|
||||
Since string
|
||||
Timestamps bool
|
||||
Follow bool
|
||||
Tail string
|
||||
}
|
||||
|
||||
// ContainerRemoveOptions holds parameters to remove containers.
|
||||
type ContainerRemoveOptions struct {
|
||||
RemoveVolumes bool
|
||||
RemoveLinks bool
|
||||
Force bool
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
}
|
||||
|
||||
// EventsOptions hold parameters to filter events with.
|
||||
type EventsOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
type NetworkListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// HijackedResponse holds connection information for a hijacked request.
|
||||
type HijackedResponse struct {
|
||||
Conn net.Conn
|
||||
Reader *bufio.Reader
|
||||
}
|
||||
|
||||
// Close closes the hijacked connection and reader.
|
||||
func (h *HijackedResponse) Close() {
|
||||
h.Conn.Close()
|
||||
}
|
||||
|
||||
// CloseWriter is an interface that implements structs
|
||||
// that close input streams to prevent from writing.
|
||||
type CloseWriter interface {
|
||||
CloseWrite() error
|
||||
}
|
||||
|
||||
// CloseWrite closes a readWriter for writing.
|
||||
func (h *HijackedResponse) CloseWrite() error {
|
||||
if conn, ok := h.Conn.(CloseWriter); ok {
|
||||
return conn.CloseWrite()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImageBuildOptions holds the information
|
||||
// necessary to build images.
|
||||
type ImageBuildOptions struct {
|
||||
Tags []string
|
||||
SuppressOutput bool
|
||||
RemoteContext string
|
||||
NoCache bool
|
||||
Remove bool
|
||||
ForceRemove bool
|
||||
PullParent bool
|
||||
Isolation container.Isolation
|
||||
CPUSetCPUs string
|
||||
CPUSetMems string
|
||||
CPUShares int64
|
||||
CPUQuota int64
|
||||
CPUPeriod int64
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CgroupParent string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*units.Ulimit
|
||||
BuildArgs map[string]string
|
||||
AuthConfigs map[string]AuthConfig
|
||||
Context io.Reader
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// ImageBuildResponse holds information
|
||||
// returned by a server after building
|
||||
// an image.
|
||||
type ImageBuildResponse struct {
|
||||
Body io.ReadCloser
|
||||
OSType string
|
||||
}
|
||||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
type ImageCreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
type ImageImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
|
||||
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
|
||||
}
|
||||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
type ImageImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
}
|
||||
|
||||
// ImageListOptions holds parameters to filter the list of images with.
|
||||
type ImageListOptions struct {
|
||||
MatchName string
|
||||
All bool
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
type ImageLoadResponse struct {
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// ImagePullOptions holds information to pull images.
|
||||
type ImagePullOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
// This function returns the registry authentication
|
||||
// header value in base 64 format, or an error
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func() (string, error)
|
||||
|
||||
//ImagePushOptions holds information to push images.
|
||||
type ImagePushOptions ImagePullOptions
|
||||
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
type ImageRemoveOptions struct {
|
||||
Force bool
|
||||
PruneChildren bool
|
||||
}
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
type ImageSearchOptions struct {
|
||||
RegistryAuth string
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
}
|
||||
|
||||
// ImageTagOptions holds parameters to tag an image
|
||||
type ImageTagOptions struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// ResizeOptions holds parameters to resize a tty.
|
||||
// It can be used to resize container ttys and
|
||||
// exec process ttys too.
|
||||
type ResizeOptions struct {
|
||||
Height int
|
||||
Width int
|
||||
}
|
||||
|
||||
// VersionResponse holds version information for the client and the server
|
||||
type VersionResponse struct {
|
||||
Client *Version
|
||||
Server *Version
|
||||
}
|
||||
|
||||
// ServerOK returns true when the client could connect to the docker server
|
||||
// and parse the information received. It returns false otherwise.
|
||||
func (v VersionResponse) ServerOK() bool {
|
||||
return v.Server != nil
|
||||
}
|
53 Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go generated vendored Normal file
@ -0,0 +1,53 @@
package types

import (
	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/network"
)

// configs holds structs used for internal communication between the
// frontend (such as an http server) and the backend (such as the
// docker daemon).

// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
	Name             string
	Config           *container.Config
	HostConfig       *container.HostConfig
	NetworkingConfig *network.NetworkingConfig
	AdjustCPUShares  bool
}

// ContainerRmConfig holds arguments for the container remove
// operation. This struct is used to tell the backend what operations
// to perform.
type ContainerRmConfig struct {
	ForceRemove, RemoveVolume, RemoveLink bool
}

// ContainerCommitConfig contains build configs for commit operation,
// and is used when making a commit with the current state of the container.
type ContainerCommitConfig struct {
	Pause   bool
	Repo    string
	Tag     string
	Author  string
	Comment string
	// merge container config into commit config before commit
	MergeConfigs bool
	Config       *container.Config
}

// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
	User         string   // User that will run the command
	Privileged   bool     // Is the container in privileged mode
	Tty          bool     // Attach standard streams to a tty.
	AttachStdin  bool     // Attach the standard input, makes possible user interaction
	AttachStderr bool     // Attach the standard error
	AttachStdout bool     // Attach the standard output
	Detach       bool     // Execute in detach mode
	DetachKeys   string   // Escape keys for detach
	Cmd          []string // Execution commands and args
}
37 Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go generated vendored Normal file
@ -0,0 +1,37 @@
package container

import (
	"github.com/docker/engine-api/types/strslice"
	"github.com/docker/go-connections/nat"
)

// Config contains the configuration data about a container.
// It should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
	Hostname        string                // Hostname
	Domainname      string                // Domainname
	User            string                // User that will run the command(s) inside the container
	AttachStdin     bool                  // Attach the standard input, makes possible user interaction
	AttachStdout    bool                  // Attach the standard output
	AttachStderr    bool                  // Attach the standard error
	ExposedPorts    map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
	Tty             bool                  // Attach standard streams to a tty, including stdin if it is not closed.
	OpenStdin       bool                  // Open stdin
	StdinOnce       bool                  // If true, close stdin after the 1 attached client disconnects.
	Env             []string              // List of environment variables to set in the container
	Cmd             strslice.StrSlice     // Command to run when starting the container
	ArgsEscaped     bool                  `json:",omitempty"` // True if command is already escaped (Windows specific)
	Image           string                // Name of the image as it was passed by the operator (eg. could be symbolic)
	Volumes         map[string]struct{}   // List of volumes (mounts) used for the container
	WorkingDir      string                // Current directory (PWD) in which the command will be launched
	Entrypoint      strslice.StrSlice     // Entrypoint to run when starting the container
	NetworkDisabled bool                  `json:",omitempty"` // Is network disabled
	MacAddress      string                `json:",omitempty"` // Mac Address of the container
	OnBuild         []string              // ONBUILD metadata that were defined on the image Dockerfile
	Labels          map[string]string     // List of labels set to this container
	StopSignal      string                `json:",omitempty"` // Signal to stop a container
}
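A minimal sketch of filling in container.Config for a one-shot command, using only fields declared above; strslice.StrSlice is treated here as the []string-style type this file imports.

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/strslice"
)

func main() {
	cfg := &container.Config{
		Image:      "busybox",
		Cmd:        strslice.StrSlice{"echo", "hello"},
		Env:        []string{"FOO=bar"},
		WorkingDir: "/tmp",
		Labels:     map[string]string{"purpose": "demo"},
	}
	fmt.Println(cfg.Image, cfg.Cmd)
}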
301 Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go generated vendored Normal file
@ -0,0 +1,301 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/engine-api/types/blkiodev"
|
||||
"github.com/docker/engine-api/types/strslice"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// NetworkMode represents the container network stack.
|
||||
type NetworkMode string
|
||||
|
||||
// Isolation represents the isolation technology of a container. The supported
|
||||
// values are platform specific
|
||||
type Isolation string
|
||||
|
||||
// IsDefault indicates the default isolation technology of a container. On Linux this
|
||||
// is the native driver. On Windows, this is a Windows Server Container.
|
||||
func (i Isolation) IsDefault() bool {
|
||||
return strings.ToLower(string(i)) == "default" || string(i) == ""
|
||||
}
|
||||
|
||||
// IpcMode represents the container ipc stack.
|
||||
type IpcMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its private ipc stack.
|
||||
func (n IpcMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's ipc stack.
|
||||
func (n IpcMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether the container uses a container's ipc stack.
|
||||
func (n IpcMode) IsContainer() bool {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// Valid indicates whether the ipc stack is valid.
|
||||
func (n IpcMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
case "container":
|
||||
if len(parts) != 2 || parts[1] == "" {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Container returns the name of the container ipc stack is going to be used.
|
||||
func (n IpcMode) Container() string {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UsernsMode represents userns mode in the container.
|
||||
type UsernsMode string
|
||||
|
||||
// IsHost indicates whether the container uses the host's userns.
|
||||
func (n UsernsMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether the container uses the a private userns.
|
||||
func (n UsernsMode) IsPrivate() bool {
|
||||
return !(n.IsHost())
|
||||
}
|
||||
|
||||
// Valid indicates whether the userns is valid.
|
||||
func (n UsernsMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// CgroupSpec represents the cgroup to use for the container.
|
||||
type CgroupSpec string
|
||||
|
||||
// IsContainer indicates whether the container is using another container cgroup
|
||||
func (c CgroupSpec) IsContainer() bool {
|
||||
parts := strings.SplitN(string(c), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// Valid indicates whether the cgroup spec is valid.
|
||||
func (c CgroupSpec) Valid() bool {
|
||||
return c.IsContainer() || c == ""
|
||||
}
|
||||
|
||||
// Container returns the name of the container whose cgroup will be used.
|
||||
func (c CgroupSpec) Container() string {
|
||||
parts := strings.SplitN(string(c), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UTSMode represents the UTS namespace of the container.
|
||||
type UTSMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its private UTS namespace.
|
||||
func (n UTSMode) IsPrivate() bool {
|
||||
return !(n.IsHost())
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's UTS namespace.
|
||||
func (n UTSMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// Valid indicates whether the UTS namespace is valid.
|
||||
func (n UTSMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// PidMode represents the pid stack of the container.
|
||||
type PidMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its private pid stack.
|
||||
func (n PidMode) IsPrivate() bool {
|
||||
return !(n.IsHost())
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's pid stack.
|
||||
func (n PidMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// Valid indicates whether the pid stack is valid.
|
||||
func (n PidMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// DeviceMapping represents the device mapping between the host and the container.
|
||||
type DeviceMapping struct {
|
||||
PathOnHost string
|
||||
PathInContainer string
|
||||
CgroupPermissions string
|
||||
}
|
||||
|
||||
// RestartPolicy represents the restart policies of the container.
|
||||
type RestartPolicy struct {
|
||||
Name string
|
||||
MaximumRetryCount int
|
||||
}
|
||||
|
||||
// IsNone indicates whether the container has the "no" restart policy.
|
||||
// This means the container will not automatically restart when exiting.
|
||||
func (rp *RestartPolicy) IsNone() bool {
|
||||
return rp.Name == "no"
|
||||
}
|
||||
|
||||
// IsAlways indicates whether the container has the "always" restart policy.
|
||||
// This means the container will automatically restart regardless of the exit status.
|
||||
func (rp *RestartPolicy) IsAlways() bool {
|
||||
return rp.Name == "always"
|
||||
}
|
||||
|
||||
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
|
||||
// This means the container will automatically restart of exiting with a non-zero exit status.
|
||||
func (rp *RestartPolicy) IsOnFailure() bool {
|
||||
return rp.Name == "on-failure"
|
||||
}
|
||||
|
||||
// IsUnlessStopped indicates whether the container has the
|
||||
// "unless-stopped" restart policy. This means the container will
|
||||
// automatically restart unless user has put it to stopped state.
|
||||
func (rp *RestartPolicy) IsUnlessStopped() bool {
|
||||
return rp.Name == "unless-stopped"
|
||||
}
|
||||
|
||||
// IsSame compares two RestartPolicy to see if they are the same
|
||||
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
|
||||
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
|
||||
}
|
||||
|
||||
// LogConfig represents the logging configuration of the container.
|
||||
type LogConfig struct {
|
||||
Type string
|
||||
Config map[string]string
|
||||
}
|
||||
|
||||
// Resources contains container's resources (cgroups config, ulimits...)
|
||||
type Resources struct {
|
||||
// Applicable to all platforms
|
||||
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
|
||||
Memory int64 // Memory limit (in bytes)
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CgroupParent string // Parent cgroup.
|
||||
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
|
||||
BlkioWeightDevice []*blkiodev.WeightDevice
|
||||
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
|
||||
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
|
||||
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
|
||||
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
|
||||
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
|
||||
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
|
||||
CpusetCpus string // CpusetCpus 0-2, 0,1
|
||||
CpusetMems string // CpusetMems 0-2, 0,1
|
||||
Devices []DeviceMapping // List of devices to map inside the container
|
||||
DiskQuota int64 // Disk limit (in bytes)
|
||||
KernelMemory int64 // Kernel memory limit (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit int64 // Setting pids limit for a container
|
||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||
|
||||
// Applicable to Windows
|
||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||
CPUPercent int64 `json:"CpuPercent"` // CPU percent
|
||||
IOMaximumIOps uint64 // Maximum IOps for the container system drive
|
||||
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
|
||||
NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second
|
||||
}
|
||||
|
||||
// UpdateConfig holds the mutable attributes of a Container.
|
||||
// Those attributes can be updated at runtime.
|
||||
type UpdateConfig struct {
|
||||
// Contains container's resources (cgroups, ulimits)
|
||||
Resources
|
||||
RestartPolicy RestartPolicy
|
||||
}
|
||||
|
||||
// HostConfig the non-portable Config structure of a container.
|
||||
// Here, "non-portable" means "dependent of the host we are running on".
|
||||
// Portable information *should* appear in Config.
|
||||
type HostConfig struct {
|
||||
// Applicable to all platforms
|
||||
Binds []string // List of volume bindings for this container
|
||||
ContainerIDFile string // File (path) where the containerId is written
|
||||
LogConfig LogConfig // Configuration of the logs for this container
|
||||
NetworkMode NetworkMode // Network mode to use for the container
|
||||
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
|
||||
RestartPolicy RestartPolicy // Restart policy to be used for the container
|
||||
AutoRemove bool // Automatically remove container when it exits
|
||||
VolumeDriver string // Name of the volume driver used to mount volumes
|
||||
VolumesFrom []string // List of volumes to take from other container
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
|
||||
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
|
||||
DNS []string `json:"Dns"` // List of DNS server to lookup
|
||||
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
|
||||
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
|
||||
ExtraHosts []string // List of extra hosts
|
||||
GroupAdd []string // List of additional groups that the container process will run as
|
||||
IpcMode IpcMode // IPC namespace to use for the container
|
||||
Cgroup CgroupSpec // Cgroup to use for the container
|
||||
Links []string // List of links (in the name:alias form)
|
||||
OomScoreAdj int // Container preference for OOM-killing
|
||||
PidMode PidMode // PID namespace to use for the container
|
||||
Privileged bool // Is the container in privileged mode
|
||||
PublishAllPorts bool // Should docker publish all exposed port for the container
|
||||
ReadonlyRootfs bool // Is the container root filesystem in read-only
|
||||
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
|
||||
StorageOpt map[string]string // Storage driver options per container.
|
||||
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
|
||||
UTSMode UTSMode // UTS namespace to use for the container
|
||||
UsernsMode UsernsMode // The user namespace to use for the container
|
||||
ShmSize int64 // Total shm memory usage
|
||||
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
|
||||
|
||||
// Applicable to Windows
|
||||
ConsoleSize [2]int // Initial console size
|
||||
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
|
||||
|
||||
// Contains container's resources (cgroups, ulimits)
|
||||
Resources
|
||||
}
|
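A small sketch combining HostConfig, RestartPolicy and NetworkMode from this file; nat.PortMap and nat.PortBinding come from the docker/go-connections/nat dependency pinned in Godeps.json, and their field names are assumed here.

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/container"
	"github.com/docker/go-connections/nat"
)

func main() {
	hc := &container.HostConfig{
		NetworkMode:   container.NetworkMode("bridge"),
		RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
		PortBindings: nat.PortMap{
			nat.Port("80/tcp"): {{HostIP: "0.0.0.0", HostPort: "8080"}},
		},
	}
	// Helper methods on these types keep intent checks readable.
	fmt.Println(hc.RestartPolicy.IsOnFailure(), hc.NetworkMode.IsPrivate())
}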
81 Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go generated vendored Normal file
@ -0,0 +1,81 @@
|
|||
// +build !windows
|
||||
|
||||
package container
|
||||
|
||||
import "strings"
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault()
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether container uses it's private network stack.
|
||||
func (n NetworkMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
}
|
||||
|
||||
// IsDefault indicates whether container uses the default network stack.
|
||||
func (n NetworkMode) IsDefault() bool {
|
||||
return n == "default"
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
if n.IsBridge() {
|
||||
return "bridge"
|
||||
} else if n.IsHost() {
|
||||
return "host"
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
} else if n.IsNone() {
|
||||
return "none"
|
||||
} else if n.IsDefault() {
|
||||
return "default"
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
return n == "bridge"
|
||||
}
|
||||
|
||||
// IsHost indicates whether container uses the host network stack.
|
||||
func (n NetworkMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether container uses a container network stack.
|
||||
func (n NetworkMode) IsContainer() bool {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// IsNone indicates whether container isn't using a network stack.
|
||||
func (n NetworkMode) IsNone() bool {
|
||||
return n == "none"
|
||||
}
|
||||
|
||||
// ConnectedContainer is the id of the container which network this container is connected to.
|
||||
func (n NetworkMode) ConnectedContainer() string {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsUserDefined indicates user-created network
|
||||
func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
|
||||
}
|
||||
|
||||
//UserDefined indicates user-created network
|
||||
func (n NetworkMode) UserDefined() string {
|
||||
if n.IsUserDefined() {
|
||||
return string(n)
|
||||
}
|
||||
return ""
|
||||
}
|
87 Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go generated vendored Normal file
@ -0,0 +1,87 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// IsDefault indicates whether container uses the default network stack.
|
||||
func (n NetworkMode) IsDefault() bool {
|
||||
return n == "default"
|
||||
}
|
||||
|
||||
// IsNone indicates whether container isn't using a network stack.
|
||||
func (n NetworkMode) IsNone() bool {
|
||||
return n == "none"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether container uses a container network stack.
|
||||
// Returns false as windows doesn't support this mode
|
||||
func (n NetworkMode) IsContainer() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
// in windows it is given the name NAT
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
return n == "nat"
|
||||
}
|
||||
|
||||
// IsHost indicates whether container uses the host network stack.
|
||||
// returns false as this is not supported by windows
|
||||
func (n NetworkMode) IsHost() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether container uses its private network stack.
|
||||
func (n NetworkMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
}
|
||||
|
||||
// ConnectedContainer is the id of the container which network this container is connected to.
|
||||
// Returns blank string on windows
|
||||
func (n NetworkMode) ConnectedContainer() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsUserDefined indicates user-created network
|
||||
func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
|
||||
}
|
||||
|
||||
// IsHyperV indicates the use of a Hyper-V partition for isolation
|
||||
func (i Isolation) IsHyperV() bool {
|
||||
return strings.ToLower(string(i)) == "hyperv"
|
||||
}
|
||||
|
||||
// IsProcess indicates the use of process isolation
|
||||
func (i Isolation) IsProcess() bool {
|
||||
return strings.ToLower(string(i)) == "process"
|
||||
}
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
if n.IsDefault() {
|
||||
return "default"
|
||||
} else if n.IsBridge() {
|
||||
return "nat"
|
||||
} else if n.IsNone() {
|
||||
return "none"
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
//UserDefined indicates user-created network
|
||||
func (n NetworkMode) UserDefined() string {
|
||||
if n.IsUserDefined() {
|
||||
return string(n)
|
||||
}
|
||||
return ""
|
||||
}
|
38
Godeps/_workspace/src/github.com/docker/engine-api/types/events/events.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
package events

const (
	// ContainerEventType is the event type that containers generate
	ContainerEventType = "container"
	// ImageEventType is the event type that images generate
	ImageEventType = "image"
	// VolumeEventType is the event type that volumes generate
	VolumeEventType = "volume"
	// NetworkEventType is the event type that networks generate
	NetworkEventType = "network"
)

// Actor describes something that generates events,
// like a container, or a network, or a volume.
// It has a defined name and a set of attributes.
// The container attributes are its labels, other actors
// can generate these attributes from other properties.
type Actor struct {
	ID         string
	Attributes map[string]string
}

// Message represents the information an event contains
type Message struct {
	// Deprecated information from JSONMessage.
	// With data only in container events.
	Status string `json:"status,omitempty"`
	ID     string `json:"id,omitempty"`
	From   string `json:"from,omitempty"`

	Type   string
	Action string
	Actor  Actor

	Time     int64 `json:"time,omitempty"`
	TimeNano int64 `json:"timeNano,omitempty"`
}
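Illustrative only (not part of the diff): decoding a daemon event payload into the Message type above, assuming the vendored path github.com/docker/engine-api/types/events; the event JSON here is a made-up example.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/events"
)

func main() {
	// A hypothetical "container start" event as the daemon would emit it.
	raw := `{"Type":"container","Action":"start","Actor":{"ID":"abc123","Attributes":{"image":"ubuntu"}},"time":1461943101}`

	var msg events.Message
	if err := json.Unmarshal([]byte(raw), &msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.Type, msg.Action, msg.Actor.ID, msg.Actor.Attributes["image"])
}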
295
Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go
generated
vendored
Normal file
|
@ -0,0 +1,295 @@
|
|||
// Package filters provides helper function to parse and handle command line
|
||||
// filter, used for example in docker ps or docker images commands.
|
||||
package filters
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/engine-api/types/versions"
|
||||
)
|
||||
|
||||
// Args stores filter arguments as map key:{map key: bool}.
|
||||
// It contains an aggregation of the map of arguments (which are in the form
|
||||
// of -f 'key=value') based on the key, and stores values for the same key
|
||||
// in a map with string keys and boolean values.
|
||||
// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
|
||||
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
|
||||
type Args struct {
|
||||
fields map[string]map[string]bool
|
||||
}
|
||||
|
||||
// NewArgs initializes a new Args struct.
|
||||
func NewArgs() Args {
|
||||
return Args{fields: map[string]map[string]bool{}}
|
||||
}
|
||||
|
||||
// ParseFlag parses the argument to the filter flag. Like
|
||||
//
|
||||
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
|
||||
//
|
||||
// If prev map is provided, then it is appended to, and returned. By default a new
|
||||
// map is created.
|
||||
func ParseFlag(arg string, prev Args) (Args, error) {
|
||||
filters := prev
|
||||
if len(arg) == 0 {
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
if !strings.Contains(arg, "=") {
|
||||
return filters, ErrBadFormat
|
||||
}
|
||||
|
||||
f := strings.SplitN(arg, "=", 2)
|
||||
|
||||
name := strings.ToLower(strings.TrimSpace(f[0]))
|
||||
value := strings.TrimSpace(f[1])
|
||||
|
||||
filters.Add(name, value)
|
||||
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
// ErrBadFormat is an error returned in case of bad format for a filter.
|
||||
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
|
||||
|
||||
// ToParam packs the Args into a string for easy transport from client to server.
|
||||
func ToParam(a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(a.fields)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
|
||||
// The generated string will depend on the specified version (corresponding to the API version).
|
||||
func ToParamWithVersion(version string, a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// for daemons older than v1.10, filter must be of the form map[string][]string
|
||||
buf := []byte{}
|
||||
err := errors.New("")
|
||||
if version != "" && versions.LessThan(version, "1.22") {
|
||||
buf, err = json.Marshal(convertArgsToSlice(a.fields))
|
||||
} else {
|
||||
buf, err = json.Marshal(a.fields)
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// FromParam unpacks the filter Args.
|
||||
func FromParam(p string) (Args, error) {
|
||||
if len(p) == 0 {
|
||||
return NewArgs(), nil
|
||||
}
|
||||
|
||||
r := strings.NewReader(p)
|
||||
d := json.NewDecoder(r)
|
||||
|
||||
m := map[string]map[string]bool{}
|
||||
if err := d.Decode(&m); err != nil {
|
||||
r.Seek(0, 0)
|
||||
|
||||
// Allow parsing old arguments in slice format.
|
||||
// Because other libraries might be sending them in this format.
|
||||
deprecated := map[string][]string{}
|
||||
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
|
||||
m = deprecatedArgs(deprecated)
|
||||
} else {
|
||||
return NewArgs(), err
|
||||
}
|
||||
}
|
||||
return Args{m}, nil
|
||||
}
|
||||
|
||||
// Get returns the list of values associates with a field.
|
||||
// It returns a slice of strings to keep backwards compatibility with old code.
|
||||
func (filters Args) Get(field string) []string {
|
||||
values := filters.fields[field]
|
||||
if values == nil {
|
||||
return make([]string, 0)
|
||||
}
|
||||
slice := make([]string, 0, len(values))
|
||||
for key := range values {
|
||||
slice = append(slice, key)
|
||||
}
|
||||
return slice
|
||||
}
|
||||
|
||||
// Add adds a new value to a filter field.
|
||||
func (filters Args) Add(name, value string) {
|
||||
if _, ok := filters.fields[name]; ok {
|
||||
filters.fields[name][value] = true
|
||||
} else {
|
||||
filters.fields[name] = map[string]bool{value: true}
|
||||
}
|
||||
}
|
||||
|
||||
// Del removes a value from a filter field.
|
||||
func (filters Args) Del(name, value string) {
|
||||
if _, ok := filters.fields[name]; ok {
|
||||
delete(filters.fields[name], value)
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of fields in the arguments.
|
||||
func (filters Args) Len() int {
|
||||
return len(filters.fields)
|
||||
}
|
||||
|
||||
// MatchKVList returns true if the values for the specified field matches the ones
|
||||
// from the sources.
|
||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
|
||||
// it returns true.
|
||||
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
||||
fieldValues := filters.fields[field]
|
||||
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if sources == nil || len(sources) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
for name2match := range fieldValues {
|
||||
testKV := strings.SplitN(name2match, "=", 2)
|
||||
|
||||
v, ok := sources[testKV[0]]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if len(testKV) == 2 && testKV[1] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Match returns true if the values for the specified field matches the source string
|
||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||
// field is 'image.name' and source is 'ubuntu'
|
||||
// it returns true.
|
||||
func (filters Args) Match(field, source string) bool {
|
||||
if filters.ExactMatch(field, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := filters.fields[field]
|
||||
for name2match := range fieldValues {
|
||||
match, err := regexp.MatchString(name2match, source)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ExactMatch returns true if the source matches exactly one of the filters.
|
||||
func (filters Args) ExactMatch(field, source string) bool {
|
||||
fieldValues, ok := filters.fields[field]
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if !ok || len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// try to match full name value to avoid O(N) regular expression matching
|
||||
if fieldValues[source] {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FuzzyMatch returns true if the source matches exactly one of the filters,
|
||||
// or the source has one of the filters as a prefix.
|
||||
func (filters Args) FuzzyMatch(field, source string) bool {
|
||||
if filters.ExactMatch(field, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := filters.fields[field]
|
||||
for prefix := range fieldValues {
|
||||
if strings.HasPrefix(source, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Include returns true if the name of the field to filter is in the filters.
|
||||
func (filters Args) Include(field string) bool {
|
||||
_, ok := filters.fields[field]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Validate ensures that all the fields in the filter are valid.
|
||||
// It returns an error as soon as it finds an invalid field.
|
||||
func (filters Args) Validate(accepted map[string]bool) error {
|
||||
for name := range filters.fields {
|
||||
if !accepted[name] {
|
||||
return fmt.Errorf("Invalid filter '%s'", name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WalkValues iterates over the list of filtered values for a field.
|
||||
// It stops the iteration if it finds an error and it returns that error.
|
||||
func (filters Args) WalkValues(field string, op func(value string) error) error {
|
||||
if _, ok := filters.fields[field]; !ok {
|
||||
return nil
|
||||
}
|
||||
for v := range filters.fields[field] {
|
||||
if err := op(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
|
||||
m := map[string]map[string]bool{}
|
||||
for k, v := range d {
|
||||
values := map[string]bool{}
|
||||
for _, vv := range v {
|
||||
values[vv] = true
|
||||
}
|
||||
m[k] = values
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
|
||||
m := map[string][]string{}
|
||||
for k, v := range f {
|
||||
values := []string{}
|
||||
for kk := range v {
|
||||
if v[kk] {
|
||||
values = append(values, kk)
|
||||
}
|
||||
}
|
||||
m[k] = values
|
||||
}
|
||||
return m
|
||||
}
|
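Reviewer note (illustrative only, not part of the diff): a minimal round trip through the filters package above, building Args with Add, packing with ToParam and unpacking with FromParam; assumes the vendored path github.com/docker/engine-api/types/filters.

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("status", "running")
	args.Add("label", "env=prod")

	// Pack for the wire (JSON of map[string]map[string]bool), then unpack again.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	back, err := filters.FromParam(param)
	if err != nil {
		panic(err)
	}
	fmt.Println(param)
	fmt.Println(back.Get("status"), back.Include("label"))
}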
394
Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse_test.go
generated
vendored
Normal file
|
@ -0,0 +1,394 @@
|
|||
package filters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseArgs(t *testing.T) {
|
||||
// equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'`
|
||||
flagArgs := []string{
|
||||
"created=today",
|
||||
"image.name=ubuntu*",
|
||||
"image.name=*untu",
|
||||
}
|
||||
var (
|
||||
args = NewArgs()
|
||||
err error
|
||||
)
|
||||
for i := range flagArgs {
|
||||
args, err = ParseFlag(flagArgs[i], args)
|
||||
if err != nil {
|
||||
t.Errorf("failed to parse %s: %s", flagArgs[i], err)
|
||||
}
|
||||
}
|
||||
if len(args.Get("created")) != 1 {
|
||||
t.Errorf("failed to set this arg")
|
||||
}
|
||||
if len(args.Get("image.name")) != 2 {
|
||||
t.Errorf("the args should have collapsed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseArgsEdgeCase(t *testing.T) {
|
||||
var filters Args
|
||||
args, err := ParseFlag("", filters)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if args.Len() != 0 {
|
||||
t.Fatalf("Expected an empty Args (map), got %v", args)
|
||||
}
|
||||
if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat {
|
||||
t.Fatalf("Expected ErrBadFormat, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToParam(t *testing.T) {
|
||||
fields := map[string]map[string]bool{
|
||||
"created": {"today": true},
|
||||
"image.name": {"ubuntu*": true, "*untu": true},
|
||||
}
|
||||
a := Args{fields: fields}
|
||||
|
||||
_, err := ToParam(a)
|
||||
if err != nil {
|
||||
t.Errorf("failed to marshal the filters: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToParamWithVersion(t *testing.T) {
|
||||
fields := map[string]map[string]bool{
|
||||
"created": {"today": true},
|
||||
"image.name": {"ubuntu*": true, "*untu": true},
|
||||
}
|
||||
a := Args{fields: fields}
|
||||
|
||||
str1, err := ToParamWithVersion("1.21", a)
|
||||
if err != nil {
|
||||
t.Errorf("failed to marshal the filters with version < 1.22: %s", err)
|
||||
}
|
||||
str2, err := ToParamWithVersion("1.22", a)
|
||||
if err != nil {
|
||||
t.Errorf("failed to marshal the filters with version >= 1.22: %s", err)
|
||||
}
|
||||
if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` &&
|
||||
str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` {
|
||||
t.Errorf("incorrectly marshaled the filters: %s", str1)
|
||||
}
|
||||
if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` &&
|
||||
str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` {
|
||||
t.Errorf("incorrectly marshaled the filters: %s", str2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromParam(t *testing.T) {
|
||||
invalids := []string{
|
||||
"anything",
|
||||
"['a','list']",
|
||||
"{'key': 'value'}",
|
||||
`{"key": "value"}`,
|
||||
}
|
||||
valid := map[*Args][]string{
|
||||
&Args{fields: map[string]map[string]bool{"key": {"value": true}}}: {
|
||||
`{"key": ["value"]}`,
|
||||
`{"key": {"value": true}}`,
|
||||
},
|
||||
&Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: {
|
||||
`{"key": ["value1", "value2"]}`,
|
||||
`{"key": {"value1": true, "value2": true}}`,
|
||||
},
|
||||
&Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: {
|
||||
`{"key1": ["value1"], "key2": ["value2"]}`,
|
||||
`{"key1": {"value1": true}, "key2": {"value2": true}}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, invalid := range invalids {
|
||||
if _, err := FromParam(invalid); err == nil {
|
||||
t.Fatalf("Expected an error with %v, got nothing", invalid)
|
||||
}
|
||||
}
|
||||
|
||||
for expectedArgs, matchers := range valid {
|
||||
for _, json := range matchers {
|
||||
args, err := FromParam(json)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if args.Len() != expectedArgs.Len() {
|
||||
t.Fatalf("Expected %v, go %v", expectedArgs, args)
|
||||
}
|
||||
for key, expectedValues := range expectedArgs.fields {
|
||||
values := args.Get(key)
|
||||
|
||||
if len(values) != len(expectedValues) {
|
||||
t.Fatalf("Expected %v, go %v", expectedArgs, args)
|
||||
}
|
||||
|
||||
for _, v := range values {
|
||||
if !expectedValues[v] {
|
||||
t.Fatalf("Expected %v, go %v", expectedArgs, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
a := Args{}
|
||||
v, err := ToParam(a)
|
||||
if err != nil {
|
||||
t.Errorf("failed to marshal the filters: %s", err)
|
||||
}
|
||||
v1, err := FromParam(v)
|
||||
if err != nil {
|
||||
t.Errorf("%s", err)
|
||||
}
|
||||
if a.Len() != v1.Len() {
|
||||
t.Errorf("these should both be empty sets")
|
||||
}
|
||||
}
|
||||
|
||||
func TestArgsMatchKVListEmptySources(t *testing.T) {
|
||||
args := NewArgs()
|
||||
if !args.MatchKVList("created", map[string]string{}) {
|
||||
t.Fatalf("Expected true for (%v,created), got true", args)
|
||||
}
|
||||
|
||||
args = Args{map[string]map[string]bool{"created": {"today": true}}}
|
||||
if args.MatchKVList("created", map[string]string{}) {
|
||||
t.Fatalf("Expected false for (%v,created), got true", args)
|
||||
}
|
||||
}
|
||||
|
||||
func TestArgsMatchKVList(t *testing.T) {
|
||||
// Not empty sources
|
||||
sources := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
"key3": "value3",
|
||||
}
|
||||
|
||||
matches := map[*Args]string{
|
||||
&Args{}: "field",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today": true},
|
||||
"labels": map[string]bool{"key1": true}},
|
||||
}: "labels",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today": true},
|
||||
"labels": map[string]bool{"key1=value1": true}},
|
||||
}: "labels",
|
||||
}
|
||||
|
||||
for args, field := range matches {
|
||||
if args.MatchKVList(field, sources) != true {
|
||||
t.Fatalf("Expected true for %v on %v, got false", sources, args)
|
||||
}
|
||||
}
|
||||
|
||||
differs := map[*Args]string{
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today": true},
|
||||
"labels": map[string]bool{"key4": true}},
|
||||
}: "labels",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today": true},
|
||||
"labels": map[string]bool{"key1=value3": true}},
|
||||
}: "labels",
|
||||
}
|
||||
|
||||
for args, field := range differs {
|
||||
if args.MatchKVList(field, sources) != false {
|
||||
t.Fatalf("Expected false for %v on %v, got true", sources, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArgsMatch(t *testing.T) {
|
||||
source := "today"
|
||||
|
||||
matches := map[*Args]string{
|
||||
&Args{}: "field",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today": true}},
|
||||
}: "today",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"to*": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"to(.*)": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"tod": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"anyting": true, "to*": true}},
|
||||
}: "created",
|
||||
}
|
||||
|
||||
for args, field := range matches {
|
||||
if args.Match(field, source) != true {
|
||||
t.Fatalf("Expected true for %v on %v, got false", source, args)
|
||||
}
|
||||
}
|
||||
|
||||
differs := map[*Args]string{
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"tomorrow": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"to(day": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"tom(.*)": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"tom": true}},
|
||||
}: "created",
|
||||
&Args{map[string]map[string]bool{
|
||||
"created": map[string]bool{"today1": true},
|
||||
"labels": map[string]bool{"today": true}},
|
||||
}: "created",
|
||||
}
|
||||
|
||||
for args, field := range differs {
|
||||
if args.Match(field, source) != false {
|
||||
t.Fatalf("Expected false for %v on %v, got true", source, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
f := NewArgs()
|
||||
f.Add("status", "running")
|
||||
v := f.fields["status"]
|
||||
if len(v) != 1 || !v["running"] {
|
||||
t.Fatalf("Expected to include a running status, got %v", v)
|
||||
}
|
||||
|
||||
f.Add("status", "paused")
|
||||
if len(v) != 2 || !v["paused"] {
|
||||
t.Fatalf("Expected to include a paused status, got %v", v)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDel(t *testing.T) {
|
||||
f := NewArgs()
|
||||
f.Add("status", "running")
|
||||
f.Del("status", "running")
|
||||
v := f.fields["status"]
|
||||
if v["running"] {
|
||||
t.Fatalf("Expected to not include a running status filter, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLen(t *testing.T) {
|
||||
f := NewArgs()
|
||||
if f.Len() != 0 {
|
||||
t.Fatalf("Expected to not include any field")
|
||||
}
|
||||
f.Add("status", "running")
|
||||
if f.Len() != 1 {
|
||||
t.Fatalf("Expected to include one field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExactMatch(t *testing.T) {
|
||||
f := NewArgs()
|
||||
|
||||
if !f.ExactMatch("status", "running") {
|
||||
t.Fatalf("Expected to match `running` when there are no filters, got false")
|
||||
}
|
||||
|
||||
f.Add("status", "running")
|
||||
f.Add("status", "pause*")
|
||||
|
||||
if !f.ExactMatch("status", "running") {
|
||||
t.Fatalf("Expected to match `running` with one of the filters, got false")
|
||||
}
|
||||
|
||||
if f.ExactMatch("status", "paused") {
|
||||
t.Fatalf("Expected to not match `paused` with one of the filters, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInclude(t *testing.T) {
|
||||
f := NewArgs()
|
||||
if f.Include("status") {
|
||||
t.Fatalf("Expected to not include a status key, got true")
|
||||
}
|
||||
f.Add("status", "running")
|
||||
if !f.Include("status") {
|
||||
t.Fatalf("Expected to include a status key, got false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
f := NewArgs()
|
||||
f.Add("status", "running")
|
||||
|
||||
valid := map[string]bool{
|
||||
"status": true,
|
||||
"dangling": true,
|
||||
}
|
||||
|
||||
if err := f.Validate(valid); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f.Add("bogus", "running")
|
||||
if err := f.Validate(valid); err == nil {
|
||||
t.Fatalf("Expected to return an error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkValues(t *testing.T) {
|
||||
f := NewArgs()
|
||||
f.Add("status", "running")
|
||||
f.Add("status", "paused")
|
||||
|
||||
f.WalkValues("status", func(value string) error {
|
||||
if value != "running" && value != "paused" {
|
||||
t.Fatalf("Unexpected value %s", value)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
err := f.WalkValues("status", func(value string) error {
|
||||
return fmt.Errorf("return")
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("Expected to get an error, got nil")
|
||||
}
|
||||
|
||||
err = f.WalkValues("foo", func(value string) error {
|
||||
return fmt.Errorf("return")
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzyMatch(t *testing.T) {
|
||||
f := NewArgs()
|
||||
f.Add("container", "foo")
|
||||
|
||||
cases := map[string]bool{
|
||||
"foo": true,
|
||||
"foobar": true,
|
||||
"barfoo": false,
|
||||
"bar": false,
|
||||
}
|
||||
for source, match := range cases {
|
||||
got := f.FuzzyMatch("container", source)
|
||||
if got != match {
|
||||
t.Fatalf("Expected %v, got %v: %s", match, got, source)
|
||||
}
|
||||
}
|
||||
}
|
52
Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
package network

// Address represents an IP address
type Address struct {
	Addr      string
	PrefixLen int
}

// IPAM represents IP Address Management
type IPAM struct {
	Driver  string
	Options map[string]string // Per network IPAM driver options
	Config  []IPAMConfig
}

// IPAMConfig represents IPAM configurations
type IPAMConfig struct {
	Subnet     string            `json:",omitempty"`
	IPRange    string            `json:",omitempty"`
	Gateway    string            `json:",omitempty"`
	AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}

// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
	IPv4Address string `json:",omitempty"`
	IPv6Address string `json:",omitempty"`
}

// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
	// Configurations
	IPAMConfig *EndpointIPAMConfig
	Links      []string
	Aliases    []string
	// Operational data
	NetworkID           string
	EndpointID          string
	Gateway             string
	IPAddress           string
	IPPrefixLen         int
	IPv6Gateway         string
	GlobalIPv6Address   string
	GlobalIPv6PrefixLen int
	MacAddress          string
}

// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
	EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
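Illustrative sketch (not in the diff) of assembling a NetworkingConfig from the types above, assuming the vendored path github.com/docker/engine-api/types/network; the network name "my-net", the address and the alias are made up.

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/network"
)

func main() {
	// Attach a container to one user-defined network with a static IPv4 address.
	cfg := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"my-net": {
				IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: "172.20.0.5"},
				Aliases:    []string{"db"},
			},
		},
	}
	fmt.Println(cfg.EndpointsConfig["my-net"].IPAMConfig.IPv4Address)
}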
32
Godeps/_workspace/src/github.com/docker/engine-api/types/reference/image_reference.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
package reference

import (
	distreference "github.com/docker/distribution/reference"
)

// Parse parses the given references and returns the repository and
// tag (if present) from it. If there is an error during parsing, it will
// return an error.
func Parse(ref string) (string, string, error) {
	distributionRef, err := distreference.ParseNamed(ref)
	if err != nil {
		return "", "", err
	}

	tag := GetTagFromNamedRef(distributionRef)
	return distributionRef.Name(), tag, nil
}

// GetTagFromNamedRef returns a tag from the specified reference.
// This function is necessary as long as the docker "server" api makes the distinction between repository
// and tags.
func GetTagFromNamedRef(ref distreference.Named) string {
	var tag string
	switch x := ref.(type) {
	case distreference.Digested:
		tag = x.Digest().String()
	case distreference.NamedTagged:
		tag = x.Tag()
	}
	return tag
}
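Illustrative only (not part of the diff): splitting image references with the Parse helper above, assuming the vendored paths github.com/docker/engine-api/types/reference and github.com/docker/distribution/reference (the latter is added in this same Godeps update).

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/reference"
)

func main() {
	for _, ref := range []string{"ubuntu", "ubuntu:14.04", "docker.io/library/ubuntu:latest"} {
		repo, tag, err := reference.Parse(ref)
		if err != nil {
			panic(err)
		}
		// An untagged reference yields an empty tag string.
		fmt.Printf("%-35s repo=%-30s tag=%q\n", ref, repo, tag)
	}
}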
101
Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go
generated
vendored
Normal file
|
@ -0,0 +1,101 @@
|
|||
package registry
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
)
|
||||
|
||||
// ServiceConfig stores daemon registry services configuration.
|
||||
type ServiceConfig struct {
|
||||
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
|
||||
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
|
||||
Mirrors []string
|
||||
}
|
||||
|
||||
// NetIPNet is the net.IPNet type, which can be marshalled and
|
||||
// unmarshalled to JSON
|
||||
type NetIPNet net.IPNet
|
||||
|
||||
// MarshalJSON returns the JSON representation of the IPNet
|
||||
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal((*net.IPNet)(ipnet).String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets the IPNet from a byte array of JSON
|
||||
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
|
||||
var ipnetStr string
|
||||
if err = json.Unmarshal(b, &ipnetStr); err == nil {
|
||||
var cidr *net.IPNet
|
||||
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
|
||||
*ipnet = NetIPNet(*cidr)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IndexInfo contains information about a registry
|
||||
//
|
||||
// RepositoryInfo Examples:
|
||||
// {
|
||||
// "Index" : {
|
||||
// "Name" : "docker.io",
|
||||
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
|
||||
// "Secure" : true,
|
||||
// "Official" : true,
|
||||
// },
|
||||
// "RemoteName" : "library/debian",
|
||||
// "LocalName" : "debian",
|
||||
// "CanonicalName" : "docker.io/debian"
|
||||
// "Official" : true,
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "Index" : {
|
||||
// "Name" : "127.0.0.1:5000",
|
||||
// "Mirrors" : [],
|
||||
// "Secure" : false,
|
||||
// "Official" : false,
|
||||
// },
|
||||
// "RemoteName" : "user/repo",
|
||||
// "LocalName" : "127.0.0.1:5000/user/repo",
|
||||
// "CanonicalName" : "127.0.0.1:5000/user/repo",
|
||||
// "Official" : false,
|
||||
// }
|
||||
type IndexInfo struct {
|
||||
// Name is the name of the registry, such as "docker.io"
|
||||
Name string
|
||||
// Mirrors is a list of mirrors, expressed as URIs
|
||||
Mirrors []string
|
||||
// Secure is set to false if the registry is part of the list of
|
||||
// insecure registries. Insecure registries accept HTTP and/or accept
|
||||
// HTTPS with certificates from unknown CAs.
|
||||
Secure bool
|
||||
// Official indicates whether this is an official registry
|
||||
Official bool
|
||||
}
|
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct {
|
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"`
|
||||
// IsOfficial indicates whether the result is an official repository or not
|
||||
IsOfficial bool `json:"is_official"`
|
||||
// Name is the name of the repository
|
||||
Name string `json:"name"`
|
||||
// IsTrusted indicates whether the result is trusted
|
||||
IsTrusted bool `json:"is_trusted"`
|
||||
// IsAutomated indicates whether the result is automated
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// SearchResults lists a collection search results returned from a registry
|
||||
type SearchResults struct {
|
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"`
|
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"`
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
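Illustrative only (not part of the diff): the NetIPNet type above JSON-encodes a CIDR as a plain string rather than the default net.IPNet struct form; a small round trip, assuming the vendored path github.com/docker/engine-api/types/registry.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/registry"
)

func main() {
	var n registry.NetIPNet
	// Decode from the string form used by the daemon's /info response.
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	// Encode back; MarshalJSON is defined on *NetIPNet, so pass a pointer.
	out, err := json.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}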
68
Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
package types

// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
	DefaultAction Action     `json:"defaultAction"`
	Architectures []Arch     `json:"architectures"`
	Syscalls      []*Syscall `json:"syscalls"`
}

// Arch used for additional architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
	ArchX86         Arch = "SCMP_ARCH_X86"
	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
	ArchX32         Arch = "SCMP_ARCH_X32"
	ArchARM         Arch = "SCMP_ARCH_ARM"
	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
)

// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
	ActKill  Action = "SCMP_ACT_KILL"
	ActTrap  Action = "SCMP_ACT_TRAP"
	ActErrno Action = "SCMP_ACT_ERRNO"
	ActTrace Action = "SCMP_ACT_TRACE"
	ActAllow Action = "SCMP_ACT_ALLOW"
)

// Operator used to match syscall arguments in Seccomp
type Operator string

// Define operators for syscall arguments in Seccomp
const (
	OpNotEqual     Operator = "SCMP_CMP_NE"
	OpLessThan     Operator = "SCMP_CMP_LT"
	OpLessEqual    Operator = "SCMP_CMP_LE"
	OpEqualTo      Operator = "SCMP_CMP_EQ"
	OpGreaterEqual Operator = "SCMP_CMP_GE"
	OpGreaterThan  Operator = "SCMP_CMP_GT"
	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
)

// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
	Index    uint     `json:"index"`
	Value    uint64   `json:"value"`
	ValueTwo uint64   `json:"valueTwo"`
	Op       Operator `json:"op"`
}

// Syscall is used to match a syscall in Seccomp
type Syscall struct {
	Name   string  `json:"name"`
	Action Action  `json:"action"`
	Args   []*Arg  `json:"args"`
}
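Illustrative only (not part of the diff): building a tiny seccomp profile from the types above and emitting it as JSON, assuming the vendored path github.com/docker/engine-api/types; the syscall rule is an arbitrary example, not a recommended policy.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	profile := types.Seccomp{
		DefaultAction: types.ActErrno,
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX86},
		Syscalls: []*types.Syscall{
			// Allow clone only when the first argument has the chosen bit masked off (example values).
			{Name: "clone", Action: types.ActAllow, Args: []*types.Arg{
				{Index: 0, Value: 0x10000, ValueTwo: 0, Op: types.OpMaskedEqual},
			}},
		},
	}
	out, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}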
115
Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
// Package types is used for API stability in the types and response to the
|
||||
// consumers of the API stats endpoint.
|
||||
package types
|
||||
|
||||
import "time"
|
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container
|
||||
type ThrottlingData struct {
|
||||
// Number of periods with throttling active
|
||||
Periods uint64 `json:"periods"`
|
||||
// Number of periods when the container hits its throttling limit.
|
||||
ThrottledPeriods uint64 `json:"throttled_periods"`
|
||||
// Aggregate time the container was throttled for in nanoseconds.
|
||||
ThrottledTime uint64 `json:"throttled_time"`
|
||||
}
|
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
type CPUUsage struct {
|
||||
// Total CPU time consumed.
|
||||
// Units: nanoseconds.
|
||||
TotalUsage uint64 `json:"total_usage"`
|
||||
// Total CPU time consumed per core.
|
||||
// Units: nanoseconds.
|
||||
PercpuUsage []uint64 `json:"percpu_usage"`
|
||||
// Time spent by tasks of the cgroup in kernel mode.
|
||||
// Units: nanoseconds.
|
||||
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
|
||||
// Time spent by tasks of the cgroup in user mode.
|
||||
// Units: nanoseconds.
|
||||
UsageInUsermode uint64 `json:"usage_in_usermode"`
|
||||
}
|
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
type CPUStats struct {
|
||||
CPUUsage CPUUsage `json:"cpu_usage"`
|
||||
SystemUsage uint64 `json:"system_cpu_usage"`
|
||||
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
|
||||
}
|
||||
|
||||
// MemoryStats aggregates All memory stats since container inception
|
||||
type MemoryStats struct {
|
||||
// current res_counter usage for memory
|
||||
Usage uint64 `json:"usage"`
|
||||
// maximum usage ever recorded.
|
||||
MaxUsage uint64 `json:"max_usage"`
|
||||
// TODO(vishh): Export these as stronger types.
|
||||
// all the stats exported via memory.stat.
|
||||
Stats map[string]uint64 `json:"stats"`
|
||||
// number of times memory usage hits limits.
|
||||
Failcnt uint64 `json:"failcnt"`
|
||||
Limit uint64 `json:"limit"`
|
||||
}
|
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||
// TODO Windows: This can be factored out
|
||||
type BlkioStatEntry struct {
|
||||
Major uint64 `json:"major"`
|
||||
Minor uint64 `json:"minor"`
|
||||
Op string `json:"op"`
|
||||
Value uint64 `json:"value"`
|
||||
}
|
||||
|
||||
// BlkioStats stores All IO service stats for data read and write
|
||||
// TODO Windows: This can be factored out
|
||||
type BlkioStats struct {
|
||||
// number of bytes transferred to and from the block device
|
||||
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
|
||||
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
|
||||
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
|
||||
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
|
||||
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
|
||||
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
|
||||
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
|
||||
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
|
||||
}
|
||||
|
||||
// NetworkStats aggregates All network stats of one container
|
||||
// TODO Windows: This will require refactoring
|
||||
type NetworkStats struct {
|
||||
RxBytes uint64 `json:"rx_bytes"`
|
||||
RxPackets uint64 `json:"rx_packets"`
|
||||
RxErrors uint64 `json:"rx_errors"`
|
||||
RxDropped uint64 `json:"rx_dropped"`
|
||||
TxBytes uint64 `json:"tx_bytes"`
|
||||
TxPackets uint64 `json:"tx_packets"`
|
||||
TxErrors uint64 `json:"tx_errors"`
|
||||
TxDropped uint64 `json:"tx_dropped"`
|
||||
}
|
||||
|
||||
// PidsStats contains the stats of a container's pids
|
||||
type PidsStats struct {
|
||||
// Current is the number of pids in the cgroup
|
||||
Current uint64 `json:"current,omitempty"`
|
||||
// Limit is the hard limit on the number of pids in the cgroup.
|
||||
// A "Limit" of 0 means that there is no limit.
|
||||
Limit uint64 `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
type Stats struct {
|
||||
Read time.Time `json:"read"`
|
||||
PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
|
||||
CPUStats CPUStats `json:"cpu_stats,omitempty"`
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
|
||||
PidsStats PidsStats `json:"pids_stats,omitempty"`
|
||||
}
|
||||
|
||||
// StatsJSON is newly used Networks
|
||||
type StatsJSON struct {
|
||||
Stats
|
||||
|
||||
// Networks request version >=1.21
|
||||
Networks map[string]NetworkStats `json:"networks,omitempty"`
|
||||
}
|
30
Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
package strslice

import "encoding/json"

// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		// With no input, we preserve the existing value by returning nil and
		// leaving the target alone. This allows defining default values for
		// the type.
		return nil
	}

	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}

	*e = p
	return nil
}
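Illustrative only (not part of the diff): the custom decoder above accepts either a bare JSON string or a JSON array and produces the same slice type, which is how fields like Cmd and Entrypoint stay backwards compatible; assumes the vendored path github.com/docker/engine-api/types/strslice.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/strslice"
)

func main() {
	// A bare string and an array both decode into the same []string-backed type.
	for _, raw := range []string{`"echo hello"`, `["echo","hello"]`} {
		var cmd strslice.StrSlice
		if err := json.Unmarshal([]byte(raw), &cmd); err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %q\n", raw, []string(cmd))
	}
}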
86
Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice_test.go
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
package strslice
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStrSliceMarshalJSON(t *testing.T) {
|
||||
for _, testcase := range []struct {
|
||||
input StrSlice
|
||||
expected string
|
||||
}{
|
||||
// MADNESS(stevvooe): No clue why nil would be "" but empty would be
|
||||
// "null". Had to make a change here that may affect compatibility.
|
||||
{input: nil, expected: "null"},
|
||||
{StrSlice{}, "[]"},
|
||||
{StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
|
||||
} {
|
||||
data, err := json.Marshal(testcase.input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(data) != testcase.expected {
|
||||
t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrSliceUnmarshalJSON(t *testing.T) {
|
||||
parts := map[string][]string{
|
||||
"": {"default", "values"},
|
||||
"[]": {},
|
||||
`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
|
||||
}
|
||||
for json, expectedParts := range parts {
|
||||
strs := StrSlice{"default", "values"}
|
||||
if err := strs.UnmarshalJSON([]byte(json)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
actualParts := []string(strs)
|
||||
if !reflect.DeepEqual(actualParts, expectedParts) {
|
||||
t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrSliceUnmarshalString(t *testing.T) {
|
||||
var e StrSlice
|
||||
echo, err := json.Marshal("echo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := json.Unmarshal(echo, &e); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(e) != 1 {
|
||||
t.Fatalf("expected 1 element after unmarshal: %q", e)
|
||||
}
|
||||
|
||||
if e[0] != "echo" {
|
||||
t.Fatalf("expected `echo`, got: %q", e[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrSliceUnmarshalSlice(t *testing.T) {
|
||||
var e StrSlice
|
||||
echo, err := json.Marshal([]string{"echo"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := json.Unmarshal(echo, &e); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(e) != 1 {
|
||||
t.Fatalf("expected 1 element after unmarshal: %q", e)
|
||||
}
|
||||
|
||||
if e[0] != "echo" {
|
||||
t.Fatalf("expected `echo`, got: %q", e[0])
|
||||
}
|
||||
}
|
124
Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go
generated
vendored
Normal file
|
@ -0,0 +1,124 @@
|
|||
package time
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// These are additional predefined layouts for use in Time.Format and Time.Parse
|
||||
// with --since and --until parameters for `docker logs` and `docker events`
|
||||
const (
|
||||
rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
|
||||
rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
|
||||
dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
|
||||
dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
|
||||
)
|
||||
|
||||
// GetTimestamp tries to parse given string as golang duration,
|
||||
// then RFC3339 time and finally as a Unix timestamp. If
|
||||
// any of these were successful, it returns a Unix timestamp
|
||||
// as string otherwise returns the given value back.
|
||||
// In case of duration input, the returned timestamp is computed
|
||||
// as the given reference time minus the amount of the duration.
|
||||
func GetTimestamp(value string, reference time.Time) (string, error) {
|
||||
if d, err := time.ParseDuration(value); value != "0" && err == nil {
|
||||
return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
|
||||
}
|
||||
|
||||
var format string
|
||||
var parseInLocation bool
|
||||
|
||||
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
|
||||
parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
|
||||
|
||||
if strings.Contains(value, ".") {
|
||||
if parseInLocation {
|
||||
format = rFC3339NanoLocal
|
||||
} else {
|
||||
format = time.RFC3339Nano
|
||||
}
|
||||
} else if strings.Contains(value, "T") {
|
||||
// we want the number of colons in the T portion of the timestamp
|
||||
tcolons := strings.Count(value, ":")
|
||||
// if parseInLocation is off and we have a +/- zone offset (not Z) then
|
||||
// there will be an extra colon in the input for the tz offset subtract that
|
||||
// colon from the tcolons count
|
||||
if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
|
||||
tcolons--
|
||||
}
|
||||
if parseInLocation {
|
||||
switch tcolons {
|
||||
case 0:
|
||||
format = "2006-01-02T15"
|
||||
case 1:
|
||||
format = "2006-01-02T15:04"
|
||||
default:
|
||||
format = rFC3339Local
|
||||
}
|
||||
} else {
|
||||
switch tcolons {
|
||||
case 0:
|
||||
format = "2006-01-02T15Z07:00"
|
||||
case 1:
|
||||
format = "2006-01-02T15:04Z07:00"
|
||||
default:
|
||||
format = time.RFC3339
|
||||
}
|
||||
}
|
||||
} else if parseInLocation {
|
||||
format = dateLocal
|
||||
} else {
|
||||
format = dateWithZone
|
||||
}
|
||||
|
||||
var t time.Time
|
||||
var err error
|
||||
|
||||
if parseInLocation {
|
||||
t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
|
||||
} else {
|
||||
t, err = time.Parse(format, value)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp
|
||||
if strings.Contains(value, "-") {
|
||||
return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
|
||||
}
|
||||
return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
|
||||
}
|
||||
|
||||
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
|
||||
// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
|
||||
// if the incoming nanosecond portion is longer or shorter than 9 digits it is
|
||||
// converted to nanoseconds. The expectation is that the seconds and
|
||||
// seconds will be used to create a time variable. For example:
|
||||
// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
|
||||
// if err == nil since := time.Unix(seconds, nanoseconds)
|
||||
// returns seconds as def(aultSeconds) if value == ""
|
||||
func ParseTimestamps(value string, def int64) (int64, int64, error) {
|
||||
if value == "" {
|
||||
return def, 0, nil
|
||||
}
|
||||
sa := strings.SplitN(value, ".", 2)
|
||||
s, err := strconv.ParseInt(sa[0], 10, 64)
|
||||
if err != nil {
|
||||
return s, 0, err
|
||||
}
|
||||
if len(sa) != 2 {
|
||||
return s, 0, nil
|
||||
}
|
||||
n, err := strconv.ParseInt(sa[1], 10, 64)
|
||||
if err != nil {
|
||||
return s, n, err
|
||||
}
|
||||
// should already be in nanoseconds but just in case convert n to nanoseonds
|
||||
n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
|
||||
return s, n, nil
|
||||
}
|
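Illustrative only (not part of the diff): the timestamp helpers above turn durations, RFC3339-ish strings and raw Unix timestamps into the "seconds.nanoseconds" form the daemon expects for --since/--until; a small sketch, assuming the vendored path github.com/docker/engine-api/types/time (aliased because its package name is time).

package main

import (
	"fmt"
	"time"

	dockertime "github.com/docker/engine-api/types/time"
)

func main() {
	now := time.Now()
	// A duration is interpreted relative to the reference time; a bare Unix
	// timestamp is passed through unchanged.
	for _, in := range []string{"2015-05-13T20:39:09Z", "10m", "1431549549"} {
		ts, err := dockertime.GetTimestamp(in, now)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-22s -> %s\n", in, ts)
	}

	sec, nsec, err := dockertime.ParseTimestamps("1431549549.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(sec, nsec).UTC())
}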
93
Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp_test.go
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
|||
package time
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGetTimestamp(t *testing.T) {
|
||||
now := time.Now().In(time.UTC)
|
||||
cases := []struct {
|
||||
in, expected string
|
||||
expectedErr bool
|
||||
}{
|
||||
// Partial RFC3339 strings get parsed with second precision
|
||||
{"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false},
|
||||
{"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false},
|
||||
{"2006-01-02T15:04:05.999999999", "1136214245.999999999", false},
|
||||
{"2006-01-02T15:04:05Z", "1136214245.000000000", false},
|
||||
{"2006-01-02T15:04:05", "1136214245.000000000", false},
|
||||
{"2006-01-02T15:04:0Z", "", true},
|
||||
{"2006-01-02T15:04:0", "", true},
|
||||
{"2006-01-02T15:04Z", "1136214240.000000000", false},
|
||||
{"2006-01-02T15:04+00:00", "1136214240.000000000", false},
|
||||
{"2006-01-02T15:04-00:00", "1136214240.000000000", false},
|
||||
{"2006-01-02T15:04", "1136214240.000000000", false},
|
||||
{"2006-01-02T15:0Z", "", true},
|
||||
{"2006-01-02T15:0", "", true},
|
||||
{"2006-01-02T15Z", "1136214000.000000000", false},
|
||||
{"2006-01-02T15+00:00", "1136214000.000000000", false},
|
||||
{"2006-01-02T15-00:00", "1136214000.000000000", false},
|
||||
{"2006-01-02T15", "1136214000.000000000", false},
|
||||
{"2006-01-02T1Z", "1136163600.000000000", false},
|
||||
{"2006-01-02T1", "1136163600.000000000", false},
|
||||
{"2006-01-02TZ", "", true},
|
||||
{"2006-01-02T", "", true},
|
||||
{"2006-01-02+00:00", "1136160000.000000000", false},
|
||||
{"2006-01-02-00:00", "1136160000.000000000", false},
|
||||
{"2006-01-02-00:01", "1136160060.000000000", false},
|
||||
{"2006-01-02Z", "1136160000.000000000", false},
|
||||
{"2006-01-02", "1136160000.000000000", false},
|
||||
{"2015-05-13T20:39:09Z", "1431549549.000000000", false},
|
||||
|
||||
// unix timestamps returned as is
|
||||
{"1136073600", "1136073600", false},
|
||||
{"1136073600.000000001", "1136073600.000000001", false},
|
||||
// Durations
|
||||
{"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false},
|
||||
{"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false},
|
||||
{"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false},
|
||||
|
||||
// String fallback
|
||||
{"invalid", "invalid", false},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
o, err := GetTimestamp(c.in, now)
|
||||
if o != c.expected ||
|
||||
(err == nil && c.expectedErr) ||
|
||||
(err != nil && !c.expectedErr) {
|
||||
t.Errorf("wrong value for '%s'. expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTimestamps(t *testing.T) {
|
||||
cases := []struct {
|
||||
in string
|
||||
def, expectedS, expectedN int64
|
||||
expectedErr bool
|
||||
}{
|
||||
// unix timestamps
|
||||
{"1136073600", 0, 1136073600, 0, false},
|
||||
{"1136073600.000000001", 0, 1136073600, 1, false},
|
||||
{"1136073600.0000000010", 0, 1136073600, 1, false},
|
||||
{"1136073600.00000001", 0, 1136073600, 10, false},
|
||||
{"foo.bar", 0, 0, 0, true},
|
||||
{"1136073600.bar", 0, 1136073600, 0, true},
|
||||
{"", -1, -1, 0, false},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
s, n, err := ParseTimestamps(c.in, c.def)
|
||||
if s != c.expectedS ||
|
||||
n != c.expectedN ||
|
||||
(err == nil && c.expectedErr) ||
|
||||
(err != nil && !c.expectedErr) {
|
||||
t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
472
Godeps/_workspace/src/github.com/docker/engine-api/types/types.go
generated
vendored
Normal file
|
@ -0,0 +1,472 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/engine-api/types/container"
|
||||
"github.com/docker/engine-api/types/network"
|
||||
"github.com/docker/engine-api/types/registry"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerCreateResponse contains the information returned to a client on the
|
||||
// creation of a new container.
|
||||
type ContainerCreateResponse struct {
|
||||
// ID is the ID of the created container.
|
||||
ID string `json:"Id"`
|
||||
|
||||
// Warnings are any warnings encountered during the creation of the container.
|
||||
Warnings []string `json:"Warnings"`
|
||||
}
|
||||
|
||||
// ContainerExecCreateResponse contains response of Remote API:
|
||||
// POST "/containers/{name:.*}/exec"
|
||||
type ContainerExecCreateResponse struct {
|
||||
// ID is the exec ID.
|
||||
ID string `json:"Id"`
|
||||
}
|
||||
|
||||
// ContainerUpdateResponse contains response of Remote API:
|
||||
// POST /containers/{name:.*}/update
|
||||
type ContainerUpdateResponse struct {
|
||||
// Warnings are any warnings encountered during the updating of the container.
|
||||
Warnings []string `json:"Warnings"`
|
||||
}
|
||||
|
||||
// AuthResponse contains response of Remote API:
|
||||
// POST "/auth"
|
||||
type AuthResponse struct {
|
||||
// Status is the authentication status
|
||||
Status string `json:"Status"`
|
||||
|
||||
// IdentityToken is an opaque token used for authenticating
|
||||
// a user after a successful login.
|
||||
IdentityToken string `json:"IdentityToken,omitempty"`
|
||||
}
|
||||
|
||||
// ContainerWaitResponse contains response of Remote API:
|
||||
// POST "/containers/"+containerID+"/wait"
|
||||
type ContainerWaitResponse struct {
|
||||
// StatusCode is the status code of the wait job
|
||||
StatusCode int `json:"StatusCode"`
|
||||
}
|
||||
|
||||
// ContainerCommitResponse contains response of Remote API:
|
||||
// POST "/commit?container="+containerID
|
||||
type ContainerCommitResponse struct {
|
||||
ID string `json:"Id"`
|
||||
}
|
||||
|
||||
// ContainerChange contains response of Remote API:
|
||||
// GET "/containers/{name:.*}/changes"
|
||||
type ContainerChange struct {
|
||||
Kind int
|
||||
Path string
|
||||
}
|
||||
|
||||
// ImageHistory contains response of Remote API:
|
||||
// GET "/images/{name:.*}/history"
|
||||
type ImageHistory struct {
|
||||
ID string `json:"Id"`
|
||||
Created int64
|
||||
CreatedBy string
|
||||
Tags []string
|
||||
Size int64
|
||||
Comment string
|
||||
}
|
||||
|
||||
// ImageDelete contains response of Remote API:
|
||||
// DELETE "/images/{name:.*}"
|
||||
type ImageDelete struct {
|
||||
Untagged string `json:",omitempty"`
|
||||
Deleted string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Image contains response of Remote API:
|
||||
// GET "/images/json"
|
||||
type Image struct {
|
||||
ID string `json:"Id"`
|
||||
ParentID string `json:"ParentId"`
|
||||
RepoTags []string
|
||||
RepoDigests []string
|
||||
Created int64
|
||||
Size int64
|
||||
VirtualSize int64
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// GraphDriverData holds the Image's graph driver config info
// returned when calling the inspect command
|
||||
type GraphDriverData struct {
|
||||
Name string
|
||||
Data map[string]string
|
||||
}
|
||||
|
||||
// RootFS describes the Image's root filesystem, including the layer IDs.
|
||||
type RootFS struct {
|
||||
Type string
|
||||
Layers []string `json:",omitempty"`
|
||||
BaseLayer string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ImageInspect contains response of Remote API:
|
||||
// GET "/images/{name:.*}/json"
|
||||
type ImageInspect struct {
|
||||
ID string `json:"Id"`
|
||||
RepoTags []string
|
||||
RepoDigests []string
|
||||
Parent string
|
||||
Comment string
|
||||
Created string
|
||||
Container string
|
||||
ContainerConfig *container.Config
|
||||
DockerVersion string
|
||||
Author string
|
||||
Config *container.Config
|
||||
Architecture string
|
||||
Os string
|
||||
Size int64
|
||||
VirtualSize int64
|
||||
GraphDriver GraphDriverData
|
||||
RootFS RootFS
|
||||
}
|
||||
|
||||
// Port stores a container's open port info,
// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"}
|
||||
type Port struct {
|
||||
IP string `json:",omitempty"`
|
||||
PrivatePort int
|
||||
PublicPort int `json:",omitempty"`
|
||||
Type string
|
||||
}
|
||||
|
||||
// Container contains response of Remote API:
|
||||
// GET "/containers/json"
|
||||
type Container struct {
|
||||
ID string `json:"Id"`
|
||||
Names []string
|
||||
Image string
|
||||
ImageID string
|
||||
Command string
|
||||
Created int64
|
||||
Ports []Port
|
||||
SizeRw int64 `json:",omitempty"`
|
||||
SizeRootFs int64 `json:",omitempty"`
|
||||
Labels map[string]string
|
||||
State string
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
}
|
||||
NetworkSettings *SummaryNetworkSettings
|
||||
Mounts []MountPoint
|
||||
}
|
||||
|
||||
// CopyConfig contains request body of Remote API:
|
||||
// POST "/containers/"+containerID+"/copy"
|
||||
type CopyConfig struct {
|
||||
Resource string
|
||||
}
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type ContainerPathStat struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// ContainerProcessList contains response of Remote API:
|
||||
// GET "/containers/{name:.*}/top"
|
||||
type ContainerProcessList struct {
|
||||
Processes [][]string
|
||||
Titles []string
|
||||
}
|
||||
|
||||
// Version contains response of Remote API:
|
||||
// GET "/version"
|
||||
type Version struct {
|
||||
Version string
|
||||
APIVersion string `json:"ApiVersion"`
|
||||
GitCommit string
|
||||
GoVersion string
|
||||
Os string
|
||||
Arch string
|
||||
KernelVersion string `json:",omitempty"`
|
||||
Experimental bool `json:",omitempty"`
|
||||
BuildTime string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Info contains response of Remote API:
|
||||
// GET "/info"
|
||||
type Info struct {
|
||||
ID string
|
||||
Containers int
|
||||
ContainersRunning int
|
||||
ContainersPaused int
|
||||
ContainersStopped int
|
||||
Images int
|
||||
Driver string
|
||||
DriverStatus [][2]string
|
||||
SystemStatus [][2]string
|
||||
Plugins PluginsInfo
|
||||
MemoryLimit bool
|
||||
SwapLimit bool
|
||||
KernelMemory bool
|
||||
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
|
||||
CPUCfsQuota bool `json:"CpuCfsQuota"`
|
||||
CPUShares bool
|
||||
CPUSet bool
|
||||
IPv4Forwarding bool
|
||||
BridgeNfIptables bool
|
||||
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
|
||||
Debug bool
|
||||
NFd int
|
||||
OomKillDisable bool
|
||||
NGoroutines int
|
||||
SystemTime string
|
||||
ExecutionDriver string
|
||||
LoggingDriver string
|
||||
CgroupDriver string
|
||||
NEventsListener int
|
||||
KernelVersion string
|
||||
OperatingSystem string
|
||||
OSType string
|
||||
Architecture string
|
||||
IndexServerAddress string
|
||||
RegistryConfig *registry.ServiceConfig
|
||||
NCPU int
|
||||
MemTotal int64
|
||||
DockerRootDir string
|
||||
HTTPProxy string `json:"HttpProxy"`
|
||||
HTTPSProxy string `json:"HttpsProxy"`
|
||||
NoProxy string
|
||||
Name string
|
||||
Labels []string
|
||||
ExperimentalBuild bool
|
||||
ServerVersion string
|
||||
ClusterStore string
|
||||
ClusterAdvertise string
|
||||
SecurityOptions []string
|
||||
}
|
||||
|
||||
// PluginsInfo is a temporary struct holding the names of the plugins
// registered with the docker daemon. It is used by the Info struct
|
||||
type PluginsInfo struct {
|
||||
// List of Volume plugins registered
|
||||
Volume []string
|
||||
// List of Network plugins registered
|
||||
Network []string
|
||||
// List of Authorization plugins registered
|
||||
Authorization []string
|
||||
}
|
||||
|
||||
// ExecStartCheck is a temporary struct used by execStart.
// Its Config fields are part of ExecConfig in the runconfig package
|
||||
type ExecStartCheck struct {
|
||||
// ExecStart will first check if it's detached
|
||||
Detach bool
|
||||
// Check if there's a tty
|
||||
Tty bool
|
||||
}
|
||||
|
||||
// ContainerState stores the container's running state.
// It is part of ContainerJSONBase and is returned by the "inspect" command
|
||||
type ContainerState struct {
|
||||
Status string
|
||||
Running bool
|
||||
Paused bool
|
||||
Restarting bool
|
||||
OOMKilled bool
|
||||
Dead bool
|
||||
Pid int
|
||||
ExitCode int
|
||||
Error string
|
||||
StartedAt string
|
||||
FinishedAt string
|
||||
}
|
||||
|
||||
// ContainerNode stores information about the node that a container
|
||||
// is running on. It's only available in Docker Swarm
|
||||
type ContainerNode struct {
|
||||
ID string
|
||||
IPAddress string `json:"IP"`
|
||||
Addr string
|
||||
Name string
|
||||
Cpus int
|
||||
Memory int
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// ContainerJSONBase contains response of Remote API:
|
||||
// GET "/containers/{name:.*}/json"
|
||||
type ContainerJSONBase struct {
|
||||
ID string `json:"Id"`
|
||||
Created string
|
||||
Path string
|
||||
Args []string
|
||||
State *ContainerState
|
||||
Image string
|
||||
ResolvConfPath string
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
LogPath string
|
||||
Node *ContainerNode `json:",omitempty"`
|
||||
Name string
|
||||
RestartCount int
|
||||
Driver string
|
||||
MountLabel string
|
||||
ProcessLabel string
|
||||
AppArmorProfile string
|
||||
ExecIDs []string
|
||||
HostConfig *container.HostConfig
|
||||
GraphDriver GraphDriverData
|
||||
SizeRw *int64 `json:",omitempty"`
|
||||
SizeRootFs *int64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContainerJSON is the newly used struct along with MountPoint
|
||||
type ContainerJSON struct {
|
||||
*ContainerJSONBase
|
||||
Mounts []MountPoint
|
||||
Config *container.Config
|
||||
NetworkSettings *NetworkSettings
|
||||
}
|
||||
|
||||
// NetworkSettings exposes the network settings in the api
|
||||
type NetworkSettings struct {
|
||||
NetworkSettingsBase
|
||||
DefaultNetworkSettings
|
||||
Networks map[string]*network.EndpointSettings
|
||||
}
|
||||
|
||||
// SummaryNetworkSettings provides a summary of container's networks
|
||||
// in /containers/json
|
||||
type SummaryNetworkSettings struct {
|
||||
Networks map[string]*network.EndpointSettings
|
||||
}
|
||||
|
||||
// NetworkSettingsBase holds basic information about networks
|
||||
type NetworkSettingsBase struct {
|
||||
Bridge string
|
||||
SandboxID string
|
||||
HairpinMode bool
|
||||
LinkLocalIPv6Address string
|
||||
LinkLocalIPv6PrefixLen int
|
||||
Ports nat.PortMap
|
||||
SandboxKey string
|
||||
SecondaryIPAddresses []network.Address
|
||||
SecondaryIPv6Addresses []network.Address
|
||||
}
|
||||
|
||||
// DefaultNetworkSettings holds network information
// during the two-release deprecation period.
// It will be removed in Docker 1.11.
|
||||
type DefaultNetworkSettings struct {
|
||||
EndpointID string
|
||||
Gateway string
|
||||
GlobalIPv6Address string
|
||||
GlobalIPv6PrefixLen int
|
||||
IPAddress string
|
||||
IPPrefixLen int
|
||||
IPv6Gateway string
|
||||
MacAddress string
|
||||
}
|
||||
|
||||
// MountPoint represents a mount point configuration inside the container.
|
||||
type MountPoint struct {
|
||||
Name string `json:",omitempty"`
|
||||
Source string
|
||||
Destination string
|
||||
Driver string `json:",omitempty"`
|
||||
Mode string
|
||||
RW bool
|
||||
Propagation string
|
||||
}
|
||||
|
||||
// Volume represents the configuration of a volume for the remote API
|
||||
type Volume struct {
|
||||
Name string // Name is the name of the volume
|
||||
Driver string // Driver is the Driver name used to create the volume
|
||||
Mountpoint string // Mountpoint is the location on disk of the volume
|
||||
Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume
|
||||
Labels map[string]string // Labels is metadata specific to the volume
|
||||
}
|
||||
|
||||
// VolumesListResponse contains the response for the remote API:
|
||||
// GET "/volumes"
|
||||
type VolumesListResponse struct {
|
||||
Volumes []*Volume // Volumes is the list of volumes being returned
|
||||
Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers
|
||||
}
|
||||
|
||||
// VolumeCreateRequest contains the request for the remote API:
|
||||
// POST "/volumes/create"
|
||||
type VolumeCreateRequest struct {
|
||||
Name string // Name is the requested name of the volume
|
||||
Driver string // Driver is the name of the driver that should be used to create the volume
|
||||
DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume.
|
||||
Labels map[string]string // Labels holds metadata specific to the volume being created.
|
||||
}
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message
|
||||
type NetworkResource struct {
|
||||
Name string
|
||||
ID string `json:"Id"`
|
||||
Scope string
|
||||
Driver string
|
||||
EnableIPv6 bool
|
||||
IPAM network.IPAM
|
||||
Internal bool
|
||||
Containers map[string]EndpointResource
|
||||
Options map[string]string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network
|
||||
type EndpointResource struct {
|
||||
Name string
|
||||
EndpointID string
|
||||
MacAddress string
|
||||
IPv4Address string
|
||||
IPv6Address string
|
||||
}
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
type NetworkCreate struct {
|
||||
CheckDuplicate bool
|
||||
Driver string
|
||||
EnableIPv6 bool
|
||||
IPAM network.IPAM
|
||||
Internal bool
|
||||
Options map[string]string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
type NetworkCreateRequest struct {
|
||||
NetworkCreate
|
||||
Name string
|
||||
}
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call
|
||||
type NetworkCreateResponse struct {
|
||||
ID string `json:"Id"`
|
||||
Warning string
|
||||
}
|
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
type NetworkConnect struct {
|
||||
Container string
|
||||
EndpointConfig *network.EndpointSettings `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
type NetworkDisconnect struct {
|
||||
Container string
|
||||
Force bool
|
||||
}
|
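A minimal sketch of how the structs above are consumed, assuming the vendored import path `github.com/docker/engine-api/types`; the JSON payload below is an invented example shaped like a `GET /version` response:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// Invented example payload in the shape of a GET /version response.
	payload := []byte(`{"Version":"1.10.3","ApiVersion":"1.22","GoVersion":"go1.5.3","Os":"linux","Arch":"amd64"}`)

	var v types.Version
	if err := json.Unmarshal(payload, &v); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	// The struct tags above map "ApiVersion" onto the APIVersion field.
	fmt.Printf("daemon %s (API %s) on %s/%s\n", v.Version, v.APIVersion, v.Os, v.Arch)
}
```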
14
Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md
generated
vendored
Normal file
|
@@ -0,0 +1,14 @@
|
|||
## Legacy API type versions
|
||||
|
||||
This package includes types for legacy API versions. The stable versions of the API types live in `api/types/*.go`.
|
||||
|
||||
Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that use the legacy types in `v1p19`.
|
||||
|
||||
### Package name conventions
|
||||
|
||||
The package name convention is to use `v` as a prefix for the version number and `p` (for patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
|
||||
|
||||
1. We cannot use `.` because it would be interpreted by the language; think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks more awkward: `v1_20.CallFunction`.
|
||||
|
||||
For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you should create a new package under `api/types/versions/v1p21`.
|
62
Godeps/_workspace/src/github.com/docker/engine-api/types/versions/compare.go
generated
vendored
Normal file
|
@@ -0,0 +1,62 @@
|
|||
package versions
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// compare compares two version strings
|
||||
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
|
||||
func compare(v1, v2 string) int {
|
||||
var (
|
||||
currTab = strings.Split(v1, ".")
|
||||
otherTab = strings.Split(v2, ".")
|
||||
)
|
||||
|
||||
max := len(currTab)
|
||||
if len(otherTab) > max {
|
||||
max = len(otherTab)
|
||||
}
|
||||
for i := 0; i < max; i++ {
|
||||
var currInt, otherInt int
|
||||
|
||||
if len(currTab) > i {
|
||||
currInt, _ = strconv.Atoi(currTab[i])
|
||||
}
|
||||
if len(otherTab) > i {
|
||||
otherInt, _ = strconv.Atoi(otherTab[i])
|
||||
}
|
||||
if currInt > otherInt {
|
||||
return 1
|
||||
}
|
||||
if otherInt > currInt {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// LessThan checks if a version is less than another
|
||||
func LessThan(v, other string) bool {
|
||||
return compare(v, other) == -1
|
||||
}
|
||||
|
||||
// LessThanOrEqualTo checks if a version is less than or equal to another
|
||||
func LessThanOrEqualTo(v, other string) bool {
|
||||
return compare(v, other) <= 0
|
||||
}
|
||||
|
||||
// GreaterThan checks if a version is greater than another
|
||||
func GreaterThan(v, other string) bool {
|
||||
return compare(v, other) == 1
|
||||
}
|
||||
|
||||
// GreaterThanOrEqualTo checks if a version is greater than or equal to another
|
||||
func GreaterThanOrEqualTo(v, other string) bool {
|
||||
return compare(v, other) >= 0
|
||||
}
|
||||
|
||||
// Equal checks if a version is equal to another
|
||||
func Equal(v, other string) bool {
|
||||
return compare(v, other) == 0
|
||||
}
|
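A minimal usage sketch for the comparison helpers above, assuming a caller that has negotiated an API version string with the daemon (the version values are made-up examples):

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/versions"
)

func main() {
	apiVersion := "1.21" // e.g. negotiated with the daemon

	// Missing components compare as zero, so "1.21" equals "1.21.0".
	if versions.LessThan(apiVersion, "1.20") {
		fmt.Println("API too old for this feature")
		return
	}
	if versions.GreaterThanOrEqualTo(apiVersion, "1.22") {
		fmt.Println("can rely on fields introduced in 1.22")
	} else {
		fmt.Println("fall back to the 1.21 behaviour")
	}
}
```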
26
Godeps/_workspace/src/github.com/docker/engine-api/types/versions/compare_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,26 @@
|
|||
package versions
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertVersion(t *testing.T, a, b string, result int) {
|
||||
if r := compare(a, b); r != result {
|
||||
t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareVersion(t *testing.T) {
|
||||
assertVersion(t, "1.12", "1.12", 0)
|
||||
assertVersion(t, "1.0.0", "1", 0)
|
||||
assertVersion(t, "1", "1.0.0", 0)
|
||||
assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1)
|
||||
assertVersion(t, "1", "1.0.1", -1)
|
||||
assertVersion(t, "1.0.1", "1", 1)
|
||||
assertVersion(t, "1.0.1", "1.0.2", -1)
|
||||
assertVersion(t, "1.0.2", "1.0.3", -1)
|
||||
assertVersion(t, "1.0.3", "1.1", -1)
|
||||
assertVersion(t, "1.1", "1.1.1", -1)
|
||||
assertVersion(t, "1.1.1", "1.1.2", -1)
|
||||
assertVersion(t, "1.1.2", "1.2", -1)
|
||||
}
|
35
Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p19/types.go
generated
vendored
Normal file
|
@@ -0,0 +1,35 @@
|
|||
// Package v1p19 provides specific API types for the API version 1, patch 19.
|
||||
package v1p19
|
||||
|
||||
import (
|
||||
"github.com/docker/engine-api/types"
|
||||
"github.com/docker/engine-api/types/container"
|
||||
"github.com/docker/engine-api/types/versions/v1p20"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
|
||||
// Note this is not used by the Windows daemon.
|
||||
type ContainerJSON struct {
|
||||
*types.ContainerJSONBase
|
||||
Volumes map[string]string
|
||||
VolumesRW map[string]bool
|
||||
Config *ContainerConfig
|
||||
NetworkSettings *v1p20.NetworkSettings
|
||||
}
|
||||
|
||||
// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
|
||||
type ContainerConfig struct {
|
||||
*container.Config
|
||||
|
||||
MacAddress string
|
||||
NetworkDisabled bool
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
VolumeDriver string
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CPUShares int64 `json:"CpuShares"`
|
||||
CPUSet string `json:"Cpuset"`
|
||||
}
|
40
Godeps/_workspace/src/github.com/docker/engine-api/types/versions/v1p20/types.go
generated
vendored
Normal file
|
@@ -0,0 +1,40 @@
|
|||
// Package v1p20 provides specific API types for the API version 1, patch 20.
|
||||
package v1p20
|
||||
|
||||
import (
|
||||
"github.com/docker/engine-api/types"
|
||||
"github.com/docker/engine-api/types/container"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerJSON is a backcompatibility struct for the API 1.20
|
||||
type ContainerJSON struct {
|
||||
*types.ContainerJSONBase
|
||||
Mounts []types.MountPoint
|
||||
Config *ContainerConfig
|
||||
NetworkSettings *NetworkSettings
|
||||
}
|
||||
|
||||
// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20
|
||||
type ContainerConfig struct {
|
||||
*container.Config
|
||||
|
||||
MacAddress string
|
||||
NetworkDisabled bool
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
VolumeDriver string
|
||||
}
|
||||
|
||||
// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21
|
||||
type StatsJSON struct {
|
||||
types.Stats
|
||||
Network types.NetworkStats `json:"network,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkSettings is a backward compatible struct for APIs prior to 1.21
|
||||
type NetworkSettings struct {
|
||||
types.NetworkSettingsBase
|
||||
types.DefaultNetworkSettings
|
||||
}
|
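Tying the legacy packages to the `versions` helpers, a hedged sketch of version-gated decoding; the `decodeInspect` helper and the sample payload are hypothetical, not part of the vendored code:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/versions"
	"github.com/docker/engine-api/types/versions/v1p20"
)

// decodeInspect picks the inspect struct matching the negotiated API version.
func decodeInspect(apiVersion string, body []byte) (interface{}, error) {
	if versions.LessThan(apiVersion, "1.21") {
		var c v1p20.ContainerJSON
		err := json.Unmarshal(body, &c)
		return c, err
	}
	var c types.ContainerJSON
	err := json.Unmarshal(body, &c)
	return c, err
}

func main() {
	out, err := decodeInspect("1.20", []byte(`{"Id":"abc123","Name":"/web"}`))
	fmt.Printf("%+v (err: %v)\n", out, err)
}
```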
223
Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go
generated
vendored
Normal file
|
@@ -0,0 +1,223 @@
|
|||
// Package nat is a convenience package for manipulation of strings describing network ports.
|
||||
package nat
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// portSpecTemplate is the expected format for port specifications
|
||||
portSpecTemplate = "ip:hostPort:containerPort"
|
||||
)
|
||||
|
||||
// PortBinding represents a binding between a Host IP address and a Host Port
|
||||
type PortBinding struct {
|
||||
// HostIP is the host IP Address
|
||||
HostIP string `json:"HostIp"`
|
||||
// HostPort is the host port number
|
||||
HostPort string
|
||||
}
|
||||
|
||||
// PortMap is a collection of PortBinding indexed by Port
|
||||
type PortMap map[Port][]PortBinding
|
||||
|
||||
// PortSet is a collection of structs indexed by Port
|
||||
type PortSet map[Port]struct{}
|
||||
|
||||
// Port is a string containing port number and protocol in the format "80/tcp"
|
||||
type Port string
|
||||
|
||||
// NewPort creates a new instance of a Port given a protocol and port number or port range
|
||||
func NewPort(proto, port string) (Port, error) {
|
||||
// Check for parsing issues on "port" now so we can avoid having
|
||||
// to check it later on.
|
||||
|
||||
portStartInt, portEndInt, err := ParsePortRangeToInt(port)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if portStartInt == portEndInt {
|
||||
return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
|
||||
}
|
||||
return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
|
||||
}
|
||||
|
||||
// ParsePort parses the port number string and returns an int
|
||||
func ParsePort(rawPort string) (int, error) {
|
||||
if len(rawPort) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
port, err := strconv.ParseUint(rawPort, 10, 16)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(port), nil
|
||||
}
|
||||
|
||||
// ParsePortRangeToInt parses the port range string and returns start/end ints
|
||||
func ParsePortRangeToInt(rawPort string) (int, int, error) {
|
||||
if len(rawPort) == 0 {
|
||||
return 0, 0, nil
|
||||
}
|
||||
start, end, err := ParsePortRange(rawPort)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
return int(start), int(end), nil
|
||||
}
|
||||
|
||||
// Proto returns the protocol of a Port
|
||||
func (p Port) Proto() string {
|
||||
proto, _ := SplitProtoPort(string(p))
|
||||
return proto
|
||||
}
|
||||
|
||||
// Port returns the port number of a Port
|
||||
func (p Port) Port() string {
|
||||
_, port := SplitProtoPort(string(p))
|
||||
return port
|
||||
}
|
||||
|
||||
// Int returns the port number of a Port as an int
|
||||
func (p Port) Int() int {
|
||||
portStr := p.Port()
|
||||
if len(portStr) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// We don't need to check for an error because we're going to
|
||||
// assume that any error would have been found, and reported, in NewPort()
|
||||
port, _ := strconv.ParseUint(portStr, 10, 16)
|
||||
return int(port)
|
||||
}
|
||||
|
||||
// Range returns the start/end port numbers of a Port range as ints
|
||||
func (p Port) Range() (int, int, error) {
|
||||
return ParsePortRangeToInt(p.Port())
|
||||
}
|
||||
|
||||
// SplitProtoPort splits a port in the format of proto/port
|
||||
func SplitProtoPort(rawPort string) (string, string) {
|
||||
parts := strings.Split(rawPort, "/")
|
||||
l := len(parts)
|
||||
if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
|
||||
return "", ""
|
||||
}
|
||||
if l == 1 {
|
||||
return "tcp", rawPort
|
||||
}
|
||||
if len(parts[1]) == 0 {
|
||||
return "tcp", parts[0]
|
||||
}
|
||||
return parts[1], parts[0]
|
||||
}
|
||||
|
||||
func validateProto(proto string) bool {
|
||||
for _, availableProto := range []string{"tcp", "udp"} {
|
||||
if availableProto == proto {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
// these into the internal types
|
||||
func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
|
||||
var (
|
||||
exposedPorts = make(map[Port]struct{}, len(ports))
|
||||
bindings = make(map[Port][]PortBinding)
|
||||
)
|
||||
|
||||
for _, rawPort := range ports {
|
||||
proto := "tcp"
|
||||
|
||||
if i := strings.LastIndex(rawPort, "/"); i != -1 {
|
||||
proto = rawPort[i+1:]
|
||||
rawPort = rawPort[:i]
|
||||
}
|
||||
if !strings.Contains(rawPort, ":") {
|
||||
rawPort = fmt.Sprintf("::%s", rawPort)
|
||||
} else if len(strings.Split(rawPort, ":")) == 2 {
|
||||
rawPort = fmt.Sprintf(":%s", rawPort)
|
||||
}
|
||||
|
||||
parts, err := PartParser(portSpecTemplate, rawPort)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
containerPort = parts["containerPort"]
|
||||
rawIP = parts["ip"]
|
||||
hostPort = parts["hostPort"]
|
||||
)
|
||||
|
||||
if rawIP != "" && net.ParseIP(rawIP) == nil {
|
||||
return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP)
|
||||
}
|
||||
if containerPort == "" {
|
||||
return nil, nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
|
||||
}
|
||||
|
||||
startPort, endPort, err := ParsePortRange(containerPort)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
|
||||
}
|
||||
|
||||
var startHostPort, endHostPort uint64 = 0, 0
|
||||
if len(hostPort) > 0 {
|
||||
startHostPort, endHostPort, err = ParsePortRange(hostPort)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
|
||||
}
|
||||
}
|
||||
|
||||
if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
|
||||
// Allow host port range iff containerPort is not a range.
|
||||
// In this case, use the host port range as the dynamic
|
||||
// host port range to allocate into.
|
||||
if endPort != startPort {
|
||||
return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
|
||||
}
|
||||
}
|
||||
|
||||
if !validateProto(strings.ToLower(proto)) {
|
||||
return nil, nil, fmt.Errorf("Invalid proto: %s", proto)
|
||||
}
|
||||
|
||||
for i := uint64(0); i <= (endPort - startPort); i++ {
|
||||
containerPort = strconv.FormatUint(startPort+i, 10)
|
||||
if len(hostPort) > 0 {
|
||||
hostPort = strconv.FormatUint(startHostPort+i, 10)
|
||||
}
|
||||
// Set hostPort to a range only if there is a single container port
|
||||
// and a dynamic host port.
|
||||
if startPort == endPort && startHostPort != endHostPort {
|
||||
hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
|
||||
}
|
||||
port, err := NewPort(strings.ToLower(proto), containerPort)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if _, exists := exposedPorts[port]; !exists {
|
||||
exposedPorts[port] = struct{}{}
|
||||
}
|
||||
|
||||
binding := PortBinding{
|
||||
HostIP: rawIP,
|
||||
HostPort: hostPort,
|
||||
}
|
||||
bslice, exists := bindings[port]
|
||||
if !exists {
|
||||
bslice = []PortBinding{}
|
||||
}
|
||||
bindings[port] = append(bslice, binding)
|
||||
}
|
||||
}
|
||||
return exposedPorts, bindings, nil
|
||||
}
|
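A short sketch of how `ParsePortSpecs` is driven, using invented specs in the `ip:hostPort:containerPort/proto` form described above:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Example specs in the same shape accepted by `docker run -p`.
	exposed, bindings, err := nat.ParsePortSpecs([]string{
		"8080:80/tcp",          // host 8080 -> container 80
		"127.0.0.1::6000/udp",  // dynamic host port on 127.0.0.1 -> container 6000
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for port := range exposed {
		fmt.Printf("exposed %s (proto %s, port %s)\n", port, port.Proto(), port.Port())
	}
	for port, binds := range bindings {
		for _, b := range binds {
			fmt.Printf("%s -> host ip %q, host port %q\n", port, b.HostIP, b.HostPort)
		}
	}
}
```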
525
Godeps/_workspace/src/github.com/docker/go-connections/nat/nat_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,525 @@
|
|||
package nat
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParsePort(t *testing.T) {
|
||||
var (
|
||||
p int
|
||||
err error
|
||||
)
|
||||
|
||||
p, err = ParsePort("1234")
|
||||
|
||||
if err != nil || p != 1234 {
|
||||
t.Fatal("Parsing '1234' did not succeed")
|
||||
}
|
||||
|
||||
// FIXME currently this is a valid port. I don't think it should be.
|
||||
// I'm leaving this test commented out until we make a decision.
|
||||
// - erikh
|
||||
|
||||
/*
|
||||
p, err = ParsePort("0123")
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("Successfully parsed port '0123' to '123'")
|
||||
}
|
||||
*/
|
||||
|
||||
p, err = ParsePort("asdf")
|
||||
|
||||
if err == nil || p != 0 {
|
||||
t.Fatal("Parsing port 'asdf' succeeded")
|
||||
}
|
||||
|
||||
p, err = ParsePort("1asdf")
|
||||
|
||||
if err == nil || p != 0 {
|
||||
t.Fatal("Parsing port '1asdf' succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeToInt(t *testing.T) {
|
||||
var (
|
||||
begin int
|
||||
end int
|
||||
err error
|
||||
)
|
||||
|
||||
type TestRange struct {
|
||||
Range string
|
||||
Begin int
|
||||
End int
|
||||
}
|
||||
validRanges := []TestRange{
|
||||
{"1234", 1234, 1234},
|
||||
{"1234-1234", 1234, 1234},
|
||||
{"1234-1235", 1234, 1235},
|
||||
{"8000-9000", 8000, 9000},
|
||||
{"0", 0, 0},
|
||||
{"0-0", 0, 0},
|
||||
}
|
||||
|
||||
for _, r := range validRanges {
|
||||
begin, end, err = ParsePortRangeToInt(r.Range)
|
||||
|
||||
if err != nil || begin != r.Begin {
|
||||
t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin)
|
||||
}
|
||||
if err != nil || end != r.End {
|
||||
t.Fatalf("Parsing port range '%s' did not succeed. Expected end %d, got %d", r.Range, r.End, end)
|
||||
}
|
||||
}
|
||||
|
||||
invalidRanges := []string{
|
||||
"asdf",
|
||||
"1asdf",
|
||||
"9000-8000",
|
||||
"9000-",
|
||||
"-8000",
|
||||
"-8000-",
|
||||
}
|
||||
|
||||
for _, r := range invalidRanges {
|
||||
begin, end, err = ParsePortRangeToInt(r)
|
||||
|
||||
if err == nil || begin != 0 || end != 0 {
|
||||
t.Fatalf("Parsing port range '%s' succeeded", r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPort(t *testing.T) {
|
||||
p, err := NewPort("tcp", "1234")
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("tcp, 1234 had a parsing issue: %v", err)
|
||||
}
|
||||
|
||||
if string(p) != "1234/tcp" {
|
||||
t.Fatal("tcp, 1234 did not result in the string 1234/tcp")
|
||||
}
|
||||
|
||||
if p.Proto() != "tcp" {
|
||||
t.Fatal("protocol was not tcp")
|
||||
}
|
||||
|
||||
if p.Port() != "1234" {
|
||||
t.Fatal("port string value was not 1234")
|
||||
}
|
||||
|
||||
if p.Int() != 1234 {
|
||||
t.Fatal("port int value was not 1234")
|
||||
}
|
||||
|
||||
p, err = NewPort("tcp", "asd1234")
|
||||
if err == nil {
|
||||
t.Fatal("tcp, asd1234 was supposed to fail")
|
||||
}
|
||||
|
||||
p, err = NewPort("tcp", "1234-1230")
|
||||
if err == nil {
|
||||
t.Fatal("tcp, 1234-1230 was supposed to fail")
|
||||
}
|
||||
|
||||
p, err = NewPort("tcp", "1234-1242")
|
||||
if err != nil {
|
||||
t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err)
|
||||
}
|
||||
|
||||
if string(p) != "1234-1242/tcp" {
|
||||
t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitProtoPort(t *testing.T) {
|
||||
var (
|
||||
proto string
|
||||
port string
|
||||
)
|
||||
|
||||
proto, port = SplitProtoPort("1234/tcp")
|
||||
|
||||
if proto != "tcp" || port != "1234" {
|
||||
t.Fatal("Could not split 1234/tcp properly")
|
||||
}
|
||||
|
||||
proto, port = SplitProtoPort("")
|
||||
|
||||
if proto != "" || port != "" {
|
||||
t.Fatal("parsing an empty string yielded surprising results", proto, port)
|
||||
}
|
||||
|
||||
proto, port = SplitProtoPort("1234")
|
||||
|
||||
if proto != "tcp" || port != "1234" {
|
||||
t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port)
|
||||
}
|
||||
|
||||
proto, port = SplitProtoPort("1234/")
|
||||
|
||||
if proto != "tcp" || port != "1234" {
|
||||
t.Fatal("parsing '1234/' yielded:" + port + "/" + proto)
|
||||
}
|
||||
|
||||
proto, port = SplitProtoPort("/tcp")
|
||||
|
||||
if proto != "" || port != "" {
|
||||
t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortSpecs(t *testing.T) {
|
||||
var (
|
||||
portMap map[Port]struct{}
|
||||
bindingMap map[Port][]PortBinding
|
||||
err error
|
||||
)
|
||||
|
||||
portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error while processing ParsePortSpecs: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("1234/tcp")]; !ok {
|
||||
t.Fatal("1234/tcp was not parsed properly")
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("2345/udp")]; !ok {
|
||||
t.Fatal("2345/udp was not parsed properly")
|
||||
}
|
||||
|
||||
for portspec, bindings := range bindingMap {
|
||||
if len(bindings) != 1 {
|
||||
t.Fatalf("%s should have exactly one binding", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostIP != "" {
|
||||
t.Fatalf("HostIP should not be set for %s", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostPort != "" {
|
||||
t.Fatalf("HostPort should not be set for %s", portspec)
|
||||
}
|
||||
}
|
||||
|
||||
portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error while processing ParsePortSpecs: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("1234/tcp")]; !ok {
|
||||
t.Fatal("1234/tcp was not parsed properly")
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("2345/udp")]; !ok {
|
||||
t.Fatal("2345/udp was not parsed properly")
|
||||
}
|
||||
|
||||
for portspec, bindings := range bindingMap {
|
||||
_, port := SplitProtoPort(string(portspec))
|
||||
|
||||
if len(bindings) != 1 {
|
||||
t.Fatalf("%s should have exactly one binding", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostIP != "" {
|
||||
t.Fatalf("HostIP should not be set for %s", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostPort != port {
|
||||
t.Fatalf("HostPort should be %s for %s", port, portspec)
|
||||
}
|
||||
}
|
||||
|
||||
portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error while processing ParsePortSpecs: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("1234/tcp")]; !ok {
|
||||
t.Fatal("1234/tcp was not parsed properly")
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("2345/udp")]; !ok {
|
||||
t.Fatal("2345/udp was not parsed properly")
|
||||
}
|
||||
|
||||
for portspec, bindings := range bindingMap {
|
||||
_, port := SplitProtoPort(string(portspec))
|
||||
|
||||
if len(bindings) != 1 {
|
||||
t.Fatalf("%s should have exactly one binding", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostIP != "0.0.0.0" {
|
||||
t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostPort != port {
|
||||
t.Fatalf("HostPort should be %s for %s", port, portspec)
|
||||
}
|
||||
}
|
||||
|
||||
_, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"})
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("Received no error while trying to parse a hostname instead of ip")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortSpecsWithRange(t *testing.T) {
|
||||
var (
|
||||
portMap map[Port]struct{}
|
||||
bindingMap map[Port][]PortBinding
|
||||
err error
|
||||
)
|
||||
|
||||
portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error while processing ParsePortSpecs: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("1235/tcp")]; !ok {
|
||||
t.Fatal("1234/tcp was not parsed properly")
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("2346/udp")]; !ok {
|
||||
t.Fatal("2345/udp was not parsed properly")
|
||||
}
|
||||
|
||||
for portspec, bindings := range bindingMap {
|
||||
if len(bindings) != 1 {
|
||||
t.Fatalf("%s should have exactly one binding", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostIP != "" {
|
||||
t.Fatalf("HostIP should not be set for %s", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostPort != "" {
|
||||
t.Fatalf("HostPort should not be set for %s", portspec)
|
||||
}
|
||||
}
|
||||
|
||||
portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error while processing ParsePortSpecs: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("1235/tcp")]; !ok {
|
||||
t.Fatal("1234/tcp was not parsed properly")
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("2346/udp")]; !ok {
|
||||
t.Fatal("2345/udp was not parsed properly")
|
||||
}
|
||||
|
||||
for portspec, bindings := range bindingMap {
|
||||
_, port := SplitProtoPort(string(portspec))
|
||||
if len(bindings) != 1 {
|
||||
t.Fatalf("%s should have exactly one binding", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostIP != "" {
|
||||
t.Fatalf("HostIP should not be set for %s", portspec)
|
||||
}
|
||||
|
||||
if bindings[0].HostPort != port {
|
||||
t.Fatalf("HostPort should be %s for %s", port, portspec)
|
||||
}
|
||||
}
|
||||
|
||||
portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error while processing ParsePortSpecs: %s", err)
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("1235/tcp")]; !ok {
|
||||
t.Fatal("1234/tcp was not parsed properly")
|
||||
}
|
||||
|
||||
if _, ok := portMap[Port("2346/udp")]; !ok {
|
||||
t.Fatal("2345/udp was not parsed properly")
|
||||
}
|
||||
|
||||
for portspec, bindings := range bindingMap {
|
||||
_, port := SplitProtoPort(string(portspec))
|
||||
if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port {
|
||||
t.Fatalf("Expect single binding to port %s but found %s", port, bindings)
|
||||
}
|
||||
}
|
||||
|
||||
_, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"})
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("Received no error while trying to parse a hostname instead of ip")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
|
||||
ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(ports) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(ports))
|
||||
t.FailNow()
|
||||
}
|
||||
if len(bindings) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(bindings))
|
||||
t.FailNow()
|
||||
}
|
||||
for k := range ports {
|
||||
if k.Proto() != "tcp" {
|
||||
t.Logf("Expected tcp got %s", k.Proto())
|
||||
t.Fail()
|
||||
}
|
||||
if k.Port() != "80" {
|
||||
t.Logf("Expected 80 got %s", k.Port())
|
||||
t.Fail()
|
||||
}
|
||||
b, exists := bindings[k]
|
||||
if !exists {
|
||||
t.Log("Binding does not exist")
|
||||
t.FailNow()
|
||||
}
|
||||
if len(b) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(b))
|
||||
t.FailNow()
|
||||
}
|
||||
s := b[0]
|
||||
if s.HostPort != "" {
|
||||
t.Logf("Expected \"\" got %s", s.HostPort)
|
||||
t.Fail()
|
||||
}
|
||||
if s.HostIP != "192.168.1.100" {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNetworkOptsPublic(t *testing.T) {
|
||||
ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(ports) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(ports))
|
||||
t.FailNow()
|
||||
}
|
||||
if len(bindings) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(bindings))
|
||||
t.FailNow()
|
||||
}
|
||||
for k := range ports {
|
||||
if k.Proto() != "tcp" {
|
||||
t.Logf("Expected tcp got %s", k.Proto())
|
||||
t.Fail()
|
||||
}
|
||||
if k.Port() != "80" {
|
||||
t.Logf("Expected 80 got %s", k.Port())
|
||||
t.Fail()
|
||||
}
|
||||
b, exists := bindings[k]
|
||||
if !exists {
|
||||
t.Log("Binding does not exist")
|
||||
t.FailNow()
|
||||
}
|
||||
if len(b) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(b))
|
||||
t.FailNow()
|
||||
}
|
||||
s := b[0]
|
||||
if s.HostPort != "8080" {
|
||||
t.Logf("Expected 8080 got %s", s.HostPort)
|
||||
t.Fail()
|
||||
}
|
||||
if s.HostIP != "192.168.1.100" {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNetworkOptsPublicNoPort(t *testing.T) {
|
||||
ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"})
|
||||
|
||||
if err == nil {
|
||||
t.Logf("Expected error Invalid containerPort")
|
||||
t.Fail()
|
||||
}
|
||||
if ports != nil {
|
||||
t.Logf("Expected nil got %s", ports)
|
||||
t.Fail()
|
||||
}
|
||||
if bindings != nil {
|
||||
t.Logf("Expected nil got %s", bindings)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNetworkOptsNegativePorts(t *testing.T) {
|
||||
ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"})
|
||||
|
||||
if err == nil {
|
||||
t.Fail()
|
||||
}
|
||||
if len(ports) != 0 {
|
||||
t.Logf("Expected nil got %d", len(ports))
|
||||
t.Fail()
|
||||
}
|
||||
if len(bindings) != 0 {
|
||||
t.Logf("Expected 0 got %d", len(bindings))
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNetworkOptsUdp(t *testing.T) {
|
||||
ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(ports) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(ports))
|
||||
t.FailNow()
|
||||
}
|
||||
if len(bindings) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(bindings))
|
||||
t.FailNow()
|
||||
}
|
||||
for k := range ports {
|
||||
if k.Proto() != "udp" {
|
||||
t.Logf("Expected udp got %s", k.Proto())
|
||||
t.Fail()
|
||||
}
|
||||
if k.Port() != "6000" {
|
||||
t.Logf("Expected 6000 got %s", k.Port())
|
||||
t.Fail()
|
||||
}
|
||||
b, exists := bindings[k]
|
||||
if !exists {
|
||||
t.Log("Binding does not exist")
|
||||
t.FailNow()
|
||||
}
|
||||
if len(b) != 1 {
|
||||
t.Logf("Expected 1 got %d", len(b))
|
||||
t.FailNow()
|
||||
}
|
||||
s := b[0]
|
||||
if s.HostPort != "" {
|
||||
t.Logf("Expected \"\" got %s", s.HostPort)
|
||||
t.Fail()
|
||||
}
|
||||
if s.HostIP != "192.168.1.100" {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
56
Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go
generated
vendored
Normal file
|
@@ -0,0 +1,56 @@
|
|||
package nat
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PartParser parses and validates the specified string (data) using the specified template
|
||||
// e.g. ip:public:private -> 192.168.0.1:80:8000
|
||||
func PartParser(template, data string) (map[string]string, error) {
|
||||
// ip:public:private
|
||||
var (
|
||||
templateParts = strings.Split(template, ":")
|
||||
parts = strings.Split(data, ":")
|
||||
out = make(map[string]string, len(templateParts))
|
||||
)
|
||||
if len(parts) != len(templateParts) {
|
||||
return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
|
||||
}
|
||||
|
||||
for i, t := range templateParts {
|
||||
value := ""
|
||||
if len(parts) > i {
|
||||
value = parts[i]
|
||||
}
|
||||
out[t] = value
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
|
||||
func ParsePortRange(ports string) (uint64, uint64, error) {
|
||||
if ports == "" {
|
||||
return 0, 0, fmt.Errorf("Empty string specified for ports.")
|
||||
}
|
||||
if !strings.Contains(ports, "-") {
|
||||
start, err := strconv.ParseUint(ports, 10, 16)
|
||||
end := start
|
||||
return start, end, err
|
||||
}
|
||||
|
||||
parts := strings.Split(ports, "-")
|
||||
start, err := strconv.ParseUint(parts[0], 10, 16)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
end, err := strconv.ParseUint(parts[1], 10, 16)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if end < start {
|
||||
return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
|
||||
}
|
||||
return start, end, nil
|
||||
}
|
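A small sketch of the two parsing helpers above, with invented inputs:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Split an "ip:public:private" string into its named parts.
	parts, err := nat.PartParser("ip:hostPort:containerPort", "192.168.0.1:80:8000")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(parts["ip"], parts["hostPort"], parts["containerPort"]) // 192.168.0.1 80 8000

	// A range parses into start/end; a single port yields identical values.
	start, end, err := nat.ParsePortRange("8000-9000")
	if err != nil {
		fmt.Println("range error:", err)
		return
	}
	fmt.Println(start, end) // 8000 9000
}
```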
54
Godeps/_workspace/src/github.com/docker/go-connections/nat/parse_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,54 @@
|
|||
package nat
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParsePortRange(t *testing.T) {
|
||||
if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 {
|
||||
t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeEmpty(t *testing.T) {
|
||||
if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." {
|
||||
t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeWithNoRange(t *testing.T) {
|
||||
start, end, err := ParsePortRange("8080")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if start != 8080 || end != 8080 {
|
||||
t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectEndRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
|
||||
if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePortRangeIncorrectStartRange(t *testing.T) {
|
||||
if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
|
||||
if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") {
|
||||
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
|
||||
}
|
||||
}
|
96
Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go
generated
vendored
Normal file
|
@@ -0,0 +1,96 @@
|
|||
package nat
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type portSorter struct {
|
||||
ports []Port
|
||||
by func(i, j Port) bool
|
||||
}
|
||||
|
||||
func (s *portSorter) Len() int {
|
||||
return len(s.ports)
|
||||
}
|
||||
|
||||
func (s *portSorter) Swap(i, j int) {
|
||||
s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
|
||||
}
|
||||
|
||||
func (s *portSorter) Less(i, j int) bool {
|
||||
ip := s.ports[i]
|
||||
jp := s.ports[j]
|
||||
|
||||
return s.by(ip, jp)
|
||||
}
|
||||
|
||||
// Sort sorts a list of ports using the provided predicate
|
||||
// This function should compare `i` and `j`, returning true if `i` is
|
||||
// considered to be less than `j`
|
||||
func Sort(ports []Port, predicate func(i, j Port) bool) {
|
||||
s := &portSorter{ports, predicate}
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
type portMapEntry struct {
|
||||
port Port
|
||||
binding PortBinding
|
||||
}
|
||||
|
||||
type portMapSorter []portMapEntry
|
||||
|
||||
func (s portMapSorter) Len() int { return len(s) }
|
||||
func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// Less sorts the entries so that the order is:
// 1. the port whose binding has the larger explicit HostPort
// 2. the larger container port number
// 3. tcp before other protocols when the ports are equal
|
||||
func (s portMapSorter) Less(i, j int) bool {
|
||||
pi, pj := s[i].port, s[j].port
|
||||
hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
|
||||
return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
|
||||
}
|
||||
|
||||
// SortPortMap sorts the list of ports and their respective mappings. The ports
// with an explicit HostPort will be placed first.
|
||||
func SortPortMap(ports []Port, bindings PortMap) {
|
||||
s := portMapSorter{}
|
||||
for _, p := range ports {
|
||||
if binding, ok := bindings[p]; ok {
|
||||
for _, b := range binding {
|
||||
s = append(s, portMapEntry{port: p, binding: b})
|
||||
}
|
||||
bindings[p] = []PortBinding{}
|
||||
} else {
|
||||
s = append(s, portMapEntry{port: p})
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(s)
|
||||
var (
|
||||
i int
|
||||
pm = make(map[Port]struct{})
|
||||
)
|
||||
// reorder ports
|
||||
for _, entry := range s {
|
||||
if _, ok := pm[entry.port]; !ok {
|
||||
ports[i] = entry.port
|
||||
pm[entry.port] = struct{}{}
|
||||
i++
|
||||
}
|
||||
// reorder bindings for this port
|
||||
if _, ok := bindings[entry.port]; ok {
|
||||
bindings[entry.port] = append(bindings[entry.port], entry.binding)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func toInt(s string) uint64 {
|
||||
i, _, err := ParsePortRange(s)
|
||||
if err != nil {
|
||||
i = 0
|
||||
}
|
||||
return i
|
||||
}
|
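A brief sketch of `SortPortMap` on invented data, showing that ports with an explicit HostPort binding are ordered first:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := []nat.Port{"22/tcp", "8000/tcp", "9999/tcp"}
	bindings := nat.PortMap{
		"22/tcp":   {{}},
		"8000/tcp": {{}},
		"9999/tcp": {{HostIP: "0.0.0.0", HostPort: "40000"}},
	}

	nat.SortPortMap(ports, bindings)
	fmt.Println(ports) // [9999/tcp 8000/tcp 22/tcp]: the explicit 40000 mapping wins
}
```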
85
Godeps/_workspace/src/github.com/docker/go-connections/nat/sort_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,85 @@
|
|||
package nat
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSortUniquePorts(t *testing.T) {
|
||||
ports := []Port{
|
||||
Port("6379/tcp"),
|
||||
Port("22/tcp"),
|
||||
}
|
||||
|
||||
Sort(ports, func(ip, jp Port) bool {
|
||||
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
|
||||
})
|
||||
|
||||
first := ports[0]
|
||||
if fmt.Sprint(first) != "22/tcp" {
|
||||
t.Log(fmt.Sprint(first))
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortSamePortWithDifferentProto(t *testing.T) {
|
||||
ports := []Port{
|
||||
Port("8888/tcp"),
|
||||
Port("8888/udp"),
|
||||
Port("6379/tcp"),
|
||||
Port("6379/udp"),
|
||||
}
|
||||
|
||||
Sort(ports, func(ip, jp Port) bool {
|
||||
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
|
||||
})
|
||||
|
||||
first := ports[0]
|
||||
if fmt.Sprint(first) != "6379/tcp" {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortPortMap(t *testing.T) {
|
||||
ports := []Port{
|
||||
Port("22/tcp"),
|
||||
Port("22/udp"),
|
||||
Port("8000/tcp"),
|
||||
Port("6379/tcp"),
|
||||
Port("9999/tcp"),
|
||||
}
|
||||
|
||||
portMap := PortMap{
|
||||
Port("22/tcp"): []PortBinding{
|
||||
{},
|
||||
},
|
||||
Port("8000/tcp"): []PortBinding{
|
||||
{},
|
||||
},
|
||||
Port("6379/tcp"): []PortBinding{
|
||||
{},
|
||||
{HostIP: "0.0.0.0", HostPort: "32749"},
|
||||
},
|
||||
Port("9999/tcp"): []PortBinding{
|
||||
{HostIP: "0.0.0.0", HostPort: "40000"},
|
||||
},
|
||||
}
|
||||
|
||||
SortPortMap(ports, portMap)
|
||||
if !reflect.DeepEqual(ports, []Port{
|
||||
Port("9999/tcp"),
|
||||
Port("6379/tcp"),
|
||||
Port("8000/tcp"),
|
||||
Port("22/tcp"),
|
||||
Port("22/udp"),
|
||||
}) {
|
||||
t.Errorf("failed to prioritize port with explicit mappings, got %v", ports)
|
||||
}
|
||||
if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{
|
||||
{HostIP: "0.0.0.0", HostPort: "32749"},
|
||||
{},
|
||||
}) {
|
||||
t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm)
|
||||
}
|
||||
}
|
2
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore
generated
vendored
|
@@ -1,2 +0,0 @@
|
|||
# temporary symlink for testing
|
||||
testing/data/symlink
|
24
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
generated
vendored
|
@@ -1,24 +0,0 @@
|
|||
language: go
|
||||
sudo: required
|
||||
go:
|
||||
- 1.3.3
|
||||
- 1.4.2
|
||||
- 1.5.3
|
||||
- 1.6rc2
|
||||
- tip
|
||||
env:
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.7.1
|
||||
- GOARCH=386 DOCKER_VERSION=1.7.1
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.8.3
|
||||
- GOARCH=386 DOCKER_VERSION=1.8.3
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.9.1
|
||||
- GOARCH=386 DOCKER_VERSION=1.9.1
|
||||
- GOARCH=amd64 DOCKER_VERSION=1.10.0
|
||||
- GOARCH=386 DOCKER_VERSION=1.10.0
|
||||
install:
|
||||
- make prepare_docker
|
||||
script:
|
||||
- make test
|
||||
- DOCKER_HOST=tcp://127.0.0.1:2375 make integration
|
||||
services:
|
||||
- docker
|
125
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
|
@@ -1,125 +0,0 @@
|
|||
# This is the official list of go-dockerclient authors for copyright purposes.
|
||||
|
||||
Abhishek Chanda <abhishek.becs@gmail.com>
|
||||
Adam Bell-Hanssen <adamb@aller.no>
|
||||
Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
|
||||
Aldrin Leal <aldrin@leal.eng.br>
|
||||
Andreas Jaekle <andreas@jaekle.net>
Andrews Medina <andrewsmedina@gmail.com>
Andrey Sibiryov <kobolog@uber.com>
Andy Goldstein <andy.goldstein@redhat.com>
Antonio Murdaca <runcom@redhat.com>
Artem Sidorenko <artem@2realities.com>
Ben Marini <ben@remind101.com>
Ben McCann <benmccann.com>
Ben Parees <bparees@redhat.com>
Benno van den Berg <bennovandenberg@gmail.com>
Brendan Fosberry <brendan@codeship.com>
Brian Lalor <blalor@bravo5.org>
Brian P. Hamachek <brian@brianhama.com>
Brian Palmer <brianp@instructure.com>
Bryan Boreham <bjboreham@gmail.com>
Burke Libbey <burke@libbey.me>
Carlos Diaz-Padron <cpadron@mozilla.com>
Cesar Wong <cewong@redhat.com>
Cezar Sa Espinola <cezar.sa@corp.globo.com>
Cheah Chu Yeow <chuyeow@gmail.com>
cheneydeng <cheneydeng@qq.com>
Chris Bednarski <banzaimonkey@gmail.com>
CMGS <ilskdw@gmail.com>
Colin Hebert <hebert.colin@gmail.com>
Craig Jellick <craig@rancher.com>
Dan Williams <dcbw@redhat.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Darren Shepherd <darren@rancher.com>
Dave Choi <dave.choi@daumkakao.com>
David Huie <dahuie@gmail.com>
Dawn Chen <dawnchen@google.com>
Dinesh Subhraveti <dinesh@gemini-systems.net>
Drew Wells <drew.wells00@gmail.com>
Ed <edrocksit@gmail.com>
Elias G. Schneevoigt <eliasgs@gmail.com>
Erez Horev <erez.horev@elastifile.com>
Eric Anderson <anderson@copperegg.com>
Ewout Prangsma <ewout@prangsma.net>
Fabio Rehm <fgrehm@gmail.com>
Fatih Arslan <ftharsln@gmail.com>
Flavia Missi <flaviamissi@gmail.com>
Francisco Souza <f@souza.cc>
Grégoire Delattre <gregoire.delattre@gmail.com>
Guillermo Álvarez Fernández <guillermo@cientifico.net>
Harry Zhang <harryzhang@zju.edu.cn>
He Simei <hesimei@zju.edu.cn>
Ivan Mikushin <i.mikushin@gmail.com>
James Bardin <jbardin@litl.com>
James Nugent <james@jen20.com>
Jari Kolehmainen <jari.kolehmainen@digia.com>
Jason Wilder <jwilder@litl.com>
Jawher Moussa <jawher.moussa@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Mitchell <jeffrey.mitchell@gmail.com>
Jeffrey Hulten <jhulten@gmail.com>
Jen Andre <jandre@gmail.com>
Jérôme Laurens <jeromelaurens@gmail.com>
Johan Euphrosine <proppy@google.com>
John Hughes <hughesj@visa.com>
Kamil Domanski <kamil@domanski.co>
Karan Misra <kidoman@gmail.com>
Ken Herner <chosenken@gmail.com>
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
Kyle Allan <kallan357@gmail.com>
Liron Levin <levinlir@gmail.com>
Lior Yankovich <lior@twistlock.com>
Liu Peng <vslene@gmail.com>
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
Lucas Clemente <lucas@clemente.io>
Lucas Weiblen <lucasweiblen@gmail.com>
Lyon Hill <lyondhill@gmail.com>
Mantas Matelis <mmatelis@coursera.org>
Martin Sweeney <martin@sweeney.io>
Máximo Cuadros Ortiz <mcuadros@gmail.com>
Michael Schmatz <michaelschmatz@gmail.com>
Michal Fojtik <mfojtik@redhat.com>
Mike Dillon <mike.dillon@synctree.com>
Mrunal Patel <mrunalp@gmail.com>
Nguyen Sy Thanh Son <sonnst@sigma-solutions.eu>
Nick Ethier <ncethier@gmail.com>
Omeid Matten <public@omeid.me>
Orivej Desh <orivej@gmx.fr>
Paul Bellamy <paul.a.bellamy@gmail.com>
Paul Morie <pmorie@gmail.com>
Paul Weil <pweil@redhat.com>
Peter Edge <peter.edge@gmail.com>
Peter Jihoon Kim <raingrove@gmail.com>
Phil Lu <lu@stackengine.com>
Philippe Lafoucrière <philippe.lafoucriere@tech-angels.com>
Rafe Colton <rafael.colton@gmail.com>
Rob Miller <rob@kalistra.com>
Robert Williamson <williamson.robert@gmail.com>
Salvador Gironès <salvadorgirones@gmail.com>
Sam Rijs <srijs@airpost.net>
Sami Wagiaalla <swagiaal@redhat.com>
Samuel Karp <skarp@amazon.com>
Silas Sewell <silas@sewell.org>
Simon Eskildsen <sirup@sirupsen.com>
Simon Menke <simon.menke@gmail.com>
Skolos <skolos@gopherlab.com>
Soulou <leo@unbekandt.eu>
Sridhar Ratnakumar <sridharr@activestate.com>
Summer Mousa <smousa@zenoss.com>
Sunjin Lee <styner32@gmail.com>
Tarsis Azevedo <tarsis@corp.globo.com>
Tim Schindler <tim@catalyst-zero.com>
Timothy St. Clair <tstclair@redhat.com>
Tobi Knaup <tobi@mesosphere.io>
Tom Wilkie <tom.wilkie@gmail.com>
Tonic <tonicbupt@gmail.com>
ttyh061 <ttyh061@gmail.com>
Victor Marmol <vmarmol@google.com>
Vincenzo Prignano <vincenzo.prignano@gmail.com>
Wiliam Souza <wiliamsouza83@gmail.com>
Ye Yin <eyniy@qq.com>
Yu, Zou <zouyu7@huawei.com>
Yuriy Bogdanov <chinsay@gmail.com>
6 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE generated vendored
@@ -1,6 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

You can find the Docker license at the following link:
https://raw.githubusercontent.com/docker/docker/master/LICENSE
22 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE generated vendored
@@ -1,22 +0,0 @@
Copyright (c) 2015, go-dockerclient authors
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile generated vendored
@@ -1,60 +0,0 @@
.PHONY: \
	all \
	vendor \
	lint \
	vet \
	fmt \
	fmtcheck \
	pretest \
	test \
	integration \
	cov \
	clean

SRCS = $(shell git ls-files '*.go' | grep -v '^external/')
PKGS = ./. ./testing

all: test

vendor:
	@ go get -v github.com/mjibson/party
	party -d external -c -u

lint:
	@ go get -v github.com/golang/lint/golint
	$(foreach file,$(SRCS),golint $(file) || exit;)

vet:
	@-go get -v golang.org/x/tools/cmd/vet
	$(foreach pkg,$(PKGS),go vet $(pkg);)

fmt:
	gofmt -w $(SRCS)

fmtcheck:
	$(foreach file,$(SRCS),gofmt -d $(file);)

prepare_docker:
	sudo stop docker
	sudo rm -rf /var/lib/docker
	sudo rm -f `which docker`
	sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
	echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list
	sudo apt-get update
	sudo apt-get install docker-engine=$(DOCKER_VERSION)-0~$(shell lsb_release -cs) -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"

pretest: lint vet fmtcheck

test: pretest
	$(foreach pkg,$(PKGS),go test $(pkg) || exit;)

integration:
	go test -tags docker_integration -run TestIntegration -v

cov:
	@ go get -v github.com/axw/gocov/gocov
	@ go get golang.org/x/tools/cmd/cover
	gocov test | gocov report

clean:
	$(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
105 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown generated vendored
@@ -1,105 +0,0 @@
|
|||
# go-dockerclient
|
||||
|
||||
[](https://travis-ci.org/fsouza/go-dockerclient)
|
||||
[](https://godoc.org/github.com/fsouza/go-dockerclient)
|
||||
|
||||
This package presents a client for the Docker remote API. It also provides
|
||||
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/).
|
||||
|
||||
This package also provides support for docker's network API, which is a simple
|
||||
passthrough to the libnetwork remote API. Note that docker's network API is
|
||||
only available in docker 1.8 and above, and only enabled in docker if
|
||||
DOCKER_EXPERIMENTAL is defined during the docker build process.
|
||||
|
||||
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
|
||||
|
||||
## Vendoring
|
||||
|
||||
If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
|
||||
please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
|
||||
is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
|
||||
for details.
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
endpoint := "unix:///var/run/docker.sock"
|
||||
client, _ := docker.NewClient(endpoint)
|
||||
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
|
||||
for _, img := range imgs {
|
||||
fmt.Println("ID: ", img.ID)
|
||||
fmt.Println("RepoTags: ", img.RepoTags)
|
||||
fmt.Println("Created: ", img.Created)
|
||||
fmt.Println("Size: ", img.Size)
|
||||
fmt.Println("VirtualSize: ", img.VirtualSize)
|
||||
fmt.Println("ParentId: ", img.ParentID)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Using with TLS
|
||||
|
||||
In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
endpoint := "tcp://[ip]:[port]"
|
||||
path := os.Getenv("DOCKER_CERT_PATH")
|
||||
ca := fmt.Sprintf("%s/ca.pem", path)
|
||||
cert := fmt.Sprintf("%s/cert.pem", path)
|
||||
key := fmt.Sprintf("%s/key.pem", path)
|
||||
client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
|
||||
// use client
|
||||
}
|
||||
```
|
||||
|
||||
If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
|
||||
`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv.
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client, _ := docker.NewClientFromEnv()
|
||||
// use client
|
||||
}
|
||||
```
|
||||
|
||||
See the documentation for more details.
|
||||
|
||||
## Developing
|
||||
|
||||
All development commands can be seen in the [Makefile](Makefile).
|
||||
|
||||
Committed code must pass:
|
||||
|
||||
* [golint](https://github.com/golang/lint)
|
||||
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
|
||||
* [gofmt](https://golang.org/cmd/gofmt)
|
||||
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
|
||||
|
||||
Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.
|
136 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go generated vendored
@@ -1,136 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
|
||||
var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
|
||||
|
||||
// AuthConfiguration represents authentication options to use in the PushImage
|
||||
// method. It represents the authentication in the Docker index server.
|
||||
type AuthConfiguration struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Email string `json:"email,omitempty"`
|
||||
ServerAddress string `json:"serveraddress,omitempty"`
|
||||
}
|
||||
|
||||
// AuthConfigurations represents authentication options to use for the
|
||||
// PushImage method accommodating the new X-Registry-Config header
|
||||
type AuthConfigurations struct {
|
||||
Configs map[string]AuthConfiguration `json:"configs"`
|
||||
}
|
||||
|
||||
// AuthConfigurations119 is used to serialize a set of AuthConfigurations
|
||||
// for Docker API >= 1.19.
|
||||
type AuthConfigurations119 map[string]AuthConfiguration
|
||||
|
||||
// dockerConfig represents a registry authentication configuration from the
|
||||
// .dockercfg file.
|
||||
type dockerConfig struct {
|
||||
Auth string `json:"auth"`
|
||||
Email string `json:"email"`
|
||||
}
|
||||
|
||||
// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
|
||||
// ~/.dockercfg file.
|
||||
func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
|
||||
var r io.Reader
|
||||
var err error
|
||||
p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
|
||||
r, err = os.Open(p)
|
||||
if err != nil {
|
||||
p := path.Join(os.Getenv("HOME"), ".dockercfg")
|
||||
r, err = os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return NewAuthConfigurations(r)
|
||||
}
|
||||
|
||||
// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
|
||||
// same format as the .dockercfg file.
|
||||
func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
|
||||
var auth *AuthConfigurations
|
||||
confs, err := parseDockerConfig(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth, err = authConfigs(confs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
byteData := buf.Bytes()
|
||||
|
||||
var confsWrapper map[string]map[string]dockerConfig
|
||||
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
|
||||
if confs, ok := confsWrapper["auths"]; ok {
|
||||
return confs, nil
|
||||
}
|
||||
}
|
||||
|
||||
var confs map[string]dockerConfig
|
||||
if err := json.Unmarshal(byteData, &confs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return confs, nil
|
||||
}
|
||||
|
||||
// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
|
||||
func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
|
||||
c := &AuthConfigurations{
|
||||
Configs: make(map[string]AuthConfiguration),
|
||||
}
|
||||
for reg, conf := range confs {
|
||||
data, err := base64.StdEncoding.DecodeString(conf.Auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
userpass := strings.SplitN(string(data), ":", 2)
|
||||
if len(userpass) != 2 {
|
||||
return nil, ErrCannotParseDockercfg
|
||||
}
|
||||
c.Configs[reg] = AuthConfiguration{
|
||||
Email: conf.Email,
|
||||
Username: userpass[0],
|
||||
Password: userpass[1],
|
||||
ServerAddress: reg,
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// AuthCheck validates the given credentials. It returns nil if successful.
|
||||
//
|
||||
// See https://goo.gl/m2SleN for more details.
|
||||
func (c *Client) AuthCheck(conf *AuthConfiguration) error {
|
||||
if conf == nil {
|
||||
return fmt.Errorf("conf is nil")
|
||||
}
|
||||
resp, err := c.do("POST", "/auth", doOptions{data: conf})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
return nil
|
||||
}
|
91 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go generated vendored
@@ -1,91 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAuthLegacyConfig(t *testing.T) {
|
||||
auth := base64.StdEncoding.EncodeToString([]byte("user:pa:ss"))
|
||||
read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth))
|
||||
ac, err := NewAuthConfigurations(read)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
c, ok := ac.Configs["docker.io"]
|
||||
if !ok {
|
||||
t.Error("NewAuthConfigurations: Expected Configs to contain docker.io")
|
||||
}
|
||||
if got, want := c.Email, "user@example.com"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.Username, "user"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.Password, "pa:ss"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.ServerAddress, "docker.io"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthBadConfig(t *testing.T) {
|
||||
auth := base64.StdEncoding.EncodeToString([]byte("userpass"))
|
||||
read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth))
|
||||
ac, err := NewAuthConfigurations(read)
|
||||
if err != ErrCannotParseDockercfg {
|
||||
t.Errorf("Incorrect error returned %v\n", err)
|
||||
}
|
||||
if ac != nil {
|
||||
t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthConfig(t *testing.T) {
|
||||
auth := base64.StdEncoding.EncodeToString([]byte("user:pass"))
|
||||
read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth))
|
||||
ac, err := NewAuthConfigurations(read)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
c, ok := ac.Configs["docker.io"]
|
||||
if !ok {
|
||||
t.Error("NewAuthConfigurations: Expected Configs to contain docker.io")
|
||||
}
|
||||
if got, want := c.Email, "user@example.com"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.Username, "user"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.Password, "pass"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
if got, want := c.ServerAddress, "docker.io"; got != want {
|
||||
t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthCheck(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
if err := client.AuthCheck(nil); err == nil {
|
||||
t.Fatalf("expected error on nil auth config")
|
||||
}
|
||||
// test good auth
|
||||
if err := client.AuthCheck(&AuthConfiguration{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
*fakeRT = FakeRoundTripper{status: http.StatusUnauthorized}
|
||||
if err := client.AuthCheck(&AuthConfiguration{}); err == nil {
|
||||
t.Fatal("expected failure from unauthorized auth")
|
||||
}
|
||||
}
|
154 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go generated vendored
@@ -1,154 +0,0 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
func TestBuildImageMultipleContextsError(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
var buf bytes.Buffer
|
||||
opts := BuildImageOptions{
|
||||
Name: "testImage",
|
||||
NoCache: true,
|
||||
SuppressOutput: true,
|
||||
RmTmpContainer: true,
|
||||
ForceRmTmpContainer: true,
|
||||
InputStream: &buf,
|
||||
OutputStream: &buf,
|
||||
ContextDir: "testing/data",
|
||||
}
|
||||
err := client.BuildImage(opts)
|
||||
if err != ErrMultipleContexts {
|
||||
t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildImageContextDirDockerignoreParsing(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
|
||||
if err := os.Symlink("doesnotexist", "testing/data/symlink"); err != nil {
|
||||
t.Errorf("error creating symlink on demand: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Remove("testing/data/symlink"); err != nil {
|
||||
t.Errorf("error removing symlink on demand: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
var buf bytes.Buffer
|
||||
opts := BuildImageOptions{
|
||||
Name: "testImage",
|
||||
NoCache: true,
|
||||
SuppressOutput: true,
|
||||
RmTmpContainer: true,
|
||||
ForceRmTmpContainer: true,
|
||||
OutputStream: &buf,
|
||||
ContextDir: "testing/data",
|
||||
}
|
||||
err := client.BuildImage(opts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
reqBody := fakeRT.requests[0].Body
|
||||
tmpdir, err := unpackBodyTarball(reqBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmpdir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
files, err := ioutil.ReadDir(tmpdir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
foundFiles := []string{}
|
||||
for _, file := range files {
|
||||
foundFiles = append(foundFiles, file.Name())
|
||||
}
|
||||
|
||||
expectedFiles := []string{
|
||||
".dockerignore",
|
||||
"Dockerfile",
|
||||
"barfile",
|
||||
"ca.pem",
|
||||
"cert.pem",
|
||||
"key.pem",
|
||||
"server.pem",
|
||||
"serverkey.pem",
|
||||
"symlink",
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedFiles, foundFiles) {
|
||||
t.Errorf(
|
||||
"BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v",
|
||||
expectedFiles, foundFiles,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildImageSendXRegistryConfig(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
var buf bytes.Buffer
|
||||
opts := BuildImageOptions{
|
||||
Name: "testImage",
|
||||
NoCache: true,
|
||||
SuppressOutput: true,
|
||||
RmTmpContainer: true,
|
||||
ForceRmTmpContainer: true,
|
||||
OutputStream: &buf,
|
||||
ContextDir: "testing/data",
|
||||
AuthConfigs: AuthConfigurations{
|
||||
Configs: map[string]AuthConfiguration{
|
||||
"quay.io": {
|
||||
Username: "foo",
|
||||
Password: "bar",
|
||||
Email: "baz",
|
||||
ServerAddress: "quay.io",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg=="
|
||||
|
||||
if err := client.BuildImage(opts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0]
|
||||
if xRegistryConfig != encodedConfig {
|
||||
t.Errorf(
|
||||
"BuildImage: X-Registry-Config not set currectly: expected %q, got %q",
|
||||
encodedConfig,
|
||||
xRegistryConfig,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) {
|
||||
tmpdir, err = ioutil.TempDir("", "go-dockerclient-test")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = archive.Untar(req, tmpdir, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
NoLchown: true,
|
||||
})
|
||||
return
|
||||
}
|
43 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go generated vendored
@@ -1,43 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package docker

import "fmt"

// ChangeType is a type for constants indicating the type of change
// in a container
type ChangeType int

const (
	// ChangeModify is the ChangeType for container modifications
	ChangeModify ChangeType = iota

	// ChangeAdd is the ChangeType for additions to a container
	ChangeAdd

	// ChangeDelete is the ChangeType for deletions from a container
	ChangeDelete
)

// Change represents a change in a container.
//
// See https://goo.gl/9GsTIF for more details.
type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	var kind string
	switch change.Kind {
	case ChangeModify:
		kind = "C"
	case ChangeAdd:
		kind = "A"
	case ChangeDelete:
		kind = "D"
	}
	return fmt.Sprintf("%s %s", kind, change.Path)
}
24 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go generated vendored
@@ -1,24 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package docker

import "testing"

func TestChangeString(t *testing.T) {
	var tests = []struct {
		change   Change
		expected string
	}{
		{Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"},
		{Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"},
		{Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"},
		{Change{"/etc/passwd", 33}, " /etc/passwd"},
	}
	for _, tt := range tests {
		if got := tt.change.String(); got != tt.expected {
			t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got)
		}
	}
}
928 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go generated vendored
@@ -1,928 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package docker provides a client for the Docker remote API.
|
||||
//
|
||||
// See https://goo.gl/G3plxW for more details on the remote API.
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
const userAgent = "go-dockerclient"
|
||||
|
||||
var (
|
||||
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
|
||||
ErrInvalidEndpoint = errors.New("invalid endpoint")
|
||||
|
||||
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
|
||||
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
|
||||
|
||||
apiVersion112, _ = NewAPIVersion("1.12")
|
||||
|
||||
apiVersion119, _ = NewAPIVersion("1.19")
|
||||
)
|
||||
|
||||
// APIVersion is an internal representation of a version of the Remote API.
|
||||
type APIVersion []int
|
||||
|
||||
// NewAPIVersion returns an instance of APIVersion for the given string.
|
||||
//
|
||||
// The given string must be in the form <major>.<minor>.<patch>, where <major>,
|
||||
// <minor> and <patch> are integer numbers.
|
||||
func NewAPIVersion(input string) (APIVersion, error) {
|
||||
if !strings.Contains(input, ".") {
|
||||
return nil, fmt.Errorf("Unable to parse version %q", input)
|
||||
}
|
||||
raw := strings.Split(input, "-")
|
||||
arr := strings.Split(raw[0], ".")
|
||||
ret := make(APIVersion, len(arr))
|
||||
var err error
|
||||
for i, val := range arr {
|
||||
ret[i], err = strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (version APIVersion) String() string {
|
||||
var str string
|
||||
for i, val := range version {
|
||||
str += strconv.Itoa(val)
|
||||
if i < len(version)-1 {
|
||||
str += "."
|
||||
}
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// LessThan is a function for comparing APIVersion structs
|
||||
func (version APIVersion) LessThan(other APIVersion) bool {
|
||||
return version.compare(other) < 0
|
||||
}
|
||||
|
||||
// LessThanOrEqualTo is a function for comparing APIVersion structs
|
||||
func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
|
||||
return version.compare(other) <= 0
|
||||
}
|
||||
|
||||
// GreaterThan is a function for comparing APIVersion structs
|
||||
func (version APIVersion) GreaterThan(other APIVersion) bool {
|
||||
return version.compare(other) > 0
|
||||
}
|
||||
|
||||
// GreaterThanOrEqualTo is a function for comparing APIVersion structs
|
||||
func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
|
||||
return version.compare(other) >= 0
|
||||
}
|
||||
|
||||
func (version APIVersion) compare(other APIVersion) int {
|
||||
for i, v := range version {
|
||||
if i <= len(other)-1 {
|
||||
otherVersion := other[i]
|
||||
|
||||
if v < otherVersion {
|
||||
return -1
|
||||
} else if v > otherVersion {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(version) > len(other) {
|
||||
return 1
|
||||
}
|
||||
if len(version) < len(other) {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Client is the basic type of this package. It provides methods for
|
||||
// interaction with the API.
|
||||
type Client struct {
|
||||
SkipServerVersionCheck bool
|
||||
HTTPClient *http.Client
|
||||
TLSConfig *tls.Config
|
||||
Dialer *net.Dialer
|
||||
|
||||
endpoint string
|
||||
endpointURL *url.URL
|
||||
eventMonitor *eventMonitoringState
|
||||
requestedAPIVersion APIVersion
|
||||
serverAPIVersion APIVersion
|
||||
expectedAPIVersion APIVersion
|
||||
unixHTTPClient *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a Client instance ready for communication with the given
|
||||
// server endpoint. It will use the latest remote API version available in the
|
||||
// server.
|
||||
func NewClient(endpoint string) (*Client, error) {
|
||||
client, err := NewVersionedClient(endpoint, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewTLSClient returns a Client instance ready for TLS communications with the given
|
||||
// server endpoint, key and certificates . It will use the latest remote API version
|
||||
// available in the server.
|
||||
func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
|
||||
client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||
// read from a local file). It will use the latest remote API version available in the server.
|
||||
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
|
||||
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewVersionedClient returns a Client instance ready for communication with
|
||||
// the given server endpoint, using a specific remote API version.
|
||||
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
|
||||
u, err := parseEndpoint(endpoint, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var requestedAPIVersion APIVersion
|
||||
if strings.Contains(apiVersionString, ".") {
|
||||
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &Client{
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
eventMonitor: new(eventMonitoringState),
|
||||
requestedAPIVersion: requestedAPIVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
|
||||
func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
||||
return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
|
||||
}
|
||||
|
||||
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens
|
||||
// server endpoint, key and certificates, using a specific remote API version.
|
||||
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
||||
certPEMBlock, err := ioutil.ReadFile(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyPEMBlock, err := ioutil.ReadFile(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caPEMCert, err := ioutil.ReadFile(ca)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
|
||||
}
|
||||
|
||||
// NewClientFromEnv returns a Client instance ready for communication created from
|
||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
|
||||
//
|
||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||
func NewClientFromEnv() (*Client, error) {
|
||||
client, err := NewVersionedClientFromEnv("")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
|
||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
|
||||
// and using a specific remote API version.
|
||||
//
|
||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
||||
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
|
||||
dockerEnv, err := getDockerEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dockerHost := dockerEnv.dockerHost
|
||||
if dockerEnv.dockerTLSVerify {
|
||||
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
|
||||
}
|
||||
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
|
||||
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
|
||||
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
|
||||
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
|
||||
}
|
||||
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
|
||||
}
|
||||
|
||||
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
||||
// read from a local file), using a specific remote API version.
|
||||
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
|
||||
u, err := parseEndpoint(endpoint, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var requestedAPIVersion APIVersion
|
||||
if strings.Contains(apiVersionString, ".") {
|
||||
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if certPEMBlock == nil || keyPEMBlock == nil {
|
||||
return nil, errors.New("Both cert and key are required")
|
||||
}
|
||||
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
|
||||
if caPEMCert == nil {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
} else {
|
||||
caPool := x509.NewCertPool()
|
||||
if !caPool.AppendCertsFromPEM(caPEMCert) {
|
||||
return nil, errors.New("Could not add RootCA pem")
|
||||
}
|
||||
tlsConfig.RootCAs = caPool
|
||||
}
|
||||
tr := cleanhttp.DefaultTransport()
|
||||
tr.TLSClientConfig = tlsConfig
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Client{
|
||||
HTTPClient: &http.Client{Transport: tr},
|
||||
TLSConfig: tlsConfig,
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
eventMonitor: new(eventMonitoringState),
|
||||
requestedAPIVersion: requestedAPIVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) checkAPIVersion() error {
|
||||
serverAPIVersionString, err := c.getServerAPIVersionString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.requestedAPIVersion == nil {
|
||||
c.expectedAPIVersion = c.serverAPIVersion
|
||||
} else {
|
||||
c.expectedAPIVersion = c.requestedAPIVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoint returns the current endpoint. It's useful for getting the endpoint
|
||||
// when using functions that get this data from the environment (like
|
||||
// NewClientFromEnv).
|
||||
func (c *Client) Endpoint() string {
|
||||
return c.endpoint
|
||||
}
|
||||
|
||||
// Ping pings the docker server
|
||||
//
|
||||
// See https://goo.gl/kQCfJj for more details.
|
||||
func (c *Client) Ping() error {
|
||||
path := "/_ping"
|
||||
resp, err := c.do("GET", path, doOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return newError(resp)
|
||||
}
|
||||
resp.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) getServerAPIVersionString() (version string, err error) {
|
||||
resp, err := c.do("GET", "/version", doOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
|
||||
}
|
||||
var versionResponse map[string]interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if version, ok := (versionResponse["ApiVersion"]).(string); ok {
|
||||
return version, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
type doOptions struct {
|
||||
data interface{}
|
||||
forceJSON bool
|
||||
headers map[string]string
|
||||
}
|
||||
|
||||
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
|
||||
var params io.Reader
|
||||
if doOptions.data != nil || doOptions.forceJSON {
|
||||
buf, err := json.Marshal(doOptions.data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params = bytes.NewBuffer(buf)
|
||||
}
|
||||
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
|
||||
err := c.checkAPIVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
httpClient := c.HTTPClient
|
||||
protocol := c.endpointURL.Scheme
|
||||
var u string
|
||||
if protocol == "unix" {
|
||||
httpClient = c.unixClient()
|
||||
u = c.getFakeUnixURL(path)
|
||||
} else {
|
||||
u = c.getURL(path)
|
||||
}
|
||||
req, err := http.NewRequest(method, u, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
if doOptions.data != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
} else if method == "POST" {
|
||||
req.Header.Set("Content-Type", "plain/text")
|
||||
}
|
||||
|
||||
for k, v := range doOptions.headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return nil, ErrConnectionRefused
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return nil, newError(resp)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type streamOptions struct {
|
||||
setRawTerminal bool
|
||||
rawJSONStream bool
|
||||
useJSONDecoder bool
|
||||
headers map[string]string
|
||||
in io.Reader
|
||||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
// timeout is the initial connection timeout
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
|
||||
if (method == "POST" || method == "PUT") && streamOptions.in == nil {
|
||||
streamOptions.in = bytes.NewReader(nil)
|
||||
}
|
||||
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
|
||||
err := c.checkAPIVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
if method == "POST" {
|
||||
req.Header.Set("Content-Type", "plain/text")
|
||||
}
|
||||
for key, val := range streamOptions.headers {
|
||||
req.Header.Set(key, val)
|
||||
}
|
||||
var resp *http.Response
|
||||
protocol := c.endpointURL.Scheme
|
||||
address := c.endpointURL.Path
|
||||
if streamOptions.stdout == nil {
|
||||
streamOptions.stdout = ioutil.Discard
|
||||
}
|
||||
if streamOptions.stderr == nil {
|
||||
streamOptions.stderr = ioutil.Discard
|
||||
}
|
||||
if protocol == "unix" {
|
||||
dial, err := c.Dialer.Dial(protocol, address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dial.Close()
|
||||
breader := bufio.NewReader(dial)
|
||||
err = req.Write(dial)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadResponse may hang if the server does not reply
|
||||
if streamOptions.timeout > 0 {
|
||||
dial.SetDeadline(time.Now().Add(streamOptions.timeout))
|
||||
}
|
||||
|
||||
if resp, err = http.ReadResponse(breader, req); err != nil {
|
||||
// Cancel timeout for future I/O operations
|
||||
if streamOptions.timeout > 0 {
|
||||
dial.SetDeadline(time.Time{})
|
||||
}
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
}
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if resp, err = c.HTTPClient.Do(req); err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return ErrConnectionRefused
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return newError(resp)
|
||||
}
|
||||
if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
|
||||
// if we want to get raw json stream, just copy it back to output
|
||||
// without decoding it
|
||||
if streamOptions.rawJSONStream {
|
||||
_, err = io.Copy(streamOptions.stdout, resp.Body)
|
||||
return err
|
||||
}
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
for {
|
||||
var m jsonMessage
|
||||
if err := dec.Decode(&m); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if m.Stream != "" {
|
||||
fmt.Fprint(streamOptions.stdout, m.Stream)
|
||||
} else if m.Progress != "" {
|
||||
fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
|
||||
} else if m.Error != "" {
|
||||
return errors.New(m.Error)
|
||||
}
|
||||
if m.Status != "" {
|
||||
fmt.Fprintln(streamOptions.stdout, m.Status)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if streamOptions.setRawTerminal {
|
||||
_, err = io.Copy(streamOptions.stdout, resp.Body)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type hijackOptions struct {
|
||||
success chan struct{}
|
||||
setRawTerminal bool
|
||||
in io.Reader
|
||||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
data interface{}
|
||||
}
|
||||
|
||||
type CloseWaiter interface {
|
||||
io.Closer
|
||||
Wait() error
|
||||
}
|
||||
|
||||
type waiterFunc func() error
|
||||
|
||||
func (w waiterFunc) Wait() error { return w() }
|
||||
|
||||
type closerFunc func() error
|
||||
|
||||
func (c closerFunc) Close() error { return c() }
|
||||
|
||||
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
|
||||
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
|
||||
err := c.checkAPIVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var params io.Reader
|
||||
if hijackOptions.data != nil {
|
||||
buf, err := json.Marshal(hijackOptions.data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params = bytes.NewBuffer(buf)
|
||||
}
|
||||
req, err := http.NewRequest(method, c.getURL(path), params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Connection", "Upgrade")
|
||||
req.Header.Set("Upgrade", "tcp")
|
||||
protocol := c.endpointURL.Scheme
|
||||
address := c.endpointURL.Path
|
||||
if protocol != "unix" {
|
||||
protocol = "tcp"
|
||||
address = c.endpointURL.Host
|
||||
}
|
||||
var dial net.Conn
|
||||
if c.TLSConfig != nil && protocol != "unix" {
|
||||
dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
dial, err = c.Dialer.Dial(protocol, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
errs := make(chan error)
|
||||
quit := make(chan struct{})
|
||||
go func() {
|
||||
clientconn := httputil.NewClientConn(dial, nil)
|
||||
defer clientconn.Close()
|
||||
clientconn.Do(req)
|
||||
if hijackOptions.success != nil {
|
||||
hijackOptions.success <- struct{}{}
|
||||
<-hijackOptions.success
|
||||
}
|
||||
rwc, br := clientconn.Hijack()
|
||||
defer rwc.Close()
|
||||
|
||||
errChanOut := make(chan error, 1)
|
||||
errChanIn := make(chan error, 1)
|
||||
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
|
||||
close(errChanOut)
|
||||
} else {
|
||||
// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
|
||||
// Otherwise, if the only stream you care about is stdin, your attach session
|
||||
// will "hang" until the container terminates, even though you're not reading
|
||||
// stdout/stderr
|
||||
if hijackOptions.stdout == nil {
|
||||
hijackOptions.stdout = ioutil.Discard
|
||||
}
|
||||
if hijackOptions.stderr == nil {
|
||||
hijackOptions.stderr = ioutil.Discard
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if hijackOptions.in != nil {
|
||||
if closer, ok := hijackOptions.in.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
errChanIn <- nil
|
||||
}
|
||||
}()
|
||||
|
||||
var err error
|
||||
if hijackOptions.setRawTerminal {
|
||||
_, err = io.Copy(hijackOptions.stdout, br)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
|
||||
}
|
||||
errChanOut <- err
|
||||
}()
|
||||
}
|
||||
|
||||
go func() {
|
||||
var err error
|
||||
if hijackOptions.in != nil {
|
||||
_, err = io.Copy(rwc, hijackOptions.in)
|
||||
}
|
||||
errChanIn <- err
|
||||
rwc.(interface {
|
||||
CloseWrite() error
|
||||
}).CloseWrite()
|
||||
}()
|
||||
|
||||
var errIn error
|
||||
select {
|
||||
case errIn = <-errChanIn:
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
|
||||
var errOut error
|
||||
select {
|
||||
case errOut = <-errChanOut:
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
|
||||
if errIn != nil {
|
||||
errs <- errIn
|
||||
} else {
|
||||
errs <- errOut
|
||||
}
|
||||
}()
|
||||
|
||||
return struct {
|
||||
closerFunc
|
||||
waiterFunc
|
||||
}{
|
||||
closerFunc(func() error { close(quit); return nil }),
|
||||
waiterFunc(func() error { return <-errs }),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) getURL(path string) string {
|
||||
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
|
||||
if c.endpointURL.Scheme == "unix" {
|
||||
urlStr = ""
|
||||
}
|
||||
if c.requestedAPIVersion != nil {
|
||||
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", urlStr, path)
|
||||
}
|
||||
|
||||
// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
|
||||
// domain socket to the given path.
|
||||
func (c *Client) getFakeUnixURL(path string) string {
|
||||
u := *c.endpointURL // Copy.
|
||||
|
||||
// Override URL so that net/http will not complain.
|
||||
u.Scheme = "http"
|
||||
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
|
||||
u.Path = ""
|
||||
urlStr := strings.TrimRight(u.String(), "/")
|
||||
if c.requestedAPIVersion != nil {
|
||||
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", urlStr, path)
|
||||
}
|
||||
|
||||
func (c *Client) unixClient() *http.Client {
|
||||
if c.unixHTTPClient != nil {
|
||||
return c.unixHTTPClient
|
||||
}
|
||||
socketPath := c.endpointURL.Path
|
||||
tr := &http.Transport{
|
||||
Dial: func(network, addr string) (net.Conn, error) {
|
||||
return c.Dialer.Dial("unix", socketPath)
|
||||
},
|
||||
}
|
||||
cleanhttp.SetTransportFinalizer(tr)
|
||||
c.unixHTTPClient = &http.Client{Transport: tr}
|
||||
return c.unixHTTPClient
|
||||
}
|
||||
|
||||
type jsonMessage struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Progress string `json:"progress,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Stream string `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
func queryString(opts interface{}) string {
|
||||
if opts == nil {
|
||||
return ""
|
||||
}
|
||||
value := reflect.ValueOf(opts)
|
||||
if value.Kind() == reflect.Ptr {
|
||||
value = value.Elem()
|
||||
}
|
||||
if value.Kind() != reflect.Struct {
|
||||
return ""
|
||||
}
|
||||
items := url.Values(map[string][]string{})
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
field := value.Type().Field(i)
|
||||
if field.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
key := field.Tag.Get("qs")
|
||||
if key == "" {
|
||||
key = strings.ToLower(field.Name)
|
||||
} else if key == "-" {
|
||||
continue
|
||||
}
|
||||
addQueryStringValue(items, key, value.Field(i))
|
||||
}
|
||||
return items.Encode()
|
||||
}
|
||||
|
||||
func addQueryStringValue(items url.Values, key string, v reflect.Value) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
items.Add(key, "1")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if v.Int() > 0 {
|
||||
items.Add(key, strconv.FormatInt(v.Int(), 10))
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if v.Float() > 0 {
|
||||
items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
|
||||
}
|
||||
case reflect.String:
|
||||
if v.String() != "" {
|
||||
items.Add(key, v.String())
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if !v.IsNil() {
|
||||
if b, err := json.Marshal(v.Interface()); err == nil {
|
||||
items.Add(key, string(b))
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
if len(v.MapKeys()) > 0 {
|
||||
if b, err := json.Marshal(v.Interface()); err == nil {
|
||||
items.Add(key, string(b))
|
||||
}
|
||||
}
|
||||
case reflect.Array, reflect.Slice:
|
||||
vLen := v.Len()
|
||||
if vLen > 0 {
|
||||
for i := 0; i < vLen; i++ {
|
||||
addQueryStringValue(items, key, v.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Error represents a failure returned by the API, carrying the HTTP status code and response message.
|
||||
type Error struct {
|
||||
Status int
|
||||
Message string
|
||||
}
|
||||
|
||||
func newError(resp *http.Response) *Error {
|
||||
defer resp.Body.Close()
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
|
||||
}
|
||||
return &Error{Status: resp.StatusCode, Message: string(data)}
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
|
||||
}
|
||||
|
||||
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
|
||||
if endpoint != "" && !strings.Contains(endpoint, "://") {
|
||||
endpoint = "tcp://" + endpoint
|
||||
}
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
if tls {
|
||||
u.Scheme = "https"
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "unix":
|
||||
return u, nil
|
||||
case "http", "https", "tcp":
|
||||
_, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
if e, ok := err.(*net.AddrError); ok {
|
||||
if e.Err == "missing port in address" {
|
||||
return u, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
number, err := strconv.ParseInt(port, 10, 64)
|
||||
if err == nil && number > 0 && number < 65536 {
|
||||
if u.Scheme == "tcp" {
|
||||
if tls {
|
||||
u.Scheme = "https"
|
||||
} else {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
return nil, ErrInvalidEndpoint
|
||||
default:
|
||||
return nil, ErrInvalidEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
type dockerEnv struct {
|
||||
dockerHost string
|
||||
dockerTLSVerify bool
|
||||
dockerCertPath string
|
||||
}
|
||||
|
||||
func getDockerEnv() (*dockerEnv, error) {
|
||||
dockerHost := os.Getenv("DOCKER_HOST")
|
||||
var err error
|
||||
if dockerHost == "" {
|
||||
dockerHost, err = DefaultDockerHost()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
|
||||
var dockerCertPath string
|
||||
if dockerTLSVerify {
|
||||
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
|
||||
if dockerCertPath == "" {
|
||||
home := homedir.Get()
|
||||
if home == "" {
|
||||
return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
|
||||
}
|
||||
dockerCertPath = filepath.Join(home, ".docker")
|
||||
dockerCertPath, err = filepath.Abs(dockerCertPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return &dockerEnv{
|
||||
dockerHost: dockerHost,
|
||||
dockerTLSVerify: dockerTLSVerify,
|
||||
dockerCertPath: dockerCertPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DefaultDockerHost returns the default docker socket for the current OS
|
||||
func DefaultDockerHost() (string, error) {
|
||||
var defaultHost string
|
||||
if runtime.GOOS == "windows" {
|
||||
// If we do not have a host, default to TCP socket on Windows
|
||||
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
|
||||
} else {
|
||||
// If we do not have a host, default to unix socket
|
||||
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
|
||||
}
|
||||
return opts.ValidateHost(defaultHost)
|
||||
}
|
502 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go generated vendored
@@ -1,502 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
func TestNewAPIClient(t *testing.T) {
|
||||
endpoint := "http://localhost:4243"
|
||||
client, err := NewClient(endpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
// test unix socket endpoints
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
client, err = NewClient(endpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if !client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be true, got false")
|
||||
}
|
||||
if client.requestedAPIVersion != nil {
|
||||
t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func newTLSClient(endpoint string) (*Client, error) {
|
||||
return NewTLSClient(endpoint,
|
||||
"testing/data/cert.pem",
|
||||
"testing/data/key.pem",
|
||||
"testing/data/ca.pem")
|
||||
}
|
||||
|
||||
func TestNewTSLAPIClient(t *testing.T) {
|
||||
endpoint := "https://localhost:4243"
|
||||
client, err := newTLSClient(endpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if !client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be true, got false")
|
||||
}
|
||||
if client.requestedAPIVersion != nil {
|
||||
t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVersionedClient(t *testing.T) {
|
||||
endpoint := "http://localhost:4243"
|
||||
client, err := NewVersionedClient(endpoint, "1.12")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" {
|
||||
t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion)
|
||||
}
|
||||
if client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVersionedClientFromEnv(t *testing.T) {
|
||||
endpoint := "tcp://localhost:2376"
|
||||
endpointURL := "http://localhost:2376"
|
||||
os.Setenv("DOCKER_HOST", endpoint)
|
||||
os.Setenv("DOCKER_TLS_VERIFY", "")
|
||||
client, err := NewVersionedClientFromEnv("1.12")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if client.endpointURL.String() != endpointURL {
|
||||
t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" {
|
||||
t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion)
|
||||
}
|
||||
if client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVersionedClientFromEnvTLS(t *testing.T) {
|
||||
endpoint := "tcp://localhost:2376"
|
||||
endpointURL := "https://localhost:2376"
|
||||
base, _ := os.Getwd()
|
||||
os.Setenv("DOCKER_CERT_PATH", filepath.Join(base, "/testing/data/"))
|
||||
os.Setenv("DOCKER_HOST", endpoint)
|
||||
os.Setenv("DOCKER_TLS_VERIFY", "1")
|
||||
client, err := NewVersionedClientFromEnv("1.12")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if client.endpointURL.String() != endpointURL {
|
||||
t.Errorf("Expected endpointURL %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" {
|
||||
t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion)
|
||||
}
|
||||
if client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTLSVersionedClient(t *testing.T) {
|
||||
certPath := "testing/data/cert.pem"
|
||||
keyPath := "testing/data/key.pem"
|
||||
caPath := "testing/data/ca.pem"
|
||||
endpoint := "https://localhost:4243"
|
||||
client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" {
|
||||
t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion)
|
||||
}
|
||||
if client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTLSVersionedClientInvalidCA(t *testing.T) {
|
||||
certPath := "testing/data/cert.pem"
|
||||
keyPath := "testing/data/key.pem"
|
||||
caPath := "testing/data/key.pem"
|
||||
endpoint := "https://localhost:4243"
|
||||
_, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14")
|
||||
if err == nil {
|
||||
t.Errorf("Expected invalid ca at %s", caPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewClientInvalidEndpoint(t *testing.T) {
|
||||
cases := []string{
|
||||
"htp://localhost:3243", "http://localhost:a",
|
||||
"", "http://localhost:8080:8383", "http://localhost:65536",
|
||||
"https://localhost:-20",
|
||||
}
|
||||
for _, c := range cases {
|
||||
client, err := NewClient(c)
|
||||
if client != nil {
|
||||
t.Errorf("Want <nil> client for invalid endpoint, got %#v.", client)
|
||||
}
|
||||
if !reflect.DeepEqual(err, ErrInvalidEndpoint) {
|
||||
t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewClientNoSchemeEndpoint(t *testing.T) {
|
||||
cases := []string{"localhost", "localhost:8080"}
|
||||
for _, c := range cases {
|
||||
client, err := NewClient(c)
|
||||
if client == nil {
|
||||
t.Errorf("Want client for scheme-less endpoint, got <nil>")
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Got unexpected error scheme-less endpoint: %q", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTLSClient(t *testing.T) {
|
||||
var tests = []struct {
|
||||
endpoint string
|
||||
expected string
|
||||
}{
|
||||
{"tcp://localhost:2376", "https"},
|
||||
{"tcp://localhost:2375", "https"},
|
||||
{"tcp://localhost:4000", "https"},
|
||||
{"http://localhost:4000", "https"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
client, err := newTLSClient(tt.endpoint)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
got := client.endpointURL.Scheme
|
||||
if got != tt.expected {
|
||||
t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEndpoint(t *testing.T) {
|
||||
client, err := NewVersionedClient("http://localhost:4243", "1.12")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if endpoint := client.Endpoint(); endpoint != client.endpoint {
|
||||
t.Errorf("Client.Endpoint(): want %q. Got %q", client.endpoint, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetURL(t *testing.T) {
|
||||
var tests = []struct {
|
||||
endpoint string
|
||||
path string
|
||||
expected string
|
||||
}{
|
||||
{"http://localhost:4243/", "/", "http://localhost:4243/"},
|
||||
{"http://localhost:4243", "/", "http://localhost:4243/"},
|
||||
{"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
|
||||
{"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
|
||||
{"http://localhost:4243/////", "/", "http://localhost:4243/"},
|
||||
{"unix:///var/run/docker.socket", "/containers", "/containers"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
client, _ := NewClient(tt.endpoint)
|
||||
client.endpoint = tt.endpoint
|
||||
client.SkipServerVersionCheck = true
|
||||
got := client.getURL(tt.path)
|
||||
if got != tt.expected {
|
||||
t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFakeUnixURL(t *testing.T) {
|
||||
var tests = []struct {
|
||||
endpoint string
|
||||
path string
|
||||
expected string
|
||||
}{
|
||||
{"unix://var/run/docker.sock", "/", "http://unix.sock/"},
|
||||
{"unix://var/run/docker.socket", "/", "http://unix.sock/"},
|
||||
{"unix://var/run/docker.sock", "/containers/ps", "http://unix.sock/containers/ps"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
client, _ := NewClient(tt.endpoint)
|
||||
client.endpoint = tt.endpoint
|
||||
client.SkipServerVersionCheck = true
|
||||
got := client.getFakeUnixURL(tt.path)
|
||||
if got != tt.expected {
|
||||
t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
fakeBody := ioutil.NopCloser(bytes.NewBufferString("bad parameter"))
|
||||
resp := &http.Response{
|
||||
StatusCode: 400,
|
||||
Body: fakeBody,
|
||||
}
|
||||
err := newError(resp)
|
||||
expected := Error{Status: 400, Message: "bad parameter"}
|
||||
if !reflect.DeepEqual(expected, *err) {
|
||||
t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err)
|
||||
}
|
||||
message := "API error (400): bad parameter"
|
||||
if err.Error() != message {
|
||||
t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryString(t *testing.T) {
|
||||
v := float32(2.4)
|
||||
f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64))
|
||||
jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`)
|
||||
var tests = []struct {
|
||||
input interface{}
|
||||
want string
|
||||
}{
|
||||
{&ListContainersOptions{All: true}, "all=1"},
|
||||
{ListContainersOptions{All: true}, "all=1"},
|
||||
{ListContainersOptions{Before: "something"}, "before=something"},
|
||||
{ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"},
|
||||
{ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"},
|
||||
{dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"},
|
||||
{dumb{W: v, X: 10, Y: 10.35000}, f32QueryString},
|
||||
{dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"},
|
||||
{dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"},
|
||||
{dumb{T: 10, Y: 10.35000}, "y=10.35"},
|
||||
{dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson},
|
||||
{nil, ""},
|
||||
{10, ""},
|
||||
{"not_a_struct", ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := queryString(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("queryString(%v). Want %q. Got %q.", tt.input, tt.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAPIVersions(t *testing.T) {
|
||||
var tests = []struct {
|
||||
a string
|
||||
b string
|
||||
expectedALessThanB bool
|
||||
expectedALessThanOrEqualToB bool
|
||||
expectedAGreaterThanB bool
|
||||
expectedAGreaterThanOrEqualToB bool
|
||||
}{
|
||||
{"1.11", "1.11", false, true, false, true},
|
||||
{"1.10", "1.11", true, true, false, false},
|
||||
{"1.11", "1.10", false, false, true, true},
|
||||
|
||||
{"1.11-ubuntu0", "1.11", false, true, false, true},
|
||||
{"1.10", "1.11-el7", true, true, false, false},
|
||||
|
||||
{"1.9", "1.11", true, true, false, false},
|
||||
{"1.11", "1.9", false, false, true, true},
|
||||
|
||||
{"1.1.1", "1.1", false, false, true, true},
|
||||
{"1.1", "1.1.1", true, true, false, false},
|
||||
|
||||
{"2.1", "1.1.1", false, false, true, true},
|
||||
{"2.1", "1.3.1", false, false, true, true},
|
||||
{"1.1.1", "2.1", true, true, false, false},
|
||||
{"1.3.1", "2.1", true, true, false, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
a, _ := NewAPIVersion(tt.a)
|
||||
b, _ := NewAPIVersion(tt.b)
|
||||
|
||||
if tt.expectedALessThanB && !a.LessThan(b) {
|
||||
t.Errorf("Expected %#v < %#v", a, b)
|
||||
}
|
||||
if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) {
|
||||
t.Errorf("Expected %#v <= %#v", a, b)
|
||||
}
|
||||
if tt.expectedAGreaterThanB && !a.GreaterThan(b) {
|
||||
t.Errorf("Expected %#v > %#v", a, b)
|
||||
}
|
||||
if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) {
|
||||
t.Errorf("Expected %#v >= %#v", a, b)
|
||||
}
|
||||
}
|
||||
}
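// Editorial sketch (not part of the vendored file): the comparison semantics
// exercised by TestAPIVersions above, shown as a standalone program. Versions
// compare component by component, and a distribution suffix such as "-el7"
// is ignored, so 1.10 < 1.11-el7.

package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	a, _ := docker.NewAPIVersion("1.10")
	b, _ := docker.NewAPIVersion("1.11-el7")
	fmt.Println(a.LessThan(b))             // true: 1.10 < 1.11, suffix ignored
	fmt.Println(a.GreaterThanOrEqualTo(b)) // false
}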
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.Ping()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingFailing(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
expectedErrMsg := "API error (500): "
|
||||
if err.Error() != expectedErrMsg {
|
||||
t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingFailingWrongStatus(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
expectedErrMsg := "API error (202): "
|
||||
if err.Error() != expectedErrMsg {
|
||||
t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingErrorWithUnixSocket(t *testing.T) {
|
||||
go func() {
|
||||
li, err := net.Listen("unix", "/tmp/echo.sock")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer li.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to get listener, but failed: %#v", err)
|
||||
}
|
||||
|
||||
fd, err := li.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to accept connection, but failed: %#v", err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 512)
|
||||
nr, err := fd.Read(buf)
|
||||
|
||||
// Create invalid response message to trigger error.
|
||||
data := buf[0:nr]
|
||||
for i := 0; i < 10; i++ {
|
||||
data[i] = 63
|
||||
}
|
||||
|
||||
_, err = fd.Write(data)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to write to socket, but failed: %#v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}()
|
||||
|
||||
// Wait for unix socket to listen
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
endpoint := "unix:///tmp/echo.sock"
|
||||
u, _ := parseEndpoint(endpoint, false)
|
||||
client := Client{
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
SkipServerVersionCheck: true,
|
||||
}
|
||||
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
type FakeRoundTripper struct {
|
||||
message string
|
||||
status int
|
||||
header map[string]string
|
||||
requests []*http.Request
|
||||
}
|
||||
|
||||
func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
body := strings.NewReader(rt.message)
|
||||
rt.requests = append(rt.requests, r)
|
||||
res := &http.Response{
|
||||
StatusCode: rt.status,
|
||||
Body: ioutil.NopCloser(body),
|
||||
Header: make(http.Header),
|
||||
}
|
||||
for k, v := range rt.header {
|
||||
res.Header.Set(k, v)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (rt *FakeRoundTripper) Reset() {
|
||||
rt.requests = nil
|
||||
}
|
||||
|
||||
type person struct {
|
||||
Name string
|
||||
Age int `json:"age"`
|
||||
}
|
||||
|
||||
type dumb struct {
|
||||
T int `qs:"-"`
|
||||
v int
|
||||
W float32
|
||||
X int
|
||||
Y float64
|
||||
Z int `qs:"zee"`
|
||||
Person *person `qs:"p"`
|
||||
}
|
||||
|
||||
type fakeEndpointURL struct {
|
||||
Scheme string
|
||||
}
|
1180 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go (generated, vendored)
File diff suppressed because it is too large
2263 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go (generated, vendored)
File diff suppressed because it is too large
168 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go (generated, vendored)
@@ -1,168 +0,0 @@
// Copyright 2014 Docker authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the DOCKER-LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Env represents a list of key-value pairs in the form KEY=VALUE.
|
||||
type Env []string
|
||||
|
||||
// Get returns the string value of the given key.
|
||||
func (env *Env) Get(key string) (value string) {
|
||||
return env.Map()[key]
|
||||
}
|
||||
|
||||
// Exists checks whether the given key is defined in the internal Env
|
||||
// representation.
|
||||
func (env *Env) Exists(key string) bool {
|
||||
_, exists := env.Map()[key]
|
||||
return exists
|
||||
}
|
||||
|
||||
// GetBool returns a boolean representation of the given key. The key is false
|
||||
// whenever its value is 0, no, false, none or an empty string. Any other value
|
||||
// will be interpreted as true.
|
||||
func (env *Env) GetBool(key string) (value bool) {
|
||||
s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
|
||||
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SetBool defines a boolean value to the given key.
|
||||
func (env *Env) SetBool(key string, value bool) {
|
||||
if value {
|
||||
env.Set(key, "1")
|
||||
} else {
|
||||
env.Set(key, "0")
|
||||
}
|
||||
}
|
||||
|
||||
// GetInt returns the value of the provided key, converted to int.
|
||||
//
|
||||
// If the value cannot be represented as an integer, it returns -1.
|
||||
func (env *Env) GetInt(key string) int {
|
||||
return int(env.GetInt64(key))
|
||||
}
|
||||
|
||||
// SetInt defines an integer value to the given key.
|
||||
func (env *Env) SetInt(key string, value int) {
|
||||
env.Set(key, strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// GetInt64 returns the value of the provided key, converted to int64.
|
||||
//
|
||||
// If the value cannot be represented as an integer, it returns -1.
|
||||
func (env *Env) GetInt64(key string) int64 {
|
||||
s := strings.Trim(env.Get(key), " \t")
|
||||
val, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// SetInt64 defines an integer (64-bit wide) value to the given key.
|
||||
func (env *Env) SetInt64(key string, value int64) {
|
||||
env.Set(key, strconv.FormatInt(value, 10))
|
||||
}
|
||||
|
||||
// GetJSON unmarshals the value of the provided key in the provided iface.
|
||||
//
|
||||
// iface is a value that can be provided to the json.Unmarshal function.
|
||||
func (env *Env) GetJSON(key string, iface interface{}) error {
|
||||
sval := env.Get(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal([]byte(sval), iface)
|
||||
}
|
||||
|
||||
// SetJSON marshals the given value to JSON format and stores it using the
|
||||
// provided key.
|
||||
func (env *Env) SetJSON(key string, value interface{}) error {
|
||||
sval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env.Set(key, string(sval))
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetList returns a list of strings matching the provided key. It handles the
|
||||
// list as a JSON representation of a list of strings.
|
||||
//
|
||||
// If the given key matches to a single string, it will return a list
|
||||
// containing only the value that matches the key.
|
||||
func (env *Env) GetList(key string) []string {
|
||||
sval := env.Get(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
var l []string
|
||||
if err := json.Unmarshal([]byte(sval), &l); err != nil {
|
||||
l = append(l, sval)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// SetList stores the given list in the provided key, after serializing it to
|
||||
// JSON format.
|
||||
func (env *Env) SetList(key string, value []string) error {
|
||||
return env.SetJSON(key, value)
|
||||
}
|
||||
|
||||
// Set defines the value of a key to the given string.
|
||||
func (env *Env) Set(key, value string) {
|
||||
*env = append(*env, key+"="+value)
|
||||
}
|
||||
|
||||
// Decode decodes `src` as a json dictionary, and adds each decoded key-value
|
||||
// pair to the environment.
|
||||
//
|
||||
// If `src` cannot be decoded as a json dictionary, an error is returned.
|
||||
func (env *Env) Decode(src io.Reader) error {
|
||||
m := make(map[string]interface{})
|
||||
if err := json.NewDecoder(src).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
env.SetAuto(k, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAuto will try to define the Set* method to call based on the given value.
|
||||
func (env *Env) SetAuto(key string, value interface{}) {
|
||||
if fval, ok := value.(float64); ok {
|
||||
env.SetInt64(key, int64(fval))
|
||||
} else if sval, ok := value.(string); ok {
|
||||
env.Set(key, sval)
|
||||
} else if val, err := json.Marshal(value); err == nil {
|
||||
env.Set(key, string(val))
|
||||
} else {
|
||||
env.Set(key, fmt.Sprintf("%v", value))
|
||||
}
|
||||
}
|
||||
|
||||
// Map returns the map representation of the env.
|
||||
func (env *Env) Map() map[string]string {
|
||||
if len(*env) == 0 {
|
||||
return nil
|
||||
}
|
||||
m := make(map[string]string)
|
||||
for _, kv := range *env {
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
m[parts[0]] = parts[1]
|
||||
}
|
||||
return m
|
||||
}
|
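Editorial note: a brief usage sketch of the Env helpers removed above (not part of the original change). It only uses methods defined in env.go; the key names and values are arbitrary examples.

package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	var env docker.Env
	env.Set("PATH", "/usr/bin:/bin")
	env.SetBool("DEBUG", true) // stored as DEBUG=1
	if err := env.SetJSON("person", map[string]interface{}{"name": "gopher", "age": 4}); err != nil {
		log.Fatal(err)
	}

	fmt.Println(env.Get("PATH"))      // /usr/bin:/bin
	fmt.Println(env.GetBool("DEBUG")) // true

	var person map[string]interface{}
	if err := env.GetJSON("person", &person); err != nil {
		log.Fatal(err)
	}
	fmt.Println(person["name"]) // gopher

	// Map flattens the KEY=VALUE entries into a map[string]string.
	fmt.Println(len(env.Map())) // 3
}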
351 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go (generated, vendored)
@@ -1,351 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the DOCKER-LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input []string
|
||||
query string
|
||||
expected string
|
||||
}{
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""},
|
||||
{[]string{"WAT="}, "WAT", ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env(tt.input)
|
||||
got := env.Get(tt.query)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExists(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input []string
|
||||
query string
|
||||
expected bool
|
||||
}{
|
||||
{[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env(tt.input)
|
||||
got := env.Exists(tt.query)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBool(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected bool
|
||||
}{
|
||||
{"EMTPY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false},
|
||||
{"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true},
|
||||
{"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false},
|
||||
}
|
||||
env := Env([]string{
|
||||
"EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false",
|
||||
"NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin",
|
||||
"ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t",
|
||||
})
|
||||
for _, tt := range tests {
|
||||
got := env.GetBool(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetBool(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input bool
|
||||
expected string
|
||||
}{
|
||||
{true, "1"}, {false, "0"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetBool("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInt(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected int
|
||||
}{
|
||||
{"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2},
|
||||
}
|
||||
env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"})
|
||||
for _, tt := range tests {
|
||||
got := env.GetInt(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.GetInt(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetInt(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input int
|
||||
expected string
|
||||
}{
|
||||
{10, "10"}, {13, "13"}, {7, "7"}, {33, "33"},
|
||||
{0, "0"}, {-34, "-34"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetInt("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInt64(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected int64
|
||||
}{
|
||||
{"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2},
|
||||
}
|
||||
env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"})
|
||||
for _, tt := range tests {
|
||||
got := env.GetInt64(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetInt64(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input int64
|
||||
expected string
|
||||
}{
|
||||
{10, "10"}, {13, "13"}, {7, "7"}, {33, "33"},
|
||||
{0, "0"}, {-34, "-34"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetInt64("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSON(t *testing.T) {
|
||||
var p struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}
|
||||
var env Env
|
||||
env.Set("person", `{"name":"Gopher","age":5}`)
|
||||
err := env.GetJSON("person", &p)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if p.Name != "Gopher" {
|
||||
t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name)
|
||||
}
|
||||
if p.Age != 5 {
|
||||
t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSONAbsent(t *testing.T) {
|
||||
var l []string
|
||||
var env Env
|
||||
err := env.GetJSON("person", &l)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if l != nil {
|
||||
t.Errorf("Env.GetJSON(): get unexpected list %v", l)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSONFailure(t *testing.T) {
|
||||
var p []string
|
||||
var env Env
|
||||
env.Set("list-person", `{"name":"Gopher","age":5}`)
|
||||
err := env.GetJSON("list-person", &p)
|
||||
if err == nil {
|
||||
t.Errorf("Env.GetJSON(%q): got unexpected <nil> error.", "list-person")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetJSON(t *testing.T) {
|
||||
var p1 = struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}{Name: "Gopher", Age: 5}
|
||||
var env Env
|
||||
err := env.SetJSON("person", p1)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var p2 struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}
|
||||
err = env.GetJSON("person", &p2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !reflect.DeepEqual(p1, p2) {
|
||||
t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetJSONFailure(t *testing.T) {
|
||||
var env Env
|
||||
err := env.SetJSON("person", unmarshable{})
|
||||
if err == nil {
|
||||
t.Error("Env.SetJSON(): got unexpected <nil> error")
|
||||
}
|
||||
if env.Exists("person") {
|
||||
t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetList(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected []string
|
||||
}{
|
||||
{"WAT=wat", []string{"wat"}},
|
||||
{`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}},
|
||||
{"WAT=", nil},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env([]string{tt.input})
|
||||
got := env.GetList("WAT")
|
||||
if !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("Env.GetList(%q): wrong result. Want %v. Got %v", "WAT", tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetList(t *testing.T) {
|
||||
list := []string{"a", "b", "c"}
|
||||
var env Env
|
||||
if err := env.SetList("SOME", list); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) {
|
||||
t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
var env Env
|
||||
env.Set("PATH", "/home/bin:/bin")
|
||||
env.Set("SOMETHING", "/usr/bin")
|
||||
env.Set("PATH", "/bin")
|
||||
if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected {
|
||||
t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got)
|
||||
}
|
||||
if expected, got := "/bin", env.Get("PATH"); got != expected {
|
||||
t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecode(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expectedOut []string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
`{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`,
|
||||
[]string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`},
|
||||
"",
|
||||
},
|
||||
{"}}", nil, "invalid character '}' looking for beginning of value"},
|
||||
{`{}`, nil, ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
err := env.Decode(bytes.NewBufferString(tt.input))
|
||||
if tt.expectedErr == "" {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
} else if tt.expectedErr != err.Error() {
|
||||
t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err)
|
||||
}
|
||||
got := []string(env)
|
||||
sort.Strings(got)
|
||||
sort.Strings(tt.expectedOut)
|
||||
if !reflect.DeepEqual(got, tt.expectedOut) {
|
||||
t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetAuto(t *testing.T) {
|
||||
buf := bytes.NewBufferString("oi")
|
||||
var tests = []struct {
|
||||
input interface{}
|
||||
expected string
|
||||
}{
|
||||
{10, "10"},
|
||||
{10.3, "10"},
|
||||
{"oi", "oi"},
|
||||
{buf, "{}"},
|
||||
{unmarshable{}, "{}"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetAuto("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input []string
|
||||
expected map[string]string
|
||||
}{
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}},
|
||||
{nil, nil},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env(tt.input)
|
||||
got := env.Map()
|
||||
if !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type unmarshable struct {
|
||||
}
|
||||
|
||||
func (unmarshable) MarshalJSON() ([]byte, error) {
|
||||
return nil, errors.New("cannot marshal")
|
||||
}
|
304 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go (generated, vendored)
@@ -1,304 +0,0 @@
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// APIEvents represents an event returned by the API.
|
||||
type APIEvents struct {
|
||||
Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
|
||||
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
|
||||
From string `json:"From,omitempty" yaml:"From,omitempty"`
|
||||
Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"`
|
||||
}
|
||||
|
||||
type eventMonitoringState struct {
|
||||
sync.RWMutex
|
||||
sync.WaitGroup
|
||||
enabled bool
|
||||
lastSeen *int64
|
||||
C chan *APIEvents
|
||||
errC chan error
|
||||
listeners []chan<- *APIEvents
|
||||
}
|
||||
|
||||
const (
|
||||
maxMonitorConnRetries = 5
|
||||
retryInitialWaitTime = 10.
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoListeners is the error returned when no listeners are available
|
||||
// to receive an event.
|
||||
ErrNoListeners = errors.New("no listeners present to receive event")
|
||||
|
||||
// ErrListenerAlreadyExists is the error returned when the listener already
|
||||
// exists.
|
||||
ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
|
||||
|
||||
// EOFEvent is sent when the event listener receives an EOF error.
|
||||
EOFEvent = &APIEvents{
|
||||
Status: "EOF",
|
||||
}
|
||||
)
|
||||
|
||||
// AddEventListener adds a new listener to container events in the Docker API.
|
||||
//
|
||||
// The parameter is a channel through which events will be sent.
|
||||
func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
|
||||
var err error
|
||||
if !c.eventMonitor.isEnabled() {
|
||||
err = c.eventMonitor.enableEventMonitoring(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = c.eventMonitor.addListener(listener)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveEventListener removes a listener from the monitor.
|
||||
func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
|
||||
err := c.eventMonitor.removeListener(listener)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(c.eventMonitor.listeners) == 0 {
|
||||
c.eventMonitor.disableEventMonitoring()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
if listenerExists(listener, &eventState.listeners) {
|
||||
return ErrListenerAlreadyExists
|
||||
}
|
||||
eventState.Add(1)
|
||||
eventState.listeners = append(eventState.listeners, listener)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
if listenerExists(listener, &eventState.listeners) {
|
||||
var newListeners []chan<- *APIEvents
|
||||
for _, l := range eventState.listeners {
|
||||
if l != listener {
|
||||
newListeners = append(newListeners, l)
|
||||
}
|
||||
}
|
||||
eventState.listeners = newListeners
|
||||
eventState.Add(-1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) closeListeners() {
|
||||
for _, l := range eventState.listeners {
|
||||
close(l)
|
||||
eventState.Add(-1)
|
||||
}
|
||||
eventState.listeners = nil
|
||||
}
|
||||
|
||||
func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
|
||||
for _, b := range *list {
|
||||
if b == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
if !eventState.enabled {
|
||||
eventState.enabled = true
|
||||
var lastSeenDefault = int64(0)
|
||||
eventState.lastSeen = &lastSeenDefault
|
||||
eventState.C = make(chan *APIEvents, 100)
|
||||
eventState.errC = make(chan error, 1)
|
||||
go eventState.monitorEvents(c)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) disableEventMonitoring() error {
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
|
||||
eventState.closeListeners()
|
||||
|
||||
eventState.Wait()
|
||||
|
||||
if eventState.enabled {
|
||||
eventState.enabled = false
|
||||
close(eventState.C)
|
||||
close(eventState.errC)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) monitorEvents(c *Client) {
|
||||
var err error
|
||||
for eventState.noListeners() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
if err = eventState.connectWithRetry(c); err != nil {
|
||||
// terminate if connect failed
|
||||
eventState.disableEventMonitoring()
|
||||
return
|
||||
}
|
||||
for eventState.isEnabled() {
|
||||
timeout := time.After(100 * time.Millisecond)
|
||||
select {
|
||||
case ev, ok := <-eventState.C:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if ev == EOFEvent {
|
||||
eventState.disableEventMonitoring()
|
||||
return
|
||||
}
|
||||
eventState.updateLastSeen(ev)
|
||||
go eventState.sendEvent(ev)
|
||||
case err = <-eventState.errC:
|
||||
if err == ErrNoListeners {
|
||||
eventState.disableEventMonitoring()
|
||||
return
|
||||
} else if err != nil {
|
||||
defer func() { go eventState.monitorEvents(c) }()
|
||||
return
|
||||
}
|
||||
case <-timeout:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
|
||||
var retries int
|
||||
var err error
|
||||
for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
|
||||
waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
|
||||
time.Sleep(time.Duration(waitTime) * time.Millisecond)
|
||||
err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
|
||||
}
|
||||
return err
|
||||
}
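// Editorial sketch (not part of the vendored source): the wait times produced
// by the backoff in connectWithRetry above. With retryInitialWaitTime = 10 and
// up to maxMonitorConnRetries = 5 retries, the monitor sleeps 10, 20, 40, 80
// and 160 milliseconds between successive reconnect attempts.

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const retryInitialWaitTime = 10. // same constant as defined in event.go
	for retries := 0; retries < 5; retries++ {
		wait := time.Duration(int64(retryInitialWaitTime*math.Pow(2, float64(retries)))) * time.Millisecond
		fmt.Println(wait) // 10ms, 20ms, 40ms, 80ms, 160ms
	}
}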
|
||||
|
||||
func (eventState *eventMonitoringState) noListeners() bool {
|
||||
eventState.RLock()
|
||||
defer eventState.RUnlock()
|
||||
return len(eventState.listeners) == 0
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) isEnabled() bool {
|
||||
eventState.RLock()
|
||||
defer eventState.RUnlock()
|
||||
return eventState.enabled
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
|
||||
eventState.RLock()
|
||||
defer eventState.RUnlock()
|
||||
eventState.Add(1)
|
||||
defer eventState.Done()
|
||||
if eventState.enabled {
|
||||
if len(eventState.listeners) == 0 {
|
||||
eventState.errC <- ErrNoListeners
|
||||
return
|
||||
}
|
||||
|
||||
for _, listener := range eventState.listeners {
|
||||
listener <- event
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
|
||||
eventState.Lock()
|
||||
defer eventState.Unlock()
|
||||
if atomic.LoadInt64(eventState.lastSeen) < e.Time {
|
||||
atomic.StoreInt64(eventState.lastSeen, e.Time)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
|
||||
uri := "/events"
|
||||
if startTime != 0 {
|
||||
uri += fmt.Sprintf("?since=%d", startTime)
|
||||
}
|
||||
protocol := c.endpointURL.Scheme
|
||||
address := c.endpointURL.Path
|
||||
if protocol != "unix" {
|
||||
protocol = "tcp"
|
||||
address = c.endpointURL.Host
|
||||
}
|
||||
var dial net.Conn
|
||||
var err error
|
||||
if c.TLSConfig == nil {
|
||||
dial, err = c.Dialer.Dial(protocol, address)
|
||||
} else {
|
||||
dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conn := httputil.NewClientConn(dial, nil)
|
||||
req, err := http.NewRequest("GET", uri, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := conn.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go func(res *http.Response, conn *httputil.ClientConn) {
|
||||
defer conn.Close()
|
||||
defer res.Body.Close()
|
||||
decoder := json.NewDecoder(res.Body)
|
||||
for {
|
||||
var event APIEvents
|
||||
if err = decoder.Decode(&event); err != nil {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
if c.eventMonitor.isEnabled() {
|
||||
// Signal that we're exiting.
|
||||
eventChan <- EOFEvent
|
||||
}
|
||||
break
|
||||
}
|
||||
errChan <- err
|
||||
}
|
||||
if event.Time == 0 {
|
||||
continue
|
||||
}
|
||||
if !c.eventMonitor.isEnabled() {
|
||||
return
|
||||
}
|
||||
eventChan <- &event
|
||||
}
|
||||
}(res, conn)
|
||||
return nil
|
||||
}
|
132 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go (generated, vendored)
@@ -1,132 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestEventListeners(t *testing.T) {
|
||||
testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient)
|
||||
}
|
||||
|
||||
func TestTLSEventListeners(t *testing.T) {
|
||||
testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server {
|
||||
server := httptest.NewUnstartedServer(handler)
|
||||
|
||||
cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem")
|
||||
if err != nil {
|
||||
t.Fatalf("Error loading server key pair: %s", err)
|
||||
}
|
||||
|
||||
caCert, err := ioutil.ReadFile("testing/data/ca.pem")
|
||||
if err != nil {
|
||||
t.Fatalf("Error loading ca certificate: %s", err)
|
||||
}
|
||||
caPool := x509.NewCertPool()
|
||||
if !caPool.AppendCertsFromPEM(caCert) {
|
||||
t.Fatalf("Could not add ca certificate")
|
||||
}
|
||||
|
||||
server.TLS = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: caPool,
|
||||
}
|
||||
server.StartTLS()
|
||||
return server
|
||||
}, func(url string) (*Client, error) {
|
||||
return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem")
|
||||
})
|
||||
}
|
||||
|
||||
func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) {
|
||||
response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
|
||||
{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
|
||||
{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
|
||||
{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
|
||||
`
|
||||
|
||||
server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
rsc := bufio.NewScanner(strings.NewReader(response))
|
||||
for rsc.Scan() {
|
||||
w.Write([]byte(rsc.Text()))
|
||||
w.(http.Flusher).Flush()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client, err := buildClient(server.URL)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create client: %s", err)
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
|
||||
listener := make(chan *APIEvents, 10)
|
||||
defer func() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
if err := client.RemoveEventListener(listener); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = client.AddEventListener(listener)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to add event listener: %s", err)
|
||||
}
|
||||
|
||||
timeout := time.After(1 * time.Second)
|
||||
var count int
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-listener:
|
||||
t.Logf("Received: %v", *msg)
|
||||
count++
|
||||
err = checkEvent(count, msg)
|
||||
if err != nil {
|
||||
t.Fatalf("Check event failed: %s", err)
|
||||
}
|
||||
if count == 4 {
|
||||
return
|
||||
}
|
||||
case <-timeout:
|
||||
t.Fatalf("%s timed out waiting on events", testName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkEvent(index int, event *APIEvents) error {
|
||||
if event.ID != "dfdf82bd3881" {
|
||||
return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID)
|
||||
}
|
||||
if event.From != "base:latest" {
|
||||
return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From)
|
||||
}
|
||||
var status string
|
||||
switch index {
|
||||
case 1:
|
||||
status = "create"
|
||||
case 2:
|
||||
status = "start"
|
||||
case 3:
|
||||
status = "stop"
|
||||
case 4:
|
||||
status = "destroy"
|
||||
}
|
||||
if event.Status != status {
|
||||
return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
168 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go (generated, vendored)
@@ -1,168 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func ExampleClient_AttachToContainer() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
// Reading logs from container a84849 and sending them to buf.
|
||||
var buf bytes.Buffer
|
||||
err = client.AttachToContainer(docker.AttachToContainerOptions{
|
||||
Container: "a84849",
|
||||
OutputStream: &buf,
|
||||
Logs: true,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println(buf.String())
|
||||
buf.Reset()
|
||||
err = client.AttachToContainer(docker.AttachToContainerOptions{
|
||||
Container: "a84849",
|
||||
OutputStream: &buf,
|
||||
Stdout: true,
|
||||
Stream: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println(buf.String())
|
||||
}
|
||||
|
||||
func ExampleClient_CopyFromContainer() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cid := "a84849"
|
||||
var buf bytes.Buffer
|
||||
filename := "/tmp/output.txt"
|
||||
err = client.CopyFromContainer(docker.CopyFromContainerOptions{
|
||||
Container: cid,
|
||||
Resource: filename,
|
||||
OutputStream: &buf,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Error while copying from %s: %s\n", cid, err)
|
||||
}
|
||||
content := new(bytes.Buffer)
|
||||
r := bytes.NewReader(buf.Bytes())
|
||||
tr := tar.NewReader(r)
|
||||
tr.Next()
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err := io.Copy(content, tr); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println(buf.String())
|
||||
}
|
||||
|
||||
func ExampleClient_BuildImage() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
t := time.Now()
|
||||
inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
|
||||
tr := tar.NewWriter(inputbuf)
|
||||
tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t})
|
||||
tr.Write([]byte("FROM base\n"))
|
||||
tr.Close()
|
||||
opts := docker.BuildImageOptions{
|
||||
Name: "test",
|
||||
InputStream: inputbuf,
|
||||
OutputStream: outputbuf,
|
||||
}
|
||||
if err := client.BuildImage(opts); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_ListenEvents() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
listener := make(chan *docker.APIEvents)
|
||||
err = client.AddEventListener(listener)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
||||
err = client.RemoveEventListener(listener)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
timeout := time.After(1 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-listener:
|
||||
log.Println(msg)
|
||||
case <-timeout:
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func ExampleEnv_Map() {
|
||||
e := docker.Env([]string{"A=1", "B=2", "C=3"})
|
||||
envs := e.Map()
|
||||
for k, v := range envs {
|
||||
fmt.Printf("%s=%q\n", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleEnv_SetJSON() {
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int
|
||||
}
|
||||
p := Person{Name: "Gopher", Age: 4}
|
||||
var e docker.Env
|
||||
err := e.SetJSON("person", p)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleEnv_GetJSON() {
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int
|
||||
}
|
||||
p := Person{Name: "Gopher", Age: 4}
|
||||
var e docker.Env
|
||||
e.Set("person", `{"name":"Gopher","age":4}`)
|
||||
err := e.GetJSON("person", &p)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
202 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go (generated, vendored)
@@ -1,202 +0,0 @@
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Exec is the type representing a `docker exec` instance and containing the
|
||||
// instance ID
|
||||
type Exec struct {
|
||||
ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
|
||||
}
|
||||
|
||||
// CreateExecOptions specify parameters to the CreateExecContainer function.
|
||||
//
|
||||
// See https://goo.gl/1KSIb7 for more details
|
||||
type CreateExecOptions struct {
|
||||
AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
|
||||
AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
|
||||
AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
|
||||
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
|
||||
Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
|
||||
Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
|
||||
User string `json:"User,omitempty" yaml:"User,omitempty"`
|
||||
}
|
||||
|
||||
// CreateExec sets up an exec instance in a running container `id`, returning the exec
|
||||
// instance, or an error in case of failure.
|
||||
//
|
||||
// See https://goo.gl/1KSIb7 for more details
|
||||
func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
|
||||
path := fmt.Sprintf("/containers/%s/exec", opts.Container)
|
||||
resp, err := c.do("POST", path, doOptions{data: opts})
|
||||
if err != nil {
|
||||
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
||||
return nil, &NoSuchContainer{ID: opts.Container}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var exec Exec
|
||||
if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &exec, nil
|
||||
}
|
||||
|
||||
// StartExecOptions specify parameters to the StartExecContainer function.
|
||||
//
|
||||
// See https://goo.gl/iQCnto for more details
|
||||
type StartExecOptions struct {
|
||||
Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
|
||||
|
||||
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
|
||||
|
||||
InputStream io.Reader `qs:"-"`
|
||||
OutputStream io.Writer `qs:"-"`
|
||||
ErrorStream io.Writer `qs:"-"`
|
||||
|
||||
// Use raw terminal? Usually true when the container contains a TTY.
|
||||
RawTerminal bool `qs:"-"`
|
||||
|
||||
// If set, after a successful connect, a sentinel will be sent and then the
|
||||
// client will block on receive before continuing.
|
||||
//
|
||||
// It must be an unbuffered channel. Using a buffered channel can lead
|
||||
// to unexpected behavior.
|
||||
Success chan struct{} `json:"-"`
|
||||
}
|
||||
|
||||
// StartExec starts a previously set up exec instance id. If opts.Detach is
|
||||
// true, it returns after starting the exec command. Otherwise, it sets up an
|
||||
// interactive session with the exec command.
|
||||
//
|
||||
// See https://goo.gl/iQCnto for more details
|
||||
func (c *Client) StartExec(id string, opts StartExecOptions) error {
|
||||
cw, err := c.StartExecNonBlocking(id, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cw != nil {
|
||||
return cw.Wait()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
|
||||
// true, it returns after starting the exec command. Otherwise, it sets up an
|
||||
// interactive session with the exec command.
|
||||
//
|
||||
// See https://goo.gl/iQCnto for more details
|
||||
func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
|
||||
if id == "" {
|
||||
return nil, &NoSuchExec{ID: id}
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("/exec/%s/start", id)
|
||||
|
||||
if opts.Detach {
|
||||
resp, err := c.do("POST", path, doOptions{data: opts})
|
||||
if err != nil {
|
||||
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
||||
return nil, &NoSuchExec{ID: id}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return c.hijack("POST", path, hijackOptions{
|
||||
success: opts.Success,
|
||||
setRawTerminal: opts.RawTerminal,
|
||||
in: opts.InputStream,
|
||||
stdout: opts.OutputStream,
|
||||
stderr: opts.ErrorStream,
|
||||
data: opts,
|
||||
})
|
||||
}
|
||||
|
||||
// ResizeExecTTY resizes the tty session used by the exec command id. This API
|
||||
// is valid only if Tty was specified as part of creating and starting the exec
|
||||
// command.
|
||||
//
|
||||
// See https://goo.gl/e1JpsA for more details
|
||||
func (c *Client) ResizeExecTTY(id string, height, width int) error {
|
||||
params := make(url.Values)
|
||||
params.Set("h", strconv.Itoa(height))
|
||||
params.Set("w", strconv.Itoa(width))
|
||||
|
||||
path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
|
||||
resp, err := c.do("POST", path, doOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecProcessConfig is a type describing the command associated with an Exec
// instance. It's used in the ExecInspect type.
|
||||
type ExecProcessConfig struct {
|
||||
Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
|
||||
User string `json:"user,omitempty" yaml:"user,omitempty"`
|
||||
Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
|
||||
EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
|
||||
Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
|
||||
}
|
||||
|
||||
// ExecInspect is a type with details about an exec instance, including the
// exit code if the command has finished running. It's returned by an API
// call to /exec/(id)/json.
|
||||
//
|
||||
// See https://goo.gl/gPtX9R for more details
|
||||
type ExecInspect struct {
|
||||
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
|
||||
Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
|
||||
ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
|
||||
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
|
||||
OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
|
||||
OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
|
||||
ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
|
||||
Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
|
||||
}
|
||||
|
||||
// InspectExec returns low-level information about the exec command id.
|
||||
//
|
||||
// See https://goo.gl/gPtX9R for more details
|
||||
func (c *Client) InspectExec(id string) (*ExecInspect, error) {
|
||||
path := fmt.Sprintf("/exec/%s/json", id)
|
||||
resp, err := c.do("GET", path, doOptions{})
|
||||
if err != nil {
|
||||
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
||||
return nil, &NoSuchExec{ID: id}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var exec ExecInspect
|
||||
if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &exec, nil
|
||||
}
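
// A small sketch (not part of the original file) of checking the result of a
// finished exec instance; execID is assumed to come from a previous CreateExec
// call whose command has already been started and has exited.
//
//	inspect, err := client.InspectExec(execID)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if !inspect.Running && inspect.ExitCode != 0 {
//		log.Printf("command %s exited with code %d",
//			inspect.ProcessConfig.EntryPoint, inspect.ExitCode)
//	}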
|
||||
|
||||
// NoSuchExec is the error returned when a given exec instance does not exist.
|
||||
type NoSuchExec struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
func (err *NoSuchExec) Error() string {
|
||||
return "No such exec instance: " + err.ID
|
||||
}
|
262
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
generated
vendored
|
@ -1,262 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExecCreate(t *testing.T) {
|
||||
jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}`
|
||||
var expected struct{ ID string }
|
||||
err := json.Unmarshal([]byte(jsonContainer), &expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
config := CreateExecOptions{
|
||||
Container: "test",
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
AttachStderr: false,
|
||||
Tty: false,
|
||||
Cmd: []string{"touch", "/tmp/file"},
|
||||
User: "a-user",
|
||||
}
|
||||
execObj, err := client.CreateExec(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
if execObj.ID != expectedID {
|
||||
t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "POST" {
|
||||
t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/containers/test/exec"))
|
||||
if gotPath := req.URL.Path; gotPath != expectedURL.Path {
|
||||
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
var gotBody struct{ ID string }
|
||||
err = json.NewDecoder(req.Body).Decode(&gotBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecStartDetached(t *testing.T) {
|
||||
execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
fakeRT := &FakeRoundTripper{status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
config := StartExecOptions{
|
||||
Detach: true,
|
||||
}
|
||||
err := client.StartExec(execID, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "POST" {
|
||||
t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start"))
|
||||
if gotPath := req.URL.Path; gotPath != expectedURL.Path {
|
||||
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
t.Log(req.Body)
|
||||
var gotBody struct{ Detach bool }
|
||||
err = json.NewDecoder(req.Body).Decode(&gotBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !gotBody.Detach {
|
||||
t.Fatal("Expected Detach in StartExecOptions to be true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecStartAndAttach(t *testing.T) {
|
||||
var reader = strings.NewReader("send value")
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
|
||||
w.Write([]byte("hello"))
|
||||
}))
|
||||
defer server.Close()
|
||||
client, _ := NewClient(server.URL)
|
||||
client.SkipServerVersionCheck = true
|
||||
var stdout, stderr bytes.Buffer
|
||||
success := make(chan struct{})
|
||||
execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
opts := StartExecOptions{
|
||||
OutputStream: &stdout,
|
||||
ErrorStream: &stderr,
|
||||
InputStream: reader,
|
||||
RawTerminal: true,
|
||||
Success: success,
|
||||
}
|
||||
go func() {
|
||||
if err := client.StartExec(execID, opts); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
<-success
|
||||
}
|
||||
|
||||
func TestExecResize(t *testing.T) {
|
||||
execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
fakeRT := &FakeRoundTripper{status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.ResizeExecTTY(execID, 10, 20)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "POST" {
|
||||
t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20"))
|
||||
if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() {
|
||||
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInspect(t *testing.T) {
|
||||
jsonExec := `{
|
||||
"ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e",
|
||||
"Running": true,
|
||||
"ExitCode": 0,
|
||||
"ProcessConfig": {
|
||||
"privileged": false,
|
||||
"user": "",
|
||||
"tty": true,
|
||||
"entrypoint": "bash",
|
||||
"arguments": []
|
||||
},
|
||||
"OpenStdin": true,
|
||||
"OpenStderr": true,
|
||||
"OpenStdout": true,
|
||||
"Container": {
|
||||
"State": {
|
||||
"Running": true,
|
||||
"Paused": false,
|
||||
"Restarting": false,
|
||||
"OOMKilled": false,
|
||||
"Pid": 29392,
|
||||
"ExitCode": 0,
|
||||
"Error": "",
|
||||
"StartedAt": "2015-01-21T17:08:59.634662178Z",
|
||||
"FinishedAt": "0001-01-01T00:00:00Z"
|
||||
},
|
||||
"ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521",
|
||||
"Created": "2015-01-21T17:08:59.46407212Z",
|
||||
"Path": "/bin/bash",
|
||||
"Args": [
|
||||
"-lc",
|
||||
"tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null"
|
||||
],
|
||||
"Config": {
|
||||
"Hostname": "922cd0568714",
|
||||
"Domainname": "",
|
||||
"User": "ubuntu",
|
||||
"Memory": 0,
|
||||
"MemorySwap": 0,
|
||||
"CpuShares": 100,
|
||||
"Cpuset": "",
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"AttachStderr": false,
|
||||
"PortSpecs": null,
|
||||
"ExposedPorts": {
|
||||
"8888/tcp": {}
|
||||
},
|
||||
"Tty": false,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"Cmd": [
|
||||
"/bin/bash",
|
||||
"-lc",
|
||||
"tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null"
|
||||
],
|
||||
"Image": "tsuru/app-gtest",
|
||||
"Volumes": null,
|
||||
"WorkingDir": "",
|
||||
"Entrypoint": null,
|
||||
"NetworkDisabled": false,
|
||||
"MacAddress": "",
|
||||
"OnBuild": null
|
||||
},
|
||||
"Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544",
|
||||
"NetworkSettings": {
|
||||
"IPAddress": "172.17.0.8",
|
||||
"IPPrefixLen": 16,
|
||||
"MacAddress": "02:42:ac:11:00:08",
|
||||
"LinkLocalIPv6Address": "fe80::42:acff:fe11:8",
|
||||
"LinkLocalIPv6PrefixLen": 64,
|
||||
"GlobalIPv6Address": "",
|
||||
"GlobalIPv6PrefixLen": 0,
|
||||
"Gateway": "172.17.42.1",
|
||||
"IPv6Gateway": "",
|
||||
"Bridge": "docker0",
|
||||
"PortMapping": null,
|
||||
"Ports": {
|
||||
"8888/tcp": [
|
||||
{
|
||||
"HostIp": "0.0.0.0",
|
||||
"HostPort": "49156"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf",
|
||||
"HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname",
|
||||
"HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts",
|
||||
"Name": "/c7e43b72288ee9d0270a",
|
||||
"Driver": "aufs",
|
||||
"ExecDriver": "native-0.2",
|
||||
"MountLabel": "",
|
||||
"ProcessLabel": "",
|
||||
"AppArmorProfile": "",
|
||||
"RestartCount": 0,
|
||||
"UpdateDns": false,
|
||||
"Volumes": {},
|
||||
"VolumesRW": {}
|
||||
}
|
||||
}`
|
||||
var expected ExecInspect
|
||||
err := json.Unmarshal([]byte(jsonExec), &expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e"
|
||||
execObj, err := client.InspectExec(expectedID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*execObj, expected) {
|
||||
t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "GET" {
|
||||
t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json"))
|
||||
if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
|
||||
t.Errorf("ExecInspect: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
# 0.9.0 (Unreleased)
|
||||
|
||||
* logrus/text_formatter: don't emit empty msg
|
||||
* logrus/hooks/airbrake: move out of main repository
|
||||
* logrus/hooks/sentry: move out of main repository
|
||||
* logrus/hooks/papertrail: move out of main repository
|
||||
* logrus/hooks/bugsnag: move out of main repository
|
||||
|
||||
# 0.8.7
|
||||
|
||||
* logrus/core: fix possible race (#216)
|
||||
* logrus/doc: small typo fixes and doc improvements
|
||||
|
||||
|
||||
# 0.8.6
|
||||
|
||||
* hooks/raven: allow passing an initialized client
|
||||
|
||||
# 0.8.5
|
||||
|
||||
* logrus/core: revert #208
|
||||
|
||||
# 0.8.4
|
||||
|
||||
* formatter/text: fix data race (#218)
|
||||
|
||||
# 0.8.3
|
||||
|
||||
* logrus/core: fix entry log level (#208)
|
||||
* logrus/core: improve performance of text formatter by 40%
|
||||
* logrus/core: expose `LevelHooks` type
|
||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||
* formatter/text: print structs more verbosely
|
||||
|
||||
# 0.8.2
|
||||
|
||||
* logrus: fix more Fatal family functions
|
||||
|
||||
# 0.8.1
|
||||
|
||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||
|
||||
# 0.8.0
|
||||
|
||||
* logrus: defaults to stderr instead of stdout
|
||||
* hooks/sentry: add special field for `*http.Request`
|
||||
* formatter/text: ignore Windows for colors
|
||||
|
||||
# 0.7.3
|
||||
|
||||
* formatter/\*: allow configuration of timestamp layout
|
||||
|
||||
# 0.7.2
|
||||
|
||||
* formatter/text: Add configuration option for time format (#158)
|
|
@ -1,21 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -1,365 +0,0 @@
|
|||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [Build Status](https://travis-ci.org/Sirupsen/logrus) [GoDoc][godoc]
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
||||
many large deployments. The core API is unlikely to change much but please
|
||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||
every build.**
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
exit status 1
|
||||
```
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely API-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stderr instead of stdout, could also be a file.
|
||||
log.SetOutput(os.Stderr)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stderr
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus, using any of the `printf`-family functions should be
seen as a hint that you should add a field; however, you can still use the
`printf`-family functions with Logrus.
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
`init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
if err != nil {
|
||||
log.Error("Unable to connect to local syslog daemon")
|
||||
} else {
|
||||
log.AddHook(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
Note: the Syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
|
||||
|
||||
| Hook | Description |
|
||||
| ----- | ----------- |
|
||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`, then it will only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields`, some fields are
automatically added to all logging events (see the sketch after this list):
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `AddFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
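
A minimal sketch of what this looks like in practice (field values are
illustrative): the three automatic fields appear alongside any user fields,
without you adding them yourself.

```go
log.WithFields(log.Fields{"event": "signup"}).Info("Failed to send event.")
// With the default TextFormatter (no TTY) this renders roughly as:
// time="2015-09-07T08:48:33Z" level=info msg="Failed to send event." event=signup
```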
|
||||
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true`.
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
|
||||
|
||||
```go
|
||||
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
|
||||
```
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
srv := http.Server{
|
||||
// create a stdlib log.Logger that writes to
|
||||
// logrus.Logger.
|
||||
ErrorLog: log.New(w, "", 0),
|
||||
}
|
||||
```
|
||||
|
||||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotate(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
#### Tools
|
||||
|
||||
| Tool | Description |
|
||||
| ---- | ----------- |
|
||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hook and formatter from a config file, so the logger is generated with a different config in each environment.|
|
||||
|
||||
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
|
|
@ -1,26 +0,0 @@
|
|||
/*
|
||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
|
||||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/Sirupsen/logrus
|
||||
*/
|
||||
package logrus
|
|
@ -1,264 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Defines the key when adding errors using WithError.
|
||||
var ErrorKey = "error"
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is three fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a reader for the entry, which is a proxy to the formatter.
|
||||
func (entry *Entry) Reader() (*bytes.Buffer, error) {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
return bytes.NewBuffer(serialized), err
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
reader, err := entry.Reader()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return reader.String(), err
|
||||
}
|
||||
|
||||
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||
func (entry *Entry) WithError(err error) *Entry {
|
||||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := Fields{}
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data}
|
||||
}
|
||||
|
||||
// This function is not declared with a pointer value because otherwise
|
||||
// race conditions will occur when using multiple goroutines
|
||||
func (entry Entry) log(level Level, msg string) {
|
||||
entry.Time = time.Now()
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
|
||||
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
reader, err := entry.Reader()
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
entry.Logger.mu.Lock()
|
||||
defer entry.Logger.mu.Unlock()
|
||||
|
||||
_, err = io.Copy(entry.Logger.Out, reader)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(&entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
entry.Warn(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
// sprintlnn => Sprint with no trailing newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
|
@ -1,77 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryWithError(t *testing.T) {
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
defer func() {
|
||||
ErrorKey = "error"
|
||||
}()
|
||||
|
||||
err := fmt.Errorf("kaboom at layer %d", 4711)
|
||||
|
||||
assert.Equal(err, WithError(err).Data["error"])
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
|
||||
assert.Equal(err, entry.WithError(err).Data["error"])
|
||||
|
||||
ErrorKey = "err"
|
||||
|
||||
assert.Equal(err, entry.WithError(err).Data["err"])
|
||||
|
||||
}
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom time")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||
}
|
||||
|
||||
func TestEntryPanicf(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom again")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom true", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||
}
|
|
@ -1,193 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// std is the name of the standard logger in stdlib `log`
|
||||
std = New()
|
||||
)
|
||||
|
||||
func StandardLogger() *Logger {
|
||||
return std
|
||||
}
|
||||
|
||||
// SetOutput sets the standard logger output.
|
||||
func SetOutput(out io.Writer) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Out = out
|
||||
}
|
||||
|
||||
// SetFormatter sets the standard logger formatter.
|
||||
func SetFormatter(formatter Formatter) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Formatter = formatter
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Level = level
|
||||
}
|
||||
|
||||
// GetLevel returns the standard logger level.
|
||||
func GetLevel() Level {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
return std.Level
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
func AddHook(hook Hook) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
||||
func WithError(err error) *Entry {
|
||||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithField(key string, value interface{}) *Entry {
|
||||
return std.WithField(key, value)
|
||||
}
|
||||
|
||||
// WithFields creates an entry from the standard logger and adds multiple
|
||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
// once for each field.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithFields(fields Fields) *Entry {
|
||||
return std.WithFields(fields)
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
}
|
||||
|
||||
// Print logs a message at level Info on the standard logger.
|
||||
func Print(args ...interface{}) {
|
||||
std.Print(args...)
|
||||
}
|
||||
|
||||
// Info logs a message at level Info on the standard logger.
|
||||
func Info(args ...interface{}) {
|
||||
std.Info(args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at level Warn on the standard logger.
|
||||
func Warn(args ...interface{}) {
|
||||
std.Warn(args...)
|
||||
}
|
||||
|
||||
// Warning logs a message at level Warn on the standard logger.
|
||||
func Warning(args ...interface{}) {
|
||||
std.Warning(args...)
|
||||
}
|
||||
|
||||
// Error logs a message at level Error on the standard logger.
|
||||
func Error(args ...interface{}) {
|
||||
std.Error(args...)
|
||||
}
|
||||
|
||||
// Panic logs a message at level Panic on the standard logger.
|
||||
func Panic(args ...interface{}) {
|
||||
std.Panic(args...)
|
||||
}
|
||||
|
||||
// Fatal logs a message at level Fatal on the standard logger.
|
||||
func Fatal(args ...interface{}) {
|
||||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Printf logs a message at level Info on the standard logger.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
std.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a message at level Info on the standard logger.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
std.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a message at level Warn on the standard logger.
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
std.Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Warningf logs a message at level Warn on the standard logger.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
std.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a message at level Error on the standard logger.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
std.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Panicf logs a message at level Panic on the standard logger.
|
||||
func Panicf(format string, args ...interface{}) {
|
||||
std.Panicf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs a message at level Fatal on the standard logger.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
}
|
||||
|
||||
// Println logs a message at level Info on the standard logger.
|
||||
func Println(args ...interface{}) {
|
||||
std.Println(args...)
|
||||
}
|
||||
|
||||
// Infoln logs a message at level Info on the standard logger.
|
||||
func Infoln(args ...interface{}) {
|
||||
std.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln logs a message at level Warn on the standard logger.
|
||||
func Warnln(args ...interface{}) {
|
||||
std.Warnln(args...)
|
||||
}
|
||||
|
||||
// Warningln logs a message at level Warn on the standard logger.
|
||||
func Warningln(args ...interface{}) {
|
||||
std.Warningln(args...)
|
||||
}
|
||||
|
||||
// Errorln logs a message at level Error on the standard logger.
|
||||
func Errorln(args ...interface{}) {
|
||||
std.Errorln(args...)
|
||||
}
|
||||
|
||||
// Panicln logs a message at level Panic on the standard logger.
|
||||
func Panicln(args ...interface{}) {
|
||||
std.Panicln(args...)
|
||||
}
|
||||
|
||||
// Fatalln logs a message at level Fatal on the standard logger.
|
||||
func Fatalln(args ...interface{}) {
|
||||
std.Fatalln(args...)
|
||||
}
|
|
@ -1,48 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import "time"
|
||||
|
||||
const DefaultTimestampFormat = time.RFC3339
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
//
|
||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
// * `entry.Data["time"]`. The timestamp.
|
||||
// * `entry.Data["level"]. The level the entry was logged at.
|
||||
//
|
||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
// logged to `logger.Out`.
|
||||
type Formatter interface {
|
||||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to avoid silently overwriting the `time`, `msg` and `level` fields
// when dumping an entry. If this code weren't here, then:
//
//  logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
|
||||
//
|
||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(data Fields) {
|
||||
_, ok := data["time"]
|
||||
if ok {
|
||||
data["fields.time"] = data["time"]
|
||||
}
|
||||
|
||||
_, ok = data["msg"]
|
||||
if ok {
|
||||
data["fields.msg"] = data["msg"]
|
||||
}
|
||||
|
||||
_, ok = data["level"]
|
||||
if ok {
|
||||
data["fields.level"] = data["level"]
|
||||
}
|
||||
}
|
|
@ -1,98 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var smallFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
// largeFields is a large size data set for benchmarking
|
||||
var largeFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
"five": "six",
|
||||
"seven": "eight",
|
||||
"nine": "ten",
|
||||
"eleven": "twelve",
|
||||
"thirteen": "fourteen",
|
||||
"fifteen": "sixteen",
|
||||
"seventeen": "eighteen",
|
||||
"nineteen": "twenty",
|
||||
"a": "b",
|
||||
"c": "d",
|
||||
"e": "f",
|
||||
"g": "h",
|
||||
"i": "j",
|
||||
"k": "l",
|
||||
"m": "n",
|
||||
"o": "p",
|
||||
"q": "r",
|
||||
"s": "t",
|
||||
"u": "v",
|
||||
"w": "x",
|
||||
"y": "z",
|
||||
"this": "will",
|
||||
"make": "thirty",
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
var errorFields = Fields{
|
||||
"foo": fmt.Errorf("bar"),
|
||||
"baz": fmt.Errorf("qux"),
|
||||
}
|
||||
|
||||
func BenchmarkErrorTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
d, err = formatter.Format(entry)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(d)))
|
||||
}
|
||||
}
|
|
@ -1,122 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *TestHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *TestHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookFires(t *testing.T) {
|
||||
hook := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ModifyHook struct {
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||
entry.Data["wow"] = "whale"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookCanModifyEntry(t *testing.T) {
|
||||
hook := new(ModifyHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCanFireMultipleHooks(t *testing.T) {
|
||||
hook1 := new(ModifyHook)
|
||||
hook2 := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook1)
|
||||
log.Hooks.Add(hook2)
|
||||
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
assert.Equal(t, hook2.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ErrorHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Levels() []Level {
|
||||
return []Level{
|
||||
ErrorLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Error("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
|
@ -1,34 +0,0 @@
|
|||
package logrus
|
||||
|
||||
// A Hook is fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that hooks are not
// fired in a goroutine or via a channel with workers; you should handle such
// functionality yourself if you need the hook calls to be non-blocking and
// don't wish for the logging calls for levels returned from `Levels()` to block.
|
||||
type Hook interface {
|
||||
Levels() []Level
|
||||
Fire(*Entry) error
|
||||
}
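
// A minimal sketch (not part of the original file) of a Hook implementation
// that only fires on errors; the destination (stderr here) is illustrative.
//
//	type stderrHook struct{}
//
//	func (stderrHook) Levels() []Level {
//		return []Level{ErrorLevel, FatalLevel, PanicLevel}
//	}
//
//	func (stderrHook) Fire(entry *Entry) error {
//		line, err := entry.String()
//		if err != nil {
//			return err
//		}
//		_, err = fmt.Fprint(os.Stderr, line)
//		return err
//	}
//
// Register it on a *Logger instance with logger.Hooks.Add(stderrHook{}).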
|
||||
|
||||
// Internal type for storing the hooks on a logger instance.
|
||||
type LevelHooks map[Level][]Hook
|
||||
|
||||
// Add a hook to an instance of logger. This is called with
|
||||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||
func (hooks LevelHooks) Add(hook Hook) {
|
||||
for _, level := range hook.Levels() {
|
||||
hooks[level] = append(hooks[level], hook)
|
||||
}
|
||||
}
|
||||
|
||||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||
// appropriate hooks for a log entry.
|
||||
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
|
||||
for _, hook := range hooks[level] {
|
||||
if err := hook.Fire(entry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -1,41 +0,0 @@
package logrus

import (
    "encoding/json"
    "fmt"
)

type JSONFormatter struct {
    // TimestampFormat sets the format used for marshaling timestamps.
    TimestampFormat string
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
    data := make(Fields, len(entry.Data)+3)
    for k, v := range entry.Data {
        switch v := v.(type) {
        case error:
            // Otherwise errors are ignored by `encoding/json`
            // https://github.com/Sirupsen/logrus/issues/137
            data[k] = v.Error()
        default:
            data[k] = v
        }
    }
    prefixFieldClashes(data)

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
        timestampFormat = DefaultTimestampFormat
    }

    data["time"] = entry.Time.Format(timestampFormat)
    data["msg"] = entry.Message
    data["level"] = entry.Level.String()

    serialized, err := json.Marshal(data)
    if err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    }
    return append(serialized, '\n'), nil
}
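
A short usage sketch of the formatter above: an empty `TimestampFormat` falls back to `DefaultTimestampFormat`, and the reserved `time`/`msg`/`level` keys are added by `Format`. The RFC 3339 format string and the import path are illustrative assumptions.

```go
package main

import (
	"os"
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	// Emit one JSON object per entry, with RFC 3339 timestamps.
	log.Formatter = &logrus.JSONFormatter{TimestampFormat: time.RFC3339}

	log.WithField("component", "scheduler").Info("started")
	// => {"component":"scheduler","level":"info","msg":"started","time":"..."}
}
```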
@@ -1,120 +0,0 @@
package logrus

import (
    "encoding/json"
    "errors"

    "testing"
)

func TestErrorNotLost(t *testing.T) {
    formatter := &JSONFormatter{}

    b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
    if err != nil {
        t.Fatal("Unable to format entry: ", err)
    }

    entry := make(map[string]interface{})
    err = json.Unmarshal(b, &entry)
    if err != nil {
        t.Fatal("Unable to unmarshal formatted entry: ", err)
    }

    if entry["error"] != "wild walrus" {
        t.Fatal("Error field not set")
    }
}

func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
    formatter := &JSONFormatter{}

    b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
    if err != nil {
        t.Fatal("Unable to format entry: ", err)
    }

    entry := make(map[string]interface{})
    err = json.Unmarshal(b, &entry)
    if err != nil {
        t.Fatal("Unable to unmarshal formatted entry: ", err)
    }

    if entry["omg"] != "wild walrus" {
        t.Fatal("Error field not set")
    }
}

func TestFieldClashWithTime(t *testing.T) {
    formatter := &JSONFormatter{}

    b, err := formatter.Format(WithField("time", "right now!"))
    if err != nil {
        t.Fatal("Unable to format entry: ", err)
    }

    entry := make(map[string]interface{})
    err = json.Unmarshal(b, &entry)
    if err != nil {
        t.Fatal("Unable to unmarshal formatted entry: ", err)
    }

    if entry["fields.time"] != "right now!" {
        t.Fatal("fields.time not set to original time field")
    }

    if entry["time"] != "0001-01-01T00:00:00Z" {
        t.Fatal("time field not set to current time, was: ", entry["time"])
    }
}

func TestFieldClashWithMsg(t *testing.T) {
    formatter := &JSONFormatter{}

    b, err := formatter.Format(WithField("msg", "something"))
    if err != nil {
        t.Fatal("Unable to format entry: ", err)
    }

    entry := make(map[string]interface{})
    err = json.Unmarshal(b, &entry)
    if err != nil {
        t.Fatal("Unable to unmarshal formatted entry: ", err)
    }

    if entry["fields.msg"] != "something" {
        t.Fatal("fields.msg not set to original msg field")
    }
}

func TestFieldClashWithLevel(t *testing.T) {
    formatter := &JSONFormatter{}

    b, err := formatter.Format(WithField("level", "something"))
    if err != nil {
        t.Fatal("Unable to format entry: ", err)
    }

    entry := make(map[string]interface{})
    err = json.Unmarshal(b, &entry)
    if err != nil {
        t.Fatal("Unable to unmarshal formatted entry: ", err)
    }

    if entry["fields.level"] != "something" {
        t.Fatal("fields.level not set to original level field")
    }
}

func TestJSONEntryEndsWithNewline(t *testing.T) {
    formatter := &JSONFormatter{}

    b, err := formatter.Format(WithField("level", "something"))
    if err != nil {
        t.Fatal("Unable to format entry: ", err)
    }

    if b[len(b)-1] != '\n' {
        t.Fatal("Expected JSON log entry to end with a newline")
    }
}
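
The clash tests above depend on `prefixFieldClashes` moving user-supplied `time`, `msg`, and `level` keys under a `fields.` prefix. A small sketch of what that looks like from the caller's side, with the import path assumed:

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	log.Formatter = &logrus.JSONFormatter{}

	// "msg" collides with the key the formatter itself writes, so the
	// user value is expected to come out under "fields.msg".
	log.WithField("msg", "user supplied").Info("actual message")
	// => {"fields.msg":"user supplied","level":"info","msg":"actual message","time":"..."}
}
```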
@@ -1,212 +0,0 @@
package logrus

import (
    "io"
    "os"
    "sync"
)

type Logger struct {
    // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
    // file, or leave it default which is `os.Stderr`. You can also set this to
    // something more adventurous, such as logging to Kafka.
    Out io.Writer
    // Hooks for the logger instance. These allow firing events based on logging
    // levels and log entries. For example, to send errors to an error tracking
    // service, log to StatsD or dump the core on fatal errors.
    Hooks LevelHooks
    // All log entries pass through the formatter before logged to Out. The
    // included formatters are `TextFormatter` and `JSONFormatter` for which
    // TextFormatter is the default. In development (when a TTY is attached) it
    // logs with colors, but to a file it wouldn't. You can easily implement your
    // own that implements the `Formatter` interface, see the `README` or included
    // formatters for examples.
    Formatter Formatter
    // The logging level the logger should log at. This is typically (and defaults
    // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
    // logged. `logrus.Debug` is useful in development.
    Level Level
    // Used to sync writing to the log.
    mu sync.Mutex
}

// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//    var log = &Logger{
//      Out: os.Stderr,
//      Formatter: new(JSONFormatter),
//      Hooks: make(LevelHooks),
//      Level: logrus.DebugLevel,
//    }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
    return &Logger{
        Out:       os.Stderr,
        Formatter: new(TextFormatter),
        Hooks:     make(LevelHooks),
        Level:     InfoLevel,
    }
}

// Adds a field to the log entry; note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
    return NewEntry(logger).WithField(key, value)
}

// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
    return NewEntry(logger).WithFields(fields)
}

// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
    return NewEntry(logger).WithError(err)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debugf(format, args...)
    }
}

func (logger *Logger) Infof(format string, args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Infof(format, args...)
    }
}

func (logger *Logger) Printf(format string, args ...interface{}) {
    NewEntry(logger).Printf(format, args...)
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnf(format, args...)
    }
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnf(format, args...)
    }
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Errorf(format, args...)
    }
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatalf(format, args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panicf(format, args...)
    }
}

func (logger *Logger) Debug(args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debug(args...)
    }
}

func (logger *Logger) Info(args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Info(args...)
    }
}

func (logger *Logger) Print(args ...interface{}) {
    NewEntry(logger).Info(args...)
}

func (logger *Logger) Warn(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warn(args...)
    }
}

func (logger *Logger) Warning(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warn(args...)
    }
}

func (logger *Logger) Error(args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Error(args...)
    }
}

func (logger *Logger) Fatal(args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatal(args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panic(args...)
    }
}

func (logger *Logger) Debugln(args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debugln(args...)
    }
}

func (logger *Logger) Infoln(args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Infoln(args...)
    }
}

func (logger *Logger) Println(args ...interface{}) {
    NewEntry(logger).Println(args...)
}

func (logger *Logger) Warnln(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnln(args...)
    }
}

func (logger *Logger) Warningln(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnln(args...)
    }
}

func (logger *Logger) Errorln(args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Errorln(args...)
    }
}

func (logger *Logger) Fatalln(args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatalln(args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panicln(args...)
    }
}
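
Echoing the doc comment on `New` above, a sketch of instantiating a configured `Logger` directly instead of mutating the package-level one. Field values and the import path are illustrative assumptions.

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

// A package-global logger instance, as the doc comment on New suggests.
var log = &logrus.Logger{
	Out:       os.Stderr,
	Formatter: new(logrus.JSONFormatter),
	Hooks:     make(logrus.LevelHooks),
	Level:     logrus.DebugLevel,
}

func main() {
	// Debugf is emitted because Level is DebugLevel; with the default
	// InfoLevel it would be skipped by the level check in Debugf.
	log.Debugf("cache warmed in %dms", 42)
	log.WithField("user", "alice").Warn("quota nearly exhausted")
}
```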
@@ -1,98 +0,0 @@
package logrus

import (
    "fmt"
    "log"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint8

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
    switch level {
    case DebugLevel:
        return "debug"
    case InfoLevel:
        return "info"
    case WarnLevel:
        return "warning"
    case ErrorLevel:
        return "error"
    case FatalLevel:
        return "fatal"
    case PanicLevel:
        return "panic"
    }

    return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
    switch lvl {
    case "panic":
        return PanicLevel, nil
    case "fatal":
        return FatalLevel, nil
    case "error":
        return ErrorLevel, nil
    case "warn", "warning":
        return WarnLevel, nil
    case "info":
        return InfoLevel, nil
    case "debug":
        return DebugLevel, nil
    }

    var l Level
    return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
    // PanicLevel level, highest level of severity. Logs and then calls panic with the
    // message passed to Debug, Info, ...
    PanicLevel Level = iota
    // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
    // logging level is set to Panic.
    FatalLevel
    // ErrorLevel level. Logs. Used for errors that should definitely be noted.
    // Commonly used for hooks to send errors to an error tracking service.
    ErrorLevel
    // WarnLevel level. Non-critical entries that deserve eyes.
    WarnLevel
    // InfoLevel level. General operational entries about what's going on inside the
    // application.
    InfoLevel
    // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
    DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
    _ StdLogger = &log.Logger{}
    _ StdLogger = &Entry{}
    _ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
    Print(...interface{})
    Printf(string, ...interface{})
    Println(...interface{})

    Fatal(...interface{})
    Fatalf(string, ...interface{})
    Fatalln(...interface{})

    Panic(...interface{})
    Panicf(string, ...interface{})
    Panicln(...interface{})
}
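
A sketch of wiring `ParseLevel` to a command-line flag, as a typical consumer of the level API above. The flag name and the import path are assumptions; `SetLevel` is the package-level setter referenced by the race test further down.

```go
package main

import (
	"flag"

	"github.com/Sirupsen/logrus"
)

func main() {
	levelFlag := flag.String("log-level", "info", "panic|fatal|error|warn|info|debug")
	flag.Parse()

	level, err := logrus.ParseLevel(*levelFlag)
	if err != nil {
		// Unknown strings return an error; fall back to the default.
		level = logrus.InfoLevel
	}
	logrus.SetLevel(level)

	logrus.Debug("only visible with -log-level=debug")
	logrus.Infof("running at level %s", level)
}
```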
@@ -1,301 +0,0 @@
package logrus

import (
    "bytes"
    "encoding/json"
    "strconv"
    "strings"
    "sync"
    "testing"

    "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
)

func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
    var buffer bytes.Buffer
    var fields Fields

    logger := New()
    logger.Out = &buffer
    logger.Formatter = new(JSONFormatter)

    log(logger)

    err := json.Unmarshal(buffer.Bytes(), &fields)
    assert.Nil(t, err)

    assertions(fields)
}

func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
    var buffer bytes.Buffer

    logger := New()
    logger.Out = &buffer
    logger.Formatter = &TextFormatter{
        DisableColors: true,
    }

    log(logger)

    fields := make(map[string]string)
    for _, kv := range strings.Split(buffer.String(), " ") {
        if !strings.Contains(kv, "=") {
            continue
        }
        kvArr := strings.Split(kv, "=")
        key := strings.TrimSpace(kvArr[0])
        val := kvArr[1]
        if kvArr[1][0] == '"' {
            var err error
            val, err = strconv.Unquote(val)
            assert.NoError(t, err)
        }
        fields[key] = val
    }
    assertions(fields)
}

func TestPrint(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Print("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test")
        assert.Equal(t, fields["level"], "info")
    })
}

func TestInfo(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Info("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test")
        assert.Equal(t, fields["level"], "info")
    })
}

func TestWarn(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Warn("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test")
        assert.Equal(t, fields["level"], "warning")
    })
}

func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Infoln("test", "test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test test")
    })
}

func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Infoln("test", 10)
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test 10")
    })
}

func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Infoln(10, 10)
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "10 10")
    })
}

func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Infoln(10, 10)
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "10 10")
    })
}

func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Info("test", 10)
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test10")
    })
}

func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.Info("test", "test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "testtest")
    })
}

func TestWithFieldsShouldAllowAssignments(t *testing.T) {
    var buffer bytes.Buffer
    var fields Fields

    logger := New()
    logger.Out = &buffer
    logger.Formatter = new(JSONFormatter)

    localLog := logger.WithFields(Fields{
        "key1": "value1",
    })

    localLog.WithField("key2", "value2").Info("test")
    err := json.Unmarshal(buffer.Bytes(), &fields)
    assert.Nil(t, err)

    assert.Equal(t, "value2", fields["key2"])
    assert.Equal(t, "value1", fields["key1"])

    buffer = bytes.Buffer{}
    fields = Fields{}
    localLog.Info("test")
    err = json.Unmarshal(buffer.Bytes(), &fields)
    assert.Nil(t, err)

    _, ok := fields["key2"]
    assert.Equal(t, false, ok)
    assert.Equal(t, "value1", fields["key1"])
}

func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.WithField("msg", "hello").Info("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test")
    })
}

func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.WithField("msg", "hello").Info("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["msg"], "test")
        assert.Equal(t, fields["fields.msg"], "hello")
    })
}

func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.WithField("time", "hello").Info("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["fields.time"], "hello")
    })
}

func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
    LogAndAssertJSON(t, func(log *Logger) {
        log.WithField("level", 1).Info("test")
    }, func(fields Fields) {
        assert.Equal(t, fields["level"], "info")
        assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
    })
}

func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
    LogAndAssertText(t, func(log *Logger) {
        ll := log.WithField("herp", "derp")
        ll.Info("hello")
        ll.Info("bye")
    }, func(fields map[string]string) {
        for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
            if _, ok := fields[fieldName]; ok {
                t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
            }
        }
    })
}

func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {

    var buffer bytes.Buffer
    var fields Fields

    logger := New()
    logger.Out = &buffer
    logger.Formatter = new(JSONFormatter)

    llog := logger.WithField("context", "eating raw fish")

    llog.Info("looks delicious")

    err := json.Unmarshal(buffer.Bytes(), &fields)
    assert.NoError(t, err, "should have decoded first message")
    assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
    assert.Equal(t, fields["msg"], "looks delicious")
    assert.Equal(t, fields["context"], "eating raw fish")

    buffer.Reset()

    llog.Warn("omg it is!")

    err = json.Unmarshal(buffer.Bytes(), &fields)
    assert.NoError(t, err, "should have decoded second message")
    assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
    assert.Equal(t, fields["msg"], "omg it is!")
    assert.Equal(t, fields["context"], "eating raw fish")
    assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")

}

func TestConvertLevelToString(t *testing.T) {
    assert.Equal(t, "debug", DebugLevel.String())
    assert.Equal(t, "info", InfoLevel.String())
    assert.Equal(t, "warning", WarnLevel.String())
    assert.Equal(t, "error", ErrorLevel.String())
    assert.Equal(t, "fatal", FatalLevel.String())
    assert.Equal(t, "panic", PanicLevel.String())
}

func TestParseLevel(t *testing.T) {
    l, err := ParseLevel("panic")
    assert.Nil(t, err)
    assert.Equal(t, PanicLevel, l)

    l, err = ParseLevel("fatal")
    assert.Nil(t, err)
    assert.Equal(t, FatalLevel, l)

    l, err = ParseLevel("error")
    assert.Nil(t, err)
    assert.Equal(t, ErrorLevel, l)

    l, err = ParseLevel("warn")
    assert.Nil(t, err)
    assert.Equal(t, WarnLevel, l)

    l, err = ParseLevel("warning")
    assert.Nil(t, err)
    assert.Equal(t, WarnLevel, l)

    l, err = ParseLevel("info")
    assert.Nil(t, err)
    assert.Equal(t, InfoLevel, l)

    l, err = ParseLevel("debug")
    assert.Nil(t, err)
    assert.Equal(t, DebugLevel, l)

    l, err = ParseLevel("invalid")
    assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}

func TestGetSetLevelRace(t *testing.T) {
    wg := sync.WaitGroup{}
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            if i%2 == 0 {
                SetLevel(InfoLevel)
            } else {
                GetLevel()
            }
        }(i)

    }
    wg.Wait()
}
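
The `LogAndAssertJSON` helper above captures logger output in a `bytes.Buffer` and decodes it before asserting on fields. The same pattern works outside the package for testing structured log output; names and the import path here are illustrative.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/Sirupsen/logrus"
)

func main() {
	var buf bytes.Buffer

	log := logrus.New()
	log.Out = &buf
	log.Formatter = &logrus.JSONFormatter{}

	log.WithField("wow", "elephant").Info("test")

	// Decode the single JSON line and inspect its fields, mirroring
	// the assertions made in LogAndAssertJSON.
	fields := map[string]interface{}{}
	if err := json.Unmarshal(buf.Bytes(), &fields); err != nil {
		panic(err)
	}
	fmt.Println(fields["msg"], fields["wow"], fields["level"]) // test elephant info
}
```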
@@ -1,9 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
@@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
@@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly

package logrus

import (
    "syscall"
    "unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stderr
    var termios Termios
    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
    return err == 0
}
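
A small sketch of the kind of decision `IsTerminal` feeds: picking colored output when stderr is a TTY and plain key=value output otherwise. The `ForceColors`/`DisableColors` fields on `TextFormatter` and the import path are assumptions about this vintage of the library, and the text formatter also performs its own terminal check internally.

```go
package main

import (
	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()

	// IsTerminal (as defined above) reports whether stderr is a TTY;
	// disable colors when output is piped to a file or another process.
	if logrus.IsTerminal() {
		log.Formatter = &logrus.TextFormatter{ForceColors: true}
	} else {
		log.Formatter = &logrus.TextFormatter{DisableColors: true}
	}

	log.Info("formatter chosen based on the terminal check")
}
```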