Update go dependencies

parent f9624cbe46
commit 307bf76454

280 changed files with 54728 additions and 2991 deletions
19 go.mod

@@ -3,34 +3,44 @@ module k8s.io/ingress-nginx
go 1.13

require (
	github.com/ajg/form v1.5.1 // indirect
	github.com/armon/go-proxyproto v0.0.0-20200108142055-f0b8253b1507
	github.com/eapache/channels v1.1.0
	github.com/eapache/queue v1.1.0 // indirect
	github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 // indirect
	github.com/fatih/structs v1.1.0 // indirect
	github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa // indirect
	github.com/go-logr/zapr v0.1.1 // indirect
	github.com/imdario/mergo v0.3.7
	github.com/imkira/go-interpol v1.1.0 // indirect
	github.com/json-iterator/go v1.1.9
	github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
	github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348
	github.com/mitchellh/go-ps v1.0.0
	github.com/mitchellh/hashstructure v1.0.0
	github.com/mitchellh/mapstructure v1.1.2
	github.com/moul/http2curl v1.0.0 // indirect
	github.com/moul/pb v0.0.0-20180404114147-54bdd96e6a52
	github.com/ncabatoff/process-exporter v0.6.0
	github.com/onsi/ginkgo v1.11.0
	github.com/onsi/gomega v1.8.1
	github.com/onsi/ginkgo v1.12.0
	github.com/opencontainers/runc v1.0.0-rc9
	github.com/parnurzeal/gorequest v0.2.16
	github.com/pkg/errors v0.9.1
	github.com/prometheus/client_golang v1.4.0
	github.com/prometheus/client_model v0.2.0
	github.com/prometheus/common v0.9.1
	github.com/spf13/cobra v0.0.5
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.4.0
	github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect
	github.com/yudai/gojsondiff v1.0.0 // indirect
	github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
	github.com/yudai/pp v2.0.1+incompatible // indirect
	github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272
	golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
	google.golang.org/grpc v1.23.1
	gopkg.in/fsnotify/fsnotify.v1 v1.4.7
	gopkg.in/gavv/httpexpect.v2 v2.0.0
	gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
	gopkg.in/go-playground/pool.v3 v3.1.1

@@ -44,7 +54,6 @@ require (
	k8s.io/component-base v0.17.2
	k8s.io/klog v1.0.0
	k8s.io/kubernetes v1.17.2
	moul.io/http2curl v1.0.0 // indirect
	pault.ag/go/sniff v0.0.0-20200207005214-cf7e4d167732
	sigs.k8s.io/controller-runtime v0.4.0
)
42 go.sum
|
@ -43,6 +43,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
|
|||
github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
|
||||
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
|
@ -144,9 +146,12 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
|
|||
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 h1:DddqAaWDpywytcG8w/qoQ5sAN8X12d3Z3koB0C3Rxsc=
|
||||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
|
@ -169,8 +174,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
|
|||
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
|
||||
github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE=
|
||||
github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
|
||||
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
|
@ -345,6 +348,8 @@ github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
|||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
|
||||
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=
|
||||
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
|
@ -364,6 +369,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
|||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
|
@ -446,7 +453,7 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9
|
|||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/moul/pb v0.0.0-20180404114147-54bdd96e6a52 h1:8zDEa5yAIWYBHSDpPbSgGIBL/SvPSE9/FlB3aQ54d/A=
|
||||
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
|
||||
github.com/moul/pb v0.0.0-20180404114147-54bdd96e6a52/go.mod h1:jE2HT8eoucYyUPBFJMreiVlC3KPHkDMtN8wn+ef7Y64=
|
||||
github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
|
||||
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
|
||||
|
@ -474,8 +481,7 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
|||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
|
@ -484,8 +490,7 @@ github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
|||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
|
||||
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
|
@ -494,8 +499,6 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
|
|||
github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg=
|
||||
github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
|
||||
github.com/parnurzeal/gorequest v0.2.16 h1:T/5x+/4BT+nj+3eSknXmCTnEVGSzFzPGdpqmUVVZXHQ=
|
||||
github.com/parnurzeal/gorequest v0.2.16/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
|
@ -612,7 +615,9 @@ github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lP
|
|||
github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk=
|
||||
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
|
||||
github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
|
@ -620,9 +625,23 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv
|
|||
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
|
||||
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
|
||||
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
|
||||
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
|
||||
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
||||
github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272 h1:scDk3LAM8x+NPuywVGC0q0/G+5Ed8M0+YXecz4XnWRM=
|
||||
github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272/go.mod h1:KNkcm66cr4ilOiEcjydK+tc2ShPUhqmuoXCljXUBPu8=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
|
@ -736,6 +755,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -819,6 +839,8 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
|||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
|
||||
gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
|
||||
gopkg.in/gavv/httpexpect.v2 v2.0.0 h1:hJ8T99juOLAiZgipYmf64KM/W0xQbxNK6fo+7mpYeys=
|
||||
gopkg.in/gavv/httpexpect.v2 v2.0.0/go.mod h1:uMEAayJd5rI8SqPSUiHbQFyj5OTNrBgkLUYex48OYGc=
|
||||
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
|
||||
|
@ -904,8 +926,6 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
|||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
moul.io/http2curl v1.0.0 h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8=
|
||||
moul.io/http2curl v1.0.0/go.mod h1:f6cULg+e4Md/oW1cYmwW4IWQOVl2lGbmCNGOHvzX2kE=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
|
||||
mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
|
||||
|
|
25 vendor/github.com/ajg/form/.travis.yml (generated, vendored, normal file)

@@ -0,0 +1,25 @@
## Copyright 2014 Alvaro J. Genial. All rights reserved.
## Use of this source code is governed by a BSD-style
## license that can be found in the LICENSE file.

language: go

go:
- tip
- 1.6
- 1.5
- 1.4
- 1.3
# 1.2

before_install:
# - go get -v golang.org/x/tools/cmd/cover
# - go get -v golang.org/x/tools/cmd/vet
# - go get -v golang.org/x/lint/golint
- export PATH=$PATH:/home/travis/gopath/bin

script:
- go build -v ./...
- go test -v -cover ./...
- go vet ./...
# - golint .
27 vendor/github.com/ajg/form/LICENSE (generated, vendored, normal file)

@@ -0,0 +1,27 @@
Copyright (c) 2014 Alvaro J. Genial. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
247 vendor/github.com/ajg/form/README.md (generated, vendored, normal file)

@@ -0,0 +1,247 @@
form
====

A Form Encoding & Decoding Package for Go, written by [Alvaro J. Genial](http://alva.ro).

[Build Status](https://travis-ci.org/ajg/form)
[GoDoc](https://godoc.org/github.com/ajg/form)

Synopsis
--------

This library is designed to allow seamless, high-fidelity encoding and decoding of arbitrary data in `application/x-www-form-urlencoded` format and as [`url.Values`](http://golang.org/pkg/net/url/#Values). It is intended to be useful primarily in dealing with web forms and URI query strings, both of which natively employ said format.

Unsurprisingly, `form` is modeled after other Go [`encoding`](http://golang.org/pkg/encoding/) packages, in particular [`encoding/json`](http://golang.org/pkg/encoding/json/), and follows the same conventions (see below for more.) It aims to automatically handle any kind of concrete Go [data value](#values) (i.e., not functions, channels, etc.) while providing mechanisms for custom behavior.

Status
------

The implementation is in usable shape and is fairly well tested with its accompanying test suite. The API is unlikely to change much, but still may. Lastly, the code has not yet undergone a security review to ensure it is free of vulnerabilities. Please file an issue or send a pull request for fixes & improvements.

Dependencies
------------

The only requirement is [Go 1.2](http://golang.org/doc/go1.2) or later.

Usage
-----

```go
import "github.com/ajg/form"
// or: "gopkg.in/ajg/form.v1"
```

Given a type like the following...

```go
type User struct {
	Name         string            `form:"name"`
	Email        string            `form:"email"`
	Joined       time.Time         `form:"joined,omitempty"`
	Posts        []int             `form:"posts"`
	Preferences  map[string]string `form:"prefs"`
	Avatar       []byte            `form:"avatar"`
	PasswordHash int64             `form:"-"`
}
```

...it is easy to encode data of that type...

```go
func PostUser(url string, u User) error {
	var c http.Client
	vs, err := form.EncodeToValues(u)
	if err != nil {
		return err
	}
	_, err = c.PostForm(url, vs)
	return err
}
```

...as well as decode it...

```go
func Handler(w http.ResponseWriter, r *http.Request) {
	var u User

	d := form.NewDecoder(r.Body)
	if err := d.Decode(&u); err != nil {
		http.Error(w, "Form could not be decoded", http.StatusBadRequest)
		return
	}

	fmt.Fprintf(w, "Decoded: %#v", u)
}
```

...without having to do any grunt work.

Field Tags
----------

Like other encoding packages, `form` supports the following options for fields:

- `` `form:"-"` ``: Causes the field to be ignored during encoding and decoding.
- `` `form:"<name>"` ``: Overrides the field's name; useful especially when dealing with external identifiers in camelCase, as are commonly found on the web.
- `` `form:",omitempty"` ``: Elides the field during encoding if it is empty (typically meaning equal to the type's zero value.)
- `` `form:"<name>,omitempty"` ``: The way to combine the two options above.
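These options can be combined as needed; for instance, the following struct (a minimal illustrative sketch, not part of the package) uses all four forms:

```go
type Account struct {
	ID       int64     `form:"-"`                  // never encoded or decoded
	FullName string    `form:"fullName"`           // renamed to match an external camelCase identifier
	Nickname string    `form:",omitempty"`         // keeps its name, elided when empty
	LastSeen time.Time `form:"lastSeen,omitempty"` // renamed and elided when it is the zero value
}
```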
Values
------

### Simple Values

Values of the following types are all considered simple:

- `bool`
- `int`, `int8`, `int16`, `int32`, `int64`, `rune`
- `uint`, `uint8`, `uint16`, `uint32`, `uint64`, `byte`
- `float32`, `float64`
- `complex64`, `complex128`
- `string`
- `[]byte` (see note)
- [`time.Time`](http://golang.org/pkg/time/#Time)
- [`url.URL`](http://golang.org/pkg/net/url/#URL)
- An alias of any of the above
- A pointer to any of the above

### Composite Values

A composite value is one that can contain other values. Values of the following kinds...

- Maps
- Slices; except `[]byte` (see note)
- Structs; except [`time.Time`](http://golang.org/pkg/time/#Time) and [`url.URL`](http://golang.org/pkg/net/url/#URL)
- Arrays
- An alias of any of the above
- A pointer to any of the above

...are considered composites in general, unless they implement custom marshaling/unmarshaling. Composite values are encoded as a flat mapping of paths to values, where the paths are constructed by joining the parent and child paths with a period (`.`).

(Note: a byte slice is treated as a `string` by default because it's more efficient, but can also be decoded as a slice—i.e., with indexes.)
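For instance, encoding a nested struct yields period-joined paths (a minimal sketch; `Person` and `Address` are illustrative types, and the key order comes from `url.Values.Encode`, which sorts keys):

```go
type Address struct {
	City string `form:"city"`
}

type Person struct {
	Name string  `form:"name"`
	Addr Address `form:"addr"`
}

s, _ := form.EncodeToString(Person{Name: "Ana", Addr: Address{City: "Lima"}})
// s is roughly: addr.city=Lima&name=Ana
```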
### Untyped Values

While encouraged, it is not necessary to define a type (e.g. a `struct`) in order to use `form`, since it is able to encode and decode untyped data generically using the following rules:

- Simple values will be treated as a `string`.
- Composite values will be treated as a `map[string]interface{}`, itself able to contain nested values (both scalar and compound) ad infinitum.
- However, if there is a value (of any supported type) already present in a map for a given key, then it will be used when possible, rather than being replaced with a generic value as specified above; this makes it possible to handle partially typed, dynamic or schema-less values.
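For example, a query string can be decoded without declaring any type at all (a minimal sketch using `DecodeString`; the input string is illustrative):

```go
var data map[string]interface{}
// "name" decodes as a string; "prefs.theme" becomes a nested map[string]interface{}.
if err := form.DecodeString(&data, "name=Alice&prefs.theme=dark"); err != nil {
	// handle the error
}
```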
### Zero Values

By default, and without custom marshaling, zero values (also known as empty/default values) are encoded as the empty string. To disable this behavior, meaning to keep zero values in their literal form (e.g. `0` for integral types), `Encoder` offers a `KeepZeros` setter method, which will do just that when set to `true`.
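A minimal sketch of the difference (the `Stats` type is illustrative, and the comments show roughly what each call writes to stdout):

```go
type Stats struct {
	Name string `form:"name"`
	Hits int    `form:"hits"`
}

s := Stats{Name: "home", Hits: 0}
form.NewEncoder(os.Stdout).Encode(s)                 // roughly: hits=&name=home
form.NewEncoder(os.Stdout).KeepZeros(true).Encode(s) // roughly: hits=0&name=home
```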
### Unsupported Values

Values of the following kinds aren't supported and, if present, must be ignored.

- Channel
- Function
- Unsafe pointer
- An alias of any of the above
- A pointer to any of the above

Custom Marshaling
-----------------

There is a default (generally lossless) marshaling & unmarshaling scheme for any concrete data value in Go, which is good enough in most cases. However, it is possible to override it and use a custom scheme. For instance, a "binary" field could be marshaled more efficiently using [base64](http://golang.org/pkg/encoding/base64/) to prevent it from being percent-escaped during serialization to `application/x-www-form-urlencoded` format.

Because `form` provides support for [`encoding.TextMarshaler`](http://golang.org/pkg/encoding/#TextMarshaler) and [`encoding.TextUnmarshaler`](http://golang.org/pkg/encoding/#TextUnmarshaler) it is easy to do that; for instance, like this:

```go
import "encoding"

type Binary []byte

var (
	_ encoding.TextMarshaler   = &Binary{}
	_ encoding.TextUnmarshaler = &Binary{}
)

func (b Binary) MarshalText() ([]byte, error) {
	return []byte(base64.URLEncoding.EncodeToString([]byte(b))), nil
}

func (b *Binary) UnmarshalText(text []byte) error {
	bs, err := base64.URLEncoding.DecodeString(string(text))
	if err == nil {
		*b = Binary(bs)
	}
	return err
}
```

Now any value with type `Binary` will automatically be encoded using the [URL](http://golang.org/pkg/encoding/base64/#URLEncoding) variant of base64. It is left as an exercise to the reader to improve upon this scheme by eliminating the need for padding (which, besides being superfluous, uses `=`, a character that will end up percent-escaped.)

Keys
----

In theory any value can be a key as long as it has a string representation. However, by default, periods have special meaning to `form`, and thus, under the hood (i.e. in encoded form) they are transparently escaped using a preceding backslash (`\`). Backslashes within keys, themselves, are also escaped in this manner (e.g. as `\\`) in order to permit representing `\.` itself (as `\\\.`).

(Note: it is normally unnecessary to deal with this issue unless keys are being constructed manually—e.g. literally embedded in HTML or in a URI.)

The default delimiter and escape characters used for encoding and decoding composite keys can be changed using the `DelimitWith` and `EscapeWith` setter methods of `Encoder` and `Decoder`, respectively. For example...

```go
package main

import (
	"os"

	"github.com/ajg/form"
)

func main() {
	type B struct {
		Qux string `form:"qux"`
	}
	type A struct {
		FooBar B `form:"foo.bar"`
	}
	a := A{FooBar: B{"XYZ"}}
	os.Stdout.WriteString("Default: ")
	form.NewEncoder(os.Stdout).Encode(a)
	os.Stdout.WriteString("\nCustom: ")
	form.NewEncoder(os.Stdout).DelimitWith('/').Encode(a)
	os.Stdout.WriteString("\n")
}

```

...will produce...

```
Default: foo%5C.bar.qux=XYZ
Custom: foo.bar%2Fqux=XYZ
```

(`%5C` and `%2F` represent `\` and `/`, respectively.)

Limitations
-----------

- Circular (self-referential) values are untested.

Future Work
-----------

The following items would be nice to have in the future—though they are not being worked on yet:

- An option to treat all values as if they had been tagged with `omitempty`.
- An option to automatically treat all field names in `camelCase` or `underscore_case`.
- Built-in support for the types in [`math/big`](http://golang.org/pkg/math/big/).
- Built-in support for the types in [`image/color`](http://golang.org/pkg/image/color/).
- Improve encoding/decoding by reading/writing directly from/to the `io.Reader`/`io.Writer` when possible, rather than going through an intermediate representation (i.e. `node`) which requires more memory.

(Feel free to implement any of these and then send a pull request.)

Related Work
------------

- Package [gorilla/schema](https://github.com/gorilla/schema), which only implements decoding.
- Package [google/go-querystring](https://github.com/google/go-querystring), which only implements encoding.

License
-------

This library is distributed under a BSD-style [LICENSE](./LICENSE).
4 vendor/github.com/ajg/form/TODO.md (generated, vendored, normal file)

@@ -0,0 +1,4 @@
TODO
====

- Document IgnoreCase and IgnoreUnknownKeys in README.
370 vendor/github.com/ajg/form/decode.go (generated, vendored, normal file)

@@ -0,0 +1,370 @@
// Copyright 2014 Alvaro J. Genial. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package form
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewDecoder returns a new form Decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r, defaultDelimiter, defaultEscape, false, false}
|
||||
}
|
||||
|
||||
// Decoder decodes data from a form (application/x-www-form-urlencoded).
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
d rune
|
||||
e rune
|
||||
ignoreUnknown bool
|
||||
ignoreCase bool
|
||||
}
|
||||
|
||||
// DelimitWith sets r as the delimiter used for composite keys by Decoder d and returns the latter; it is '.' by default.
|
||||
func (d *Decoder) DelimitWith(r rune) *Decoder {
|
||||
d.d = r
|
||||
return d
|
||||
}
|
||||
|
||||
// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Decoder d and returns the latter; it is '\\' by default.
|
||||
func (d *Decoder) EscapeWith(r rune) *Decoder {
|
||||
d.e = r
|
||||
return d
|
||||
}
|
||||
|
||||
// Decode reads in and decodes form-encoded data into dst.
|
||||
func (d Decoder) Decode(dst interface{}) error {
|
||||
bs, err := ioutil.ReadAll(d.r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vs, err := url.ParseQuery(string(bs))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := reflect.ValueOf(dst)
|
||||
return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
|
||||
}
|
||||
|
||||
// IgnoreUnknownKeys if set to true it will make the Decoder ignore values
|
||||
// that are not found in the destination object instead of returning an error.
|
||||
func (d *Decoder) IgnoreUnknownKeys(ignoreUnknown bool) {
|
||||
d.ignoreUnknown = ignoreUnknown
|
||||
}
|
||||
|
||||
// IgnoreCase if set to true it will make the Decoder try to set values in the
|
||||
// destination object even if the case does not match.
|
||||
func (d *Decoder) IgnoreCase(ignoreCase bool) {
|
||||
d.ignoreCase = ignoreCase
|
||||
}
|
||||
|
||||
// DecodeString decodes src into dst.
|
||||
func (d Decoder) DecodeString(dst interface{}, src string) error {
|
||||
vs, err := url.ParseQuery(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := reflect.ValueOf(dst)
|
||||
return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
|
||||
}
|
||||
|
||||
// DecodeValues decodes vs into dst.
|
||||
func (d Decoder) DecodeValues(dst interface{}, vs url.Values) error {
|
||||
v := reflect.ValueOf(dst)
|
||||
return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v)))
|
||||
}
|
||||
|
||||
// DecodeString decodes src into dst.
|
||||
func DecodeString(dst interface{}, src string) error {
|
||||
return NewDecoder(nil).DecodeString(dst, src)
|
||||
}
|
||||
|
||||
// DecodeValues decodes vs into dst.
|
||||
func DecodeValues(dst interface{}, vs url.Values) error {
|
||||
return NewDecoder(nil).DecodeValues(dst, vs)
|
||||
}
|
||||
|
||||
func (d Decoder) decodeNode(v reflect.Value, n node) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("%v", e)
|
||||
}
|
||||
}()
|
||||
|
||||
if v.Kind() == reflect.Slice {
|
||||
return fmt.Errorf("could not decode directly into slice; use pointer to slice")
|
||||
}
|
||||
d.decodeValue(v, n)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d Decoder) decodeValue(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
k := v.Kind()
|
||||
|
||||
if k == reflect.Ptr && v.IsNil() {
|
||||
v.Set(reflect.New(t.Elem()))
|
||||
}
|
||||
|
||||
if unmarshalValue(v, x) {
|
||||
return
|
||||
}
|
||||
|
||||
empty := isEmpty(x)
|
||||
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
d.decodeValue(v.Elem(), x)
|
||||
return
|
||||
case reflect.Interface:
|
||||
if !v.IsNil() {
|
||||
d.decodeValue(v.Elem(), x)
|
||||
return
|
||||
|
||||
} else if empty {
|
||||
return // Allow nil interfaces only if empty.
|
||||
} else {
|
||||
panic("form: cannot decode non-empty value into into nil interface")
|
||||
}
|
||||
}
|
||||
|
||||
if empty {
|
||||
v.Set(reflect.Zero(t)) // Treat the empty string as the zero value.
|
||||
return
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
if t.ConvertibleTo(timeType) {
|
||||
d.decodeTime(v, x)
|
||||
} else if t.ConvertibleTo(urlType) {
|
||||
d.decodeURL(v, x)
|
||||
} else {
|
||||
d.decodeStruct(v, x)
|
||||
}
|
||||
case reflect.Slice:
|
||||
d.decodeSlice(v, x)
|
||||
case reflect.Array:
|
||||
d.decodeArray(v, x)
|
||||
case reflect.Map:
|
||||
d.decodeMap(v, x)
|
||||
case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
panic(t.String() + " has unsupported kind " + k.String())
|
||||
default:
|
||||
d.decodeBasic(v, x)
|
||||
}
|
||||
}
|
||||
|
||||
func (d Decoder) decodeStruct(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
for k, c := range getNode(x) {
|
||||
if f, ok := findField(v, k, d.ignoreCase); !ok && k == "" {
|
||||
panic(getString(x) + " cannot be decoded as " + t.String())
|
||||
} else if !ok {
|
||||
if !d.ignoreUnknown {
|
||||
panic(k + " doesn't exist in " + t.String())
|
||||
}
|
||||
} else if !f.CanSet() {
|
||||
panic(k + " cannot be set in " + t.String())
|
||||
} else {
|
||||
d.decodeValue(f, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Decoder) decodeMap(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.MakeMap(t))
|
||||
}
|
||||
for k, c := range getNode(x) {
|
||||
i := reflect.New(t.Key()).Elem()
|
||||
d.decodeValue(i, k)
|
||||
|
||||
w := v.MapIndex(i)
|
||||
if w.IsValid() { // We have an actual element value to decode into.
|
||||
if w.Kind() == reflect.Interface {
|
||||
w = w.Elem()
|
||||
}
|
||||
w = reflect.New(w.Type()).Elem()
|
||||
} else if t.Elem().Kind() != reflect.Interface { // The map's element type is concrete.
|
||||
w = reflect.New(t.Elem()).Elem()
|
||||
} else {
|
||||
// The best we can do here is to decode as either a string (for scalars) or a map[string]interface {} (for the rest).
|
||||
// We could try to guess the type based on the string (e.g. true/false => bool) but that'll get ugly fast,
|
||||
// especially if we have to guess the kind (slice vs. array vs. map) and index type (e.g. string, int, etc.)
|
||||
switch c.(type) {
|
||||
case node:
|
||||
w = reflect.MakeMap(stringMapType)
|
||||
case string:
|
||||
w = reflect.New(stringType).Elem()
|
||||
default:
|
||||
panic("value is neither node nor string")
|
||||
}
|
||||
}
|
||||
|
||||
d.decodeValue(w, c)
|
||||
v.SetMapIndex(i, w)
|
||||
}
|
||||
}
|
||||
|
||||
func (d Decoder) decodeArray(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
for k, c := range getNode(x) {
|
||||
i, err := strconv.Atoi(k)
|
||||
if err != nil {
|
||||
panic(k + " is not a valid index for type " + t.String())
|
||||
}
|
||||
if l := v.Len(); i >= l {
|
||||
panic("index is above array size")
|
||||
}
|
||||
d.decodeValue(v.Index(i), c)
|
||||
}
|
||||
}
|
||||
|
||||
func (d Decoder) decodeSlice(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
// Allow, but don't require, byte slices to be encoded as a single string.
|
||||
if s, ok := x.(string); ok {
|
||||
v.SetBytes([]byte(s))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: Implicit indexing is currently done at the parseValues level,
|
||||
// so if if an implicitKey reaches here it will always replace the last.
|
||||
implicit := 0
|
||||
for k, c := range getNode(x) {
|
||||
var i int
|
||||
if k == implicitKey {
|
||||
i = implicit
|
||||
implicit++
|
||||
} else {
|
||||
explicit, err := strconv.Atoi(k)
|
||||
if err != nil {
|
||||
panic(k + " is not a valid index for type " + t.String())
|
||||
}
|
||||
i = explicit
|
||||
implicit = explicit + 1
|
||||
}
|
||||
// "Extend" the slice if it's too short.
|
||||
if l := v.Len(); i >= l {
|
||||
delta := i - l + 1
|
||||
v.Set(reflect.AppendSlice(v, reflect.MakeSlice(t, delta, delta)))
|
||||
}
|
||||
d.decodeValue(v.Index(i), c)
|
||||
}
|
||||
}
|
||||
|
||||
func (d Decoder) decodeBasic(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
switch k, s := t.Kind(), getString(x); k {
|
||||
case reflect.Bool:
|
||||
if b, e := strconv.ParseBool(s); e == nil {
|
||||
v.SetBool(b)
|
||||
} else {
|
||||
panic("could not parse bool from " + strconv.Quote(s))
|
||||
}
|
||||
case reflect.Int,
|
||||
reflect.Int8,
|
||||
reflect.Int16,
|
||||
reflect.Int32,
|
||||
reflect.Int64:
|
||||
if i, e := strconv.ParseInt(s, 10, 64); e == nil {
|
||||
v.SetInt(i)
|
||||
} else {
|
||||
panic("could not parse int from " + strconv.Quote(s))
|
||||
}
|
||||
case reflect.Uint,
|
||||
reflect.Uint8,
|
||||
reflect.Uint16,
|
||||
reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
if u, e := strconv.ParseUint(s, 10, 64); e == nil {
|
||||
v.SetUint(u)
|
||||
} else {
|
||||
panic("could not parse uint from " + strconv.Quote(s))
|
||||
}
|
||||
case reflect.Float32,
|
||||
reflect.Float64:
|
||||
if f, e := strconv.ParseFloat(s, 64); e == nil {
|
||||
v.SetFloat(f)
|
||||
} else {
|
||||
panic("could not parse float from " + strconv.Quote(s))
|
||||
}
|
||||
case reflect.Complex64,
|
||||
reflect.Complex128:
|
||||
var c complex128
|
||||
if n, err := fmt.Sscanf(s, "%g", &c); n == 1 && err == nil {
|
||||
v.SetComplex(c)
|
||||
} else {
|
||||
panic("could not parse complex from " + strconv.Quote(s))
|
||||
}
|
||||
case reflect.String:
|
||||
v.SetString(s)
|
||||
default:
|
||||
panic(t.String() + " has unsupported kind " + k.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (d Decoder) decodeTime(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
s := getString(x)
|
||||
// TODO: Find a more efficient way to do this.
|
||||
for _, f := range allowedTimeFormats {
|
||||
if p, err := time.Parse(f, s); err == nil {
|
||||
v.Set(reflect.ValueOf(p).Convert(v.Type()))
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("cannot decode string `" + s + "` as " + t.String())
|
||||
}
|
||||
|
||||
func (d Decoder) decodeURL(v reflect.Value, x interface{}) {
|
||||
t := v.Type()
|
||||
s := getString(x)
|
||||
if u, err := url.Parse(s); err == nil {
|
||||
v.Set(reflect.ValueOf(*u).Convert(v.Type()))
|
||||
return
|
||||
}
|
||||
panic("cannot decode string `" + s + "` as " + t.String())
|
||||
}
|
||||
|
||||
var allowedTimeFormats = []string{
|
||||
"2006-01-02T15:04:05.999999999Z07:00",
|
||||
"2006-01-02T15:04:05.999999999Z07",
|
||||
"2006-01-02T15:04:05.999999999Z",
|
||||
"2006-01-02T15:04:05.999999999",
|
||||
"2006-01-02T15:04:05Z07:00",
|
||||
"2006-01-02T15:04:05Z07",
|
||||
"2006-01-02T15:04:05Z",
|
||||
"2006-01-02T15:04:05",
|
||||
"2006-01-02T15:04Z",
|
||||
"2006-01-02T15:04",
|
||||
"2006-01-02T15Z",
|
||||
"2006-01-02T15",
|
||||
"2006-01-02",
|
||||
"2006-01",
|
||||
"2006",
|
||||
"15:04:05.999999999Z07:00",
|
||||
"15:04:05.999999999Z07",
|
||||
"15:04:05.999999999Z",
|
||||
"15:04:05.999999999",
|
||||
"15:04:05Z07:00",
|
||||
"15:04:05Z07",
|
||||
"15:04:05Z",
|
||||
"15:04:05",
|
||||
"15:04Z",
|
||||
"15:04",
|
||||
"15Z",
|
||||
"15",
|
||||
}
|
388 vendor/github.com/ajg/form/encode.go (generated, vendored, normal file)

@@ -0,0 +1,388 @@
// Copyright 2014 Alvaro J. Genial. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package form
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewEncoder returns a new form Encoder.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w, defaultDelimiter, defaultEscape, false}
|
||||
}
|
||||
|
||||
// Encoder provides a way to encode to a Writer.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
d rune
|
||||
e rune
|
||||
z bool
|
||||
}
|
||||
|
||||
// DelimitWith sets r as the delimiter used for composite keys by Encoder e and returns the latter; it is '.' by default.
|
||||
func (e *Encoder) DelimitWith(r rune) *Encoder {
|
||||
e.d = r
|
||||
return e
|
||||
}
|
||||
|
||||
// EscapeWith sets r as the escape used for delimiters (and to escape itself) by Encoder e and returns the latter; it is '\\' by default.
|
||||
func (e *Encoder) EscapeWith(r rune) *Encoder {
|
||||
e.e = r
|
||||
return e
|
||||
}
|
||||
|
||||
// KeepZeros sets whether Encoder e should keep zero (default) values in their literal form when encoding, and returns the former; by default zero values are not kept, but are rather encoded as the empty string.
|
||||
func (e *Encoder) KeepZeros(z bool) *Encoder {
|
||||
e.z = z
|
||||
return e
|
||||
}
|
||||
|
||||
// Encode encodes dst as form and writes it out using the Encoder's Writer.
|
||||
func (e Encoder) Encode(dst interface{}) error {
|
||||
v := reflect.ValueOf(dst)
|
||||
n, err := encodeToNode(v, e.z)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s := n.values(e.d, e.e).Encode()
|
||||
l, err := io.WriteString(e.w, s)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case l != len(s):
|
||||
return errors.New("could not write data completely")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeToString encodes dst as a form and returns it as a string.
|
||||
func EncodeToString(dst interface{}) (string, error) {
|
||||
v := reflect.ValueOf(dst)
|
||||
n, err := encodeToNode(v, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
vs := n.values(defaultDelimiter, defaultEscape)
|
||||
return vs.Encode(), nil
|
||||
}
|
||||
|
||||
// EncodeToValues encodes dst as a form and returns it as Values.
|
||||
func EncodeToValues(dst interface{}) (url.Values, error) {
|
||||
v := reflect.ValueOf(dst)
|
||||
n, err := encodeToNode(v, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vs := n.values(defaultDelimiter, defaultEscape)
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
func encodeToNode(v reflect.Value, z bool) (n node, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("%v", e)
|
||||
}
|
||||
}()
|
||||
return getNode(encodeValue(v, z)), nil
|
||||
}
|
||||
|
||||
func encodeValue(v reflect.Value, z bool) interface{} {
|
||||
t := v.Type()
|
||||
k := v.Kind()
|
||||
|
||||
if s, ok := marshalValue(v); ok {
|
||||
return s
|
||||
} else if !z && isEmptyValue(v) {
|
||||
return "" // Treat the zero value as the empty string.
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return encodeValue(v.Elem(), z)
|
||||
case reflect.Struct:
|
||||
if t.ConvertibleTo(timeType) {
|
||||
return encodeTime(v)
|
||||
} else if t.ConvertibleTo(urlType) {
|
||||
return encodeURL(v)
|
||||
}
|
||||
return encodeStruct(v, z)
|
||||
case reflect.Slice:
|
||||
return encodeSlice(v, z)
|
||||
case reflect.Array:
|
||||
return encodeArray(v, z)
|
||||
case reflect.Map:
|
||||
return encodeMap(v, z)
|
||||
case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
panic(t.String() + " has unsupported kind " + t.Kind().String())
|
||||
default:
|
||||
return encodeBasic(v)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeStruct(v reflect.Value, z bool) interface{} {
|
||||
t := v.Type()
|
||||
n := node{}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
k, oe := fieldInfo(f)
|
||||
|
||||
if k == "-" {
|
||||
continue
|
||||
} else if fv := v.Field(i); oe && isEmptyValue(fv) {
|
||||
delete(n, k)
|
||||
} else {
|
||||
n[k] = encodeValue(fv, z)
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func encodeMap(v reflect.Value, z bool) interface{} {
|
||||
n := node{}
|
||||
for _, i := range v.MapKeys() {
|
||||
k := getString(encodeValue(i, z))
|
||||
n[k] = encodeValue(v.MapIndex(i), z)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func encodeArray(v reflect.Value, z bool) interface{} {
|
||||
n := node{}
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
n[strconv.Itoa(i)] = encodeValue(v.Index(i), z)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func encodeSlice(v reflect.Value, z bool) interface{} {
|
||||
t := v.Type()
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
return string(v.Bytes()) // Encode byte slices as a single string by default.
|
||||
}
|
||||
n := node{}
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
n[strconv.Itoa(i)] = encodeValue(v.Index(i), z)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func encodeTime(v reflect.Value) string {
|
||||
t := v.Convert(timeType).Interface().(time.Time)
|
||||
if t.Year() == 0 && (t.Month() == 0 || t.Month() == 1) && (t.Day() == 0 || t.Day() == 1) {
|
||||
return t.Format("15:04:05.999999999Z07:00")
|
||||
} else if t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 && t.Nanosecond() == 0 {
|
||||
return t.Format("2006-01-02")
|
||||
}
|
||||
return t.Format("2006-01-02T15:04:05.999999999Z07:00")
|
||||
}
|
||||
|
||||
func encodeURL(v reflect.Value) string {
|
||||
u := v.Convert(urlType).Interface().(url.URL)
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func encodeBasic(v reflect.Value) string {
|
||||
t := v.Type()
|
||||
switch k := t.Kind(); k {
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(v.Bool())
|
||||
case reflect.Int,
|
||||
reflect.Int8,
|
||||
reflect.Int16,
|
||||
reflect.Int32,
|
||||
reflect.Int64:
|
||||
return strconv.FormatInt(v.Int(), 10)
|
||||
case reflect.Uint,
|
||||
reflect.Uint8,
|
||||
reflect.Uint16,
|
||||
reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
return strconv.FormatUint(v.Uint(), 10)
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(v.Float(), 'g', -1, 32)
|
||||
case reflect.Float64:
|
||||
return strconv.FormatFloat(v.Float(), 'g', -1, 64)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
s := fmt.Sprintf("%g", v.Complex())
|
||||
return strings.TrimSuffix(strings.TrimPrefix(s, "("), ")")
|
||||
case reflect.String:
|
||||
return v.String()
|
||||
}
|
||||
panic(t.String() + " has unsupported kind " + t.Kind().String())
|
||||
}
|
||||
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch t := v.Type(); v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return v.Complex() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
case reflect.Struct:
|
||||
if t.ConvertibleTo(timeType) {
|
||||
return v.Convert(timeType).Interface().(time.Time).IsZero()
|
||||
}
|
||||
return reflect.DeepEqual(v, reflect.Zero(t))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// canIndexOrdinally returns whether a value contains an ordered sequence of elements.
|
||||
func canIndexOrdinally(v reflect.Value) bool {
|
||||
if !v.IsValid() {
|
||||
return false
|
||||
}
|
||||
switch t := v.Type(); t.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return canIndexOrdinally(v.Elem())
|
||||
case reflect.Slice, reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func fieldInfo(f reflect.StructField) (k string, oe bool) {
|
||||
if f.PkgPath != "" { // Skip private fields.
|
||||
return omittedKey, oe
|
||||
}
|
||||
|
||||
k = f.Name
|
||||
tag := f.Tag.Get("form")
|
||||
if tag == "" {
|
||||
return k, oe
|
||||
}
|
||||
|
||||
ps := strings.SplitN(tag, ",", 2)
|
||||
if ps[0] != "" {
|
||||
k = ps[0]
|
||||
}
|
||||
if len(ps) == 2 {
|
||||
oe = ps[1] == "omitempty"
|
||||
}
|
||||
return k, oe
|
||||
}
|
||||
|
||||
func findField(v reflect.Value, n string, ignoreCase bool) (reflect.Value, bool) {
|
||||
t := v.Type()
|
||||
l := v.NumField()
|
||||
|
||||
var lowerN string
|
||||
caseInsensitiveMatch := -1
|
||||
if ignoreCase {
|
||||
lowerN = strings.ToLower(n)
|
||||
}
|
||||
|
||||
// First try named fields.
|
||||
for i := 0; i < l; i++ {
|
||||
f := t.Field(i)
|
||||
k, _ := fieldInfo(f)
|
||||
if k == omittedKey {
|
||||
continue
|
||||
} else if n == k {
|
||||
return v.Field(i), true
|
||||
} else if ignoreCase && lowerN == strings.ToLower(k) {
|
||||
caseInsensitiveMatch = i
|
||||
}
|
||||
}
|
||||
|
||||
// If no exact match was found try case insensitive match.
|
||||
if caseInsensitiveMatch != -1 {
|
||||
return v.Field(caseInsensitiveMatch), true
|
||||
}
|
||||
|
||||
// Then try anonymous (embedded) fields.
|
||||
for i := 0; i < l; i++ {
|
||||
f := t.Field(i)
|
||||
k, _ := fieldInfo(f)
|
||||
if k == omittedKey || !f.Anonymous { // || k != "" ?
|
||||
continue
|
||||
}
|
||||
fv := v.Field(i)
|
||||
fk := fv.Kind()
|
||||
for fk == reflect.Ptr || fk == reflect.Interface {
|
||||
fv = fv.Elem()
|
||||
fk = fv.Kind()
|
||||
}
|
||||
|
||||
if fk != reflect.Struct {
|
||||
continue
|
||||
}
|
||||
if ev, ok := findField(fv, n, ignoreCase); ok {
|
||||
return ev, true
|
||||
}
|
||||
}
|
||||
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
var (
|
||||
stringType = reflect.TypeOf(string(""))
|
||||
stringMapType = reflect.TypeOf(map[string]interface{}{})
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
timePtrType = reflect.TypeOf(&time.Time{})
|
||||
urlType = reflect.TypeOf(url.URL{})
|
||||
)
|
||||
|
||||
func skipTextMarshalling(t reflect.Type) bool {
|
||||
/*// Skip time.Time because its text unmarshaling is overly rigid:
|
||||
return t == timeType || t == timePtrType*/
|
||||
// Skip time.Time & convertibles because its text unmarshaling is overly rigid:
|
||||
return t.ConvertibleTo(timeType) || t.ConvertibleTo(timePtrType)
|
||||
}
|
||||
|
||||
func unmarshalValue(v reflect.Value, x interface{}) bool {
|
||||
if skipTextMarshalling(v.Type()) {
|
||||
return false
|
||||
}
|
||||
|
||||
tu, ok := v.Interface().(encoding.TextUnmarshaler)
|
||||
if !ok && !v.CanAddr() {
|
||||
return false
|
||||
} else if !ok {
|
||||
return unmarshalValue(v.Addr(), x)
|
||||
}
|
||||
|
||||
s := getString(x)
|
||||
if err := tu.UnmarshalText([]byte(s)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func marshalValue(v reflect.Value) (string, bool) {
|
||||
if skipTextMarshalling(v.Type()) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
tm, ok := v.Interface().(encoding.TextMarshaler)
|
||||
if !ok && !v.CanAddr() {
|
||||
return "", false
|
||||
} else if !ok {
|
||||
return marshalValue(v.Addr())
|
||||
}
|
||||
|
||||
bs, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(bs), true
|
||||
}
|
14 vendor/github.com/ajg/form/form.go (generated, vendored, normal file)

@@ -0,0 +1,14 @@
// Copyright 2014 Alvaro J. Genial. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package form implements encoding and decoding of application/x-www-form-urlencoded data.
package form

const (
	implicitKey = "_"
	omittedKey  = "-"

	defaultDelimiter = '.'
	defaultEscape    = '\\'
)
152 vendor/github.com/ajg/form/node.go (generated, vendored, normal file)

@@ -0,0 +1,152 @@
// Copyright 2014 Alvaro J. Genial. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package form
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type node map[string]interface{}
|
||||
|
||||
func (n node) values(d, e rune) url.Values {
|
||||
vs := url.Values{}
|
||||
n.merge(d, e, "", &vs)
|
||||
return vs
|
||||
}
|
||||
|
||||
func (n node) merge(d, e rune, p string, vs *url.Values) {
|
||||
for k, x := range n {
|
||||
switch y := x.(type) {
|
||||
case string:
|
||||
vs.Add(p+escape(d, e, k), y)
|
||||
case node:
|
||||
y.merge(d, e, p+escape(d, e, k)+string(d), vs)
|
||||
default:
|
||||
panic("value is neither string nor node")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Add tests for implicit indexing.
|
||||
func parseValues(d, e rune, vs url.Values, canIndexFirstLevelOrdinally bool) node {
|
||||
// NOTE: Because of the flattening of potentially multiple strings to one key, implicit indexing works:
|
||||
// i. At the first level; e.g. Foo.Bar=A&Foo.Bar=B becomes 0.Foo.Bar=A&1.Foo.Bar=B
|
||||
// ii. At the last level; e.g. Foo.Bar._=A&Foo.Bar._=B becomes Foo.Bar.0=A&Foo.Bar.1=B
|
||||
// TODO: At in-between levels; e.g. Foo._.Bar=A&Foo._.Bar=B becomes Foo.0.Bar=A&Foo.1.Bar=B
|
||||
// (This last one requires that there only be one placeholder in order for it to be unambiguous.)
|
||||
|
||||
m := map[string]string{}
|
||||
for k, ss := range vs {
|
||||
indexLastLevelOrdinally := strings.HasSuffix(k, string(d)+implicitKey)
|
||||
|
||||
for i, s := range ss {
|
||||
if canIndexFirstLevelOrdinally {
|
||||
k = strconv.Itoa(i) + string(d) + k
|
||||
} else if indexLastLevelOrdinally {
|
||||
k = strings.TrimSuffix(k, implicitKey) + strconv.Itoa(i)
|
||||
}
|
||||
|
||||
m[k] = s
|
||||
}
|
||||
}
|
||||
|
||||
n := node{}
|
||||
for k, s := range m {
|
||||
n = n.split(d, e, k, s)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func splitPath(d, e rune, path string) (k, rest string) {
|
||||
esc := false
|
||||
for i, r := range path {
|
||||
switch {
|
||||
case !esc && r == e:
|
||||
esc = true
|
||||
case !esc && r == d:
|
||||
return unescape(d, e, path[:i]), path[i+1:]
|
||||
default:
|
||||
esc = false
|
||||
}
|
||||
}
|
||||
return unescape(d, e, path), ""
|
||||
}
|
||||
|
||||
func (n node) split(d, e rune, path, s string) node {
|
||||
k, rest := splitPath(d, e, path)
|
||||
if rest == "" {
|
||||
return add(n, k, s)
|
||||
}
|
||||
if _, ok := n[k]; !ok {
|
||||
n[k] = node{}
|
||||
}
|
||||
|
||||
c := getNode(n[k])
|
||||
n[k] = c.split(d, e, rest, s)
|
||||
return n
|
||||
}
|
||||
|
||||
func add(n node, k, s string) node {
|
||||
if n == nil {
|
||||
return node{k: s}
|
||||
}
|
||||
|
||||
if _, ok := n[k]; ok {
|
||||
panic("key " + k + " already set")
|
||||
}
|
||||
|
||||
n[k] = s
|
||||
return n
|
||||
}
|
||||
|
||||
func isEmpty(x interface{}) bool {
|
||||
switch y := x.(type) {
|
||||
case string:
|
||||
return y == ""
|
||||
case node:
|
||||
if s, ok := y[""].(string); ok {
|
||||
return s == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
panic("value is neither string nor node")
|
||||
}
|
||||
|
||||
func getNode(x interface{}) node {
|
||||
switch y := x.(type) {
|
||||
case string:
|
||||
return node{"": y}
|
||||
case node:
|
||||
return y
|
||||
}
|
||||
panic("value is neither string nor node")
|
||||
}
|
||||
|
||||
func getString(x interface{}) string {
|
||||
switch y := x.(type) {
|
||||
case string:
|
||||
return y
|
||||
case node:
|
||||
if s, ok := y[""].(string); ok {
|
||||
return s
|
||||
}
|
||||
return ""
|
||||
}
|
||||
panic("value is neither string nor node")
|
||||
}
|
||||
|
||||
func escape(d, e rune, s string) string {
|
||||
s = strings.Replace(s, string(e), string(e)+string(e), -1) // Escape the escape (\ => \\)
|
||||
s = strings.Replace(s, string(d), string(e)+string(d), -1) // Escape the delimiter (. => \.)
|
||||
return s
|
||||
}
|
||||
|
||||
func unescape(d, e rune, s string) string {
|
||||
s = strings.Replace(s, string(e)+string(d), string(d), -1) // Unescape the delimiter (\. => .)
|
||||
s = strings.Replace(s, string(e)+string(e), string(e), -1) // Unescape the escape (\\ => \)
|
||||
return s
|
||||
}
|
18  vendor/github.com/ajg/form/pre-commit.sh  generated  vendored  Normal file
@@ -0,0 +1,18 @@
#!/bin/bash -eu

# TODO: Only colorize messages given a suitable terminal.
# FIXME: Handle case in which no stash entry is created due to no changes.

printf "\e[30m=== PRE-COMMIT STARTING ===\e[m\n"
git stash save --quiet --keep-index --include-untracked

if go build -v ./... && go test -v -cover ./... && go vet ./... && golint . && travis-lint; then
	result=$?
	printf "\e[32m=== PRE-COMMIT SUCCEEDED ===\e[m\n"
else
	result=$?
	printf "\e[31m=== PRE-COMMIT FAILED ===\e[m\n"
fi

git stash pop --quiet
exit $result
@@ -6,7 +6,6 @@
# Folders
_obj
_test
.idea

# Architecture specific extensions/prefixes
*.[568vq]

@@ -21,4 +20,4 @@ _cgo_export.*
_testmain.go

*.exe
tags
*.test
13  vendor/github.com/fatih/structs/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,13 @@
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
- tip
sudo: false
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
- $HOME/gopath/bin/goveralls -service=travis-ci
21  vendor/github.com/fatih/structs/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Fatih Arslan
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
163  vendor/github.com/fatih/structs/README.md  generated  vendored  Normal file
@@ -0,0 +1,163 @@
# Structs [](http://godoc.org/github.com/fatih/structs) [](https://travis-ci.org/fatih/structs) [](https://coveralls.io/r/fatih/structs)
|
||||
|
||||
Structs contains various utilities to work with Go (Golang) structs. It was
|
||||
initially used by me to convert a struct into a `map[string]interface{}`. With
|
||||
time I've added other utilities for structs. It's basically a high level
|
||||
package based on primitives from the reflect package. Feel free to add new
|
||||
functions or improve the existing code.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/fatih/structs
|
||||
```
|
||||
|
||||
## Usage and Examples
|
||||
|
||||
Just like the standard lib `strings`, `bytes` and co packages, `structs` has
|
||||
many global functions to manipulate or organize your struct data. Lets define
|
||||
and declare a struct:
|
||||
|
||||
```go
|
||||
type Server struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
ID int
|
||||
Enabled bool
|
||||
users []string // not exported
|
||||
http.Server // embedded
|
||||
}
|
||||
|
||||
server := &Server{
|
||||
Name: "gopher",
|
||||
ID: 123456,
|
||||
Enabled: true,
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
// Convert a struct to a map[string]interface{}
|
||||
// => {"Name":"gopher", "ID":123456, "Enabled":true}
|
||||
m := structs.Map(server)
|
||||
|
||||
// Convert the values of a struct to a []interface{}
|
||||
// => ["gopher", 123456, true]
|
||||
v := structs.Values(server)
|
||||
|
||||
// Convert the names of a struct to a []string
|
||||
// (see "Names methods" for more info about fields)
|
||||
n := structs.Names(server)
|
||||
|
||||
// Convert the values of a struct to a []*Field
|
||||
// (see "Field methods" for more info about fields)
|
||||
f := structs.Fields(server)
|
||||
|
||||
// Return the struct name => "Server"
|
||||
n := structs.Name(server)
|
||||
|
||||
// Check if any field of a struct is initialized or not.
|
||||
h := structs.HasZero(server)
|
||||
|
||||
// Check if all fields of a struct is initialized or not.
|
||||
z := structs.IsZero(server)
|
||||
|
||||
// Check if server is a struct or a pointer to struct
|
||||
i := structs.IsStruct(server)
|
||||
```
|
||||
|
||||
### Struct methods
|
||||
|
||||
The structs functions can be also used as independent methods by creating a new
|
||||
`*structs.Struct`. This is handy if you want to have more control over the
|
||||
structs (such as retrieving a single Field).
|
||||
|
||||
```go
|
||||
// Create a new struct type:
|
||||
s := structs.New(server)
|
||||
|
||||
m := s.Map() // Get a map[string]interface{}
|
||||
v := s.Values() // Get a []interface{}
|
||||
f := s.Fields() // Get a []*Field
|
||||
n := s.Names() // Get a []string
|
||||
f := s.Field(name) // Get a *Field based on the given field name
|
||||
f, ok := s.FieldOk(name) // Get a *Field based on the given field name
|
||||
n := s.Name() // Get the struct name
|
||||
h := s.HasZero() // Check if any field is uninitialized
|
||||
z := s.IsZero() // Check if all fields are uninitialized
|
||||
```
|
||||
|
||||
### Field methods
|
||||
|
||||
We can easily examine a single Field for more detail. Below you can see how we
|
||||
get and interact with various field methods:
|
||||
|
||||
|
||||
```go
|
||||
s := structs.New(server)
|
||||
|
||||
// Get the Field struct for the "Name" field
|
||||
name := s.Field("Name")
|
||||
|
||||
// Get the underlying value, value => "gopher"
|
||||
value := name.Value().(string)
|
||||
|
||||
// Set the field's value
|
||||
name.Set("another gopher")
|
||||
|
||||
// Get the field's kind, kind => "string"
|
||||
name.Kind()
|
||||
|
||||
// Check if the field is exported or not
|
||||
if name.IsExported() {
|
||||
fmt.Println("Name field is exported")
|
||||
}
|
||||
|
||||
// Check if the value is a zero value, such as "" for string, 0 for int
|
||||
if !name.IsZero() {
|
||||
fmt.Println("Name is initialized")
|
||||
}
|
||||
|
||||
// Check if the field is an anonymous (embedded) field
|
||||
if !name.IsEmbedded() {
|
||||
fmt.Println("Name is not an embedded field")
|
||||
}
|
||||
|
||||
// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
|
||||
tagValue := name.Tag("json")
|
||||
```
|
||||
|
||||
Nested structs are supported too:
|
||||
|
||||
```go
|
||||
addrField := s.Field("Server").Field("Addr")
|
||||
|
||||
// Get the value for addr
|
||||
a := addrField.Value().(string)
|
||||
|
||||
// Or get all fields
|
||||
httpServer := s.Field("Server").Fields()
|
||||
```
|
||||
|
||||
We can also get a slice of Fields from the Struct type to iterate over all
|
||||
fields. This is handy if you wish to examine all fields:
|
||||
|
||||
```go
|
||||
s := structs.New(server)
|
||||
|
||||
for _, f := range s.Fields() {
|
||||
fmt.Printf("field name: %+v\n", f.Name())
|
||||
|
||||
if f.IsExported() {
|
||||
fmt.Printf("value : %+v\n", f.Value())
|
||||
fmt.Printf("is zero : %+v\n", f.IsZero())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Credits
|
||||
|
||||
* [Fatih Arslan](https://github.com/fatih)
|
||||
* [Cihangir Savas](https://github.com/cihangir)
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT) - see LICENSE.md for more details
|
141  vendor/github.com/fatih/structs/field.go  generated  vendored  Normal file
@@ -0,0 +1,141 @@
package structs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
errNotExported = errors.New("field is not exported")
|
||||
errNotSettable = errors.New("field is not settable")
|
||||
)
|
||||
|
||||
// Field represents a single struct field that encapsulates high level
|
||||
// functions around the field.
|
||||
type Field struct {
|
||||
value reflect.Value
|
||||
field reflect.StructField
|
||||
defaultTag string
|
||||
}
|
||||
|
||||
// Tag returns the value associated with key in the tag string. If there is no
|
||||
// such key in the tag, Tag returns the empty string.
|
||||
func (f *Field) Tag(key string) string {
|
||||
return f.field.Tag.Get(key)
|
||||
}
|
||||
|
||||
// Value returns the underlying value of the field. It panics if the field
|
||||
// is not exported.
|
||||
func (f *Field) Value() interface{} {
|
||||
return f.value.Interface()
|
||||
}
|
||||
|
||||
// IsEmbedded returns true if the given field is an anonymous field (embedded)
|
||||
func (f *Field) IsEmbedded() bool {
|
||||
return f.field.Anonymous
|
||||
}
|
||||
|
||||
// IsExported returns true if the given field is exported.
|
||||
func (f *Field) IsExported() bool {
|
||||
return f.field.PkgPath == ""
|
||||
}
|
||||
|
||||
// IsZero returns true if the given field is not initialized (has a zero value).
|
||||
// It panics if the field is not exported.
|
||||
func (f *Field) IsZero() bool {
|
||||
zero := reflect.Zero(f.value.Type()).Interface()
|
||||
current := f.Value()
|
||||
|
||||
return reflect.DeepEqual(current, zero)
|
||||
}
|
||||
|
||||
// Name returns the name of the given field
|
||||
func (f *Field) Name() string {
|
||||
return f.field.Name
|
||||
}
|
||||
|
||||
// Kind returns the fields kind, such as "string", "map", "bool", etc ..
|
||||
func (f *Field) Kind() reflect.Kind {
|
||||
return f.value.Kind()
|
||||
}
|
||||
|
||||
// Set sets the field to given value v. It returns an error if the field is not
|
||||
// settable (not addressable or not exported) or if the given value's type
|
||||
// doesn't match the fields type.
|
||||
func (f *Field) Set(val interface{}) error {
|
||||
// we can't set unexported fields, so be sure this field is exported
|
||||
if !f.IsExported() {
|
||||
return errNotExported
|
||||
}
|
||||
|
||||
// do we get here? not sure...
|
||||
if !f.value.CanSet() {
|
||||
return errNotSettable
|
||||
}
|
||||
|
||||
given := reflect.ValueOf(val)
|
||||
|
||||
if f.value.Kind() != given.Kind() {
|
||||
return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
|
||||
}
|
||||
|
||||
f.value.Set(given)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Zero sets the field to its zero value. It returns an error if the field is not
|
||||
// settable (not addressable or not exported).
|
||||
func (f *Field) Zero() error {
|
||||
zero := reflect.Zero(f.value.Type()).Interface()
|
||||
return f.Set(zero)
|
||||
}
|
||||
|
||||
// Fields returns a slice of Fields. This is particular handy to get the fields
|
||||
// of a nested struct . A struct tag with the content of "-" ignores the
|
||||
// checking of that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field *http.Request `structs:"-"`
|
||||
//
|
||||
// It panics if field is not exported or if field's kind is not struct
|
||||
func (f *Field) Fields() []*Field {
|
||||
return getFields(f.value, f.defaultTag)
|
||||
}
|
||||
|
||||
// Field returns the field from a nested struct. It panics if the nested struct
|
||||
// is not exported or if the field was not found.
|
||||
func (f *Field) Field(name string) *Field {
|
||||
field, ok := f.FieldOk(name)
|
||||
if !ok {
|
||||
panic("field not found")
|
||||
}
|
||||
|
||||
return field
|
||||
}
|
||||
|
||||
// FieldOk returns the field from a nested struct. The boolean returns whether
|
||||
// the field was found (true) or not (false).
|
||||
func (f *Field) FieldOk(name string) (*Field, bool) {
|
||||
value := &f.value
|
||||
// value must be settable so we need to make sure it holds the address of the
|
||||
// variable and not a copy, so we can pass the pointer to strctVal instead of a
|
||||
// copy (which is not assigned to any variable, hence not settable).
|
||||
// see "https://blog.golang.org/laws-of-reflection#TOC_8."
|
||||
if f.value.Kind() != reflect.Ptr {
|
||||
a := f.value.Addr()
|
||||
value = &a
|
||||
}
|
||||
v := strctVal(value.Interface())
|
||||
t := v.Type()
|
||||
|
||||
field, ok := t.FieldByName(name)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &Field{
|
||||
field: field,
|
||||
value: v.FieldByName(name),
|
||||
}, true
|
||||
}
|
584  vendor/github.com/fatih/structs/structs.go  generated  vendored  Normal file
@@ -0,0 +1,584 @@
// Package structs contains various utilities functions to work with structs.
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultTagName is the default tag name for struct fields which provides
|
||||
// a more granular to tweak certain structs. Lookup the necessary functions
|
||||
// for more info.
|
||||
DefaultTagName = "structs" // struct's field default tag name
|
||||
)
|
||||
|
||||
// Struct encapsulates a struct type to provide several high level functions
|
||||
// around the struct.
|
||||
type Struct struct {
|
||||
raw interface{}
|
||||
value reflect.Value
|
||||
TagName string
|
||||
}
|
||||
|
||||
// New returns a new *Struct with the struct s. It panics if the s's kind is
|
||||
// not struct.
|
||||
func New(s interface{}) *Struct {
|
||||
return &Struct{
|
||||
raw: s,
|
||||
value: strctVal(s),
|
||||
TagName: DefaultTagName,
|
||||
}
|
||||
}
|
||||
|
||||
// Map converts the given struct to a map[string]interface{}, where the keys
|
||||
// of the map are the field names and the values of the map the associated
|
||||
// values of the fields. The default key string is the struct field name but
|
||||
// can be changed in the struct field's tag value. The "structs" key in the
|
||||
// struct's field tag value is the key name. Example:
|
||||
//
|
||||
// // Field appears in map as key "myName".
|
||||
// Name string `structs:"myName"`
|
||||
//
|
||||
// A tag value with the content of "-" ignores that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// A tag value with the content of "string" uses the stringer to get the value. Example:
|
||||
//
|
||||
// // The value will be output of Animal's String() func.
|
||||
// // Map will panic if Animal does not implement String().
|
||||
// Field *Animal `structs:"field,string"`
|
||||
//
|
||||
// A tag value with the option of "flatten" used in a struct field is to flatten its fields
|
||||
// in the output map. Example:
|
||||
//
|
||||
// // The FieldStruct's fields will be flattened into the output map.
|
||||
// FieldStruct time.Time `structs:",flatten"`
|
||||
//
|
||||
// A tag value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Field is not processed further by this package.
|
||||
// Field time.Time `structs:"myName,omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// A tag value with the option of "omitempty" ignores that particular field if
|
||||
// the field value is empty. Example:
|
||||
//
|
||||
// // Field appears in map as key "myName", but the field is
|
||||
// // skipped if empty.
|
||||
// Field string `structs:"myName,omitempty"`
|
||||
//
|
||||
// // Field appears in map as key "Field" (the default), but
|
||||
// // the field is skipped if empty.
|
||||
// Field string `structs:",omitempty"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected.
|
||||
func (s *Struct) Map() map[string]interface{} {
|
||||
out := make(map[string]interface{})
|
||||
s.FillMap(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// FillMap is the same as Map. Instead of returning the output, it fills the
|
||||
// given map.
|
||||
func (s *Struct) FillMap(out map[string]interface{}) {
|
||||
if out == nil {
|
||||
return
|
||||
}
|
||||
|
||||
fields := s.structFields()
|
||||
|
||||
for _, field := range fields {
|
||||
name := field.Name
|
||||
val := s.value.FieldByName(name)
|
||||
isSubStruct := false
|
||||
var finalVal interface{}
|
||||
|
||||
tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
if tagName != "" {
|
||||
name = tagName
|
||||
}
|
||||
|
||||
// if the value is a zero value and the field is marked as omitempty do
|
||||
// not include
|
||||
if tagOpts.Has("omitempty") {
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
current := val.Interface()
|
||||
|
||||
if reflect.DeepEqual(current, zero) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !tagOpts.Has("omitnested") {
|
||||
finalVal = s.nested(val)
|
||||
|
||||
v := reflect.ValueOf(val.Interface())
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Map, reflect.Struct:
|
||||
isSubStruct = true
|
||||
}
|
||||
} else {
|
||||
finalVal = val.Interface()
|
||||
}
|
||||
|
||||
if tagOpts.Has("string") {
|
||||
s, ok := val.Interface().(fmt.Stringer)
|
||||
if ok {
|
||||
out[name] = s.String()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if isSubStruct && (tagOpts.Has("flatten")) {
|
||||
for k := range finalVal.(map[string]interface{}) {
|
||||
out[k] = finalVal.(map[string]interface{})[k]
|
||||
}
|
||||
} else {
|
||||
out[name] = finalVal
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Values converts the given s struct's field values to a []interface{}. A
|
||||
// struct tag with the content of "-" ignores the that particular field.
|
||||
// Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field int `structs:"-"`
|
||||
//
|
||||
// A value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Fields is not processed further by this package.
|
||||
// Field time.Time `structs:",omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// A tag value with the option of "omitempty" ignores that particular field and
|
||||
// is not added to the values if the field value is empty. Example:
|
||||
//
|
||||
// // Field is skipped if empty
|
||||
// Field string `structs:",omitempty"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected.
|
||||
func (s *Struct) Values() []interface{} {
|
||||
fields := s.structFields()
|
||||
|
||||
var t []interface{}
|
||||
|
||||
for _, field := range fields {
|
||||
val := s.value.FieldByName(field.Name)
|
||||
|
||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
|
||||
// if the value is a zero value and the field is marked as omitempty do
|
||||
// not include
|
||||
if tagOpts.Has("omitempty") {
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
current := val.Interface()
|
||||
|
||||
if reflect.DeepEqual(current, zero) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tagOpts.Has("string") {
|
||||
s, ok := val.Interface().(fmt.Stringer)
|
||||
if ok {
|
||||
t = append(t, s.String())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
// look out for embedded structs, and convert them to a
|
||||
// []interface{} to be added to the final values slice
|
||||
t = append(t, Values(val.Interface())...)
|
||||
} else {
|
||||
t = append(t, val.Interface())
|
||||
}
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// Fields returns a slice of Fields. A struct tag with the content of "-"
|
||||
// ignores the checking of that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// It panics if s's kind is not struct.
|
||||
func (s *Struct) Fields() []*Field {
|
||||
return getFields(s.value, s.TagName)
|
||||
}
|
||||
|
||||
// Names returns a slice of field names. A struct tag with the content of "-"
|
||||
// ignores the checking of that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// It panics if s's kind is not struct.
|
||||
func (s *Struct) Names() []string {
|
||||
fields := getFields(s.value, s.TagName)
|
||||
|
||||
names := make([]string, len(fields))
|
||||
|
||||
for i, field := range fields {
|
||||
names[i] = field.Name()
|
||||
}
|
||||
|
||||
return names
|
||||
}
|
||||
|
||||
func getFields(v reflect.Value, tagName string) []*Field {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
t := v.Type()
|
||||
|
||||
var fields []*Field
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
|
||||
if tag := field.Tag.Get(tagName); tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
f := &Field{
|
||||
field: field,
|
||||
value: v.FieldByName(field.Name),
|
||||
}
|
||||
|
||||
fields = append(fields, f)
|
||||
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// Field returns a new Field struct that provides several high level functions
|
||||
// around a single struct field entity. It panics if the field is not found.
|
||||
func (s *Struct) Field(name string) *Field {
|
||||
f, ok := s.FieldOk(name)
|
||||
if !ok {
|
||||
panic("field not found")
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// FieldOk returns a new Field struct that provides several high level functions
|
||||
// around a single struct field entity. The boolean returns true if the field
|
||||
// was found.
|
||||
func (s *Struct) FieldOk(name string) (*Field, bool) {
|
||||
t := s.value.Type()
|
||||
|
||||
field, ok := t.FieldByName(name)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &Field{
|
||||
field: field,
|
||||
value: s.value.FieldByName(name),
|
||||
defaultTag: s.TagName,
|
||||
}, true
|
||||
}
|
||||
|
||||
// IsZero returns true if all fields in a struct is a zero value (not
|
||||
// initialized) A struct tag with the content of "-" ignores the checking of
|
||||
// that particular field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// A value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Field is not processed further by this package.
|
||||
// Field time.Time `structs:"myName,omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected. It panics if s's kind is not struct.
|
||||
func (s *Struct) IsZero() bool {
|
||||
fields := s.structFields()
|
||||
|
||||
for _, field := range fields {
|
||||
val := s.value.FieldByName(field.Name)
|
||||
|
||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
ok := IsZero(val.Interface())
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// zero value of the given field, such as "" for string, 0 for int
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
|
||||
// current value of the given field
|
||||
current := val.Interface()
|
||||
|
||||
if !reflect.DeepEqual(current, zero) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// HasZero returns true if a field in a struct is not initialized (zero value).
|
||||
// A struct tag with the content of "-" ignores the checking of that particular
|
||||
// field. Example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field bool `structs:"-"`
|
||||
//
|
||||
// A value with the option of "omitnested" stops iterating further if the type
|
||||
// is a struct. Example:
|
||||
//
|
||||
// // Field is not processed further by this package.
|
||||
// Field time.Time `structs:"myName,omitnested"`
|
||||
// Field *http.Request `structs:",omitnested"`
|
||||
//
|
||||
// Note that only exported fields of a struct can be accessed, non exported
|
||||
// fields will be neglected. It panics if s's kind is not struct.
|
||||
func (s *Struct) HasZero() bool {
|
||||
fields := s.structFields()
|
||||
|
||||
for _, field := range fields {
|
||||
val := s.value.FieldByName(field.Name)
|
||||
|
||||
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
|
||||
|
||||
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
|
||||
ok := HasZero(val.Interface())
|
||||
if ok {
|
||||
return true
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// zero value of the given field, such as "" for string, 0 for int
|
||||
zero := reflect.Zero(val.Type()).Interface()
|
||||
|
||||
// current value of the given field
|
||||
current := val.Interface()
|
||||
|
||||
if reflect.DeepEqual(current, zero) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Name returns the structs's type name within its package. For more info refer
|
||||
// to Name() function.
|
||||
func (s *Struct) Name() string {
|
||||
return s.value.Type().Name()
|
||||
}
|
||||
|
||||
// structFields returns the exported struct fields for a given s struct. This
|
||||
// is a convenient helper method to avoid duplicate code in some of the
|
||||
// functions.
|
||||
func (s *Struct) structFields() []reflect.StructField {
|
||||
t := s.value.Type()
|
||||
|
||||
var f []reflect.StructField
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
// we can't access the value of unexported fields
|
||||
if field.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// don't check if it's omitted
|
||||
if tag := field.Tag.Get(s.TagName); tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
f = append(f, field)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func strctVal(s interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(s)
|
||||
|
||||
// if pointer get the underlying element≤
|
||||
for v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Struct {
|
||||
panic("not struct")
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Map converts the given struct to a map[string]interface{}. For more info
|
||||
// refer to Struct types Map() method. It panics if s's kind is not struct.
|
||||
func Map(s interface{}) map[string]interface{} {
|
||||
return New(s).Map()
|
||||
}
|
||||
|
||||
// FillMap is the same as Map. Instead of returning the output, it fills the
|
||||
// given map.
|
||||
func FillMap(s interface{}, out map[string]interface{}) {
|
||||
New(s).FillMap(out)
|
||||
}
|
||||
|
||||
// Values converts the given struct to a []interface{}. For more info refer to
|
||||
// Struct types Values() method. It panics if s's kind is not struct.
|
||||
func Values(s interface{}) []interface{} {
|
||||
return New(s).Values()
|
||||
}
|
||||
|
||||
// Fields returns a slice of *Field. For more info refer to Struct types
|
||||
// Fields() method. It panics if s's kind is not struct.
|
||||
func Fields(s interface{}) []*Field {
|
||||
return New(s).Fields()
|
||||
}
|
||||
|
||||
// Names returns a slice of field names. For more info refer to Struct types
|
||||
// Names() method. It panics if s's kind is not struct.
|
||||
func Names(s interface{}) []string {
|
||||
return New(s).Names()
|
||||
}
|
||||
|
||||
// IsZero returns true if all fields is equal to a zero value. For more info
|
||||
// refer to Struct types IsZero() method. It panics if s's kind is not struct.
|
||||
func IsZero(s interface{}) bool {
|
||||
return New(s).IsZero()
|
||||
}
|
||||
|
||||
// HasZero returns true if any field is equal to a zero value. For more info
|
||||
// refer to Struct types HasZero() method. It panics if s's kind is not struct.
|
||||
func HasZero(s interface{}) bool {
|
||||
return New(s).HasZero()
|
||||
}
|
||||
|
||||
// IsStruct returns true if the given variable is a struct or a pointer to
|
||||
// struct.
|
||||
func IsStruct(s interface{}) bool {
|
||||
v := reflect.ValueOf(s)
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
// uninitialized zero value of a struct
|
||||
if v.Kind() == reflect.Invalid {
|
||||
return false
|
||||
}
|
||||
|
||||
return v.Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
// Name returns the structs's type name within its package. It returns an
|
||||
// empty string for unnamed types. It panics if s's kind is not struct.
|
||||
func Name(s interface{}) string {
|
||||
return New(s).Name()
|
||||
}
|
||||
|
||||
// nested retrieves recursively all types for the given value and returns the
|
||||
// nested value.
|
||||
func (s *Struct) nested(val reflect.Value) interface{} {
|
||||
var finalVal interface{}
|
||||
|
||||
v := reflect.ValueOf(val.Interface())
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
n := New(val.Interface())
|
||||
n.TagName = s.TagName
|
||||
m := n.Map()
|
||||
|
||||
// do not add the converted value if there are no exported fields, ie:
|
||||
// time.Time
|
||||
if len(m) == 0 {
|
||||
finalVal = val.Interface()
|
||||
} else {
|
||||
finalVal = m
|
||||
}
|
||||
case reflect.Map:
|
||||
// get the element type of the map
|
||||
mapElem := val.Type()
|
||||
switch val.Type().Kind() {
|
||||
case reflect.Ptr, reflect.Array, reflect.Map,
|
||||
reflect.Slice, reflect.Chan:
|
||||
mapElem = val.Type().Elem()
|
||||
if mapElem.Kind() == reflect.Ptr {
|
||||
mapElem = mapElem.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// only iterate over struct types, ie: map[string]StructType,
|
||||
// map[string][]StructType,
|
||||
if mapElem.Kind() == reflect.Struct ||
|
||||
(mapElem.Kind() == reflect.Slice &&
|
||||
mapElem.Elem().Kind() == reflect.Struct) {
|
||||
m := make(map[string]interface{}, val.Len())
|
||||
for _, k := range val.MapKeys() {
|
||||
m[k.String()] = s.nested(val.MapIndex(k))
|
||||
}
|
||||
finalVal = m
|
||||
break
|
||||
}
|
||||
|
||||
// TODO(arslan): should this be optional?
|
||||
finalVal = val.Interface()
|
||||
case reflect.Slice, reflect.Array:
|
||||
if val.Type().Kind() == reflect.Interface {
|
||||
finalVal = val.Interface()
|
||||
break
|
||||
}
|
||||
|
||||
// TODO(arslan): should this be optional?
|
||||
// do not iterate of non struct types, just pass the value. Ie: []int,
|
||||
// []string, co... We only iterate further if it's a struct.
|
||||
// i.e []foo or []*foo
|
||||
if val.Type().Elem().Kind() != reflect.Struct &&
|
||||
!(val.Type().Elem().Kind() == reflect.Ptr &&
|
||||
val.Type().Elem().Elem().Kind() == reflect.Struct) {
|
||||
finalVal = val.Interface()
|
||||
break
|
||||
}
|
||||
|
||||
slices := make([]interface{}, val.Len())
|
||||
for x := 0; x < val.Len(); x++ {
|
||||
slices[x] = s.nested(val.Index(x))
|
||||
}
|
||||
finalVal = slices
|
||||
default:
|
||||
finalVal = val.Interface()
|
||||
}
|
||||
|
||||
return finalVal
|
||||
}
|
32  vendor/github.com/fatih/structs/tags.go  generated  vendored  Normal file
@@ -0,0 +1,32 @@
package structs

import "strings"

// tagOptions contains a slice of tag options
type tagOptions []string

// Has returns true if the given option is available in tagOptions
func (t tagOptions) Has(opt string) bool {
	for _, tagOpt := range t {
		if tagOpt == opt {
			return true
		}
	}

	return false
}

// parseTag splits a struct field's tag into its name and a list of options
// which comes after a name. A tag is in the form of: "name,option1,option2".
// The name can be neglectected.
func parseTag(tag string) (string, tagOptions) {
	// tag is one of followings:
	// ""
	// "name"
	// "name,opt"
	// "name,opt,opt2"
	// ",opt"

	res := strings.Split(tag, ",")
	return res[0], res[1:]
}
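A small sketch of how the `structs` tag options documented in structs.go (and parsed by parseTag above) look from a caller's side; the Server type here is hypothetical and not part of the vendored files:

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

// Hypothetical type showing the name, omitempty, and "-" tag behaviors.
type Server struct {
	Name    string `structs:"name"`
	ID      int    `structs:"id,omitempty"` // dropped from the map when zero
	Enabled bool   `structs:"-"`            // ignored entirely
}

func main() {
	// Map accepts a struct or a pointer to one (strctVal dereferences pointers).
	m := structs.Map(&Server{Name: "gopher"})
	fmt.Println(m) // map[name:gopher] — "id" is zero so omitted, "Enabled" skipped
}
```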
@@ -1,4 +1,4 @@
Copyright (c) 2019 The Go Authors. All rights reserved.
Copyright (c) 2013 Google. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
320  vendor/github.com/google/go-querystring/query/encode.go  generated  vendored  Normal file
@@ -0,0 +1,320 @@
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package query implements encoding of structs into URL query parameters.
|
||||
//
|
||||
// As a simple example:
|
||||
//
|
||||
// type Options struct {
|
||||
// Query string `url:"q"`
|
||||
// ShowAll bool `url:"all"`
|
||||
// Page int `url:"page"`
|
||||
// }
|
||||
//
|
||||
// opt := Options{ "foo", true, 2 }
|
||||
// v, _ := query.Values(opt)
|
||||
// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
|
||||
//
|
||||
// The exact mapping between Go values and url.Values is described in the
|
||||
// documentation for the Values() function.
|
||||
package query
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var timeType = reflect.TypeOf(time.Time{})
|
||||
|
||||
var encoderType = reflect.TypeOf(new(Encoder)).Elem()
|
||||
|
||||
// Encoder is an interface implemented by any type that wishes to encode
|
||||
// itself into URL values in a non-standard way.
|
||||
type Encoder interface {
|
||||
EncodeValues(key string, v *url.Values) error
|
||||
}
|
||||
|
||||
// Values returns the url.Values encoding of v.
|
||||
//
|
||||
// Values expects to be passed a struct, and traverses it recursively using the
|
||||
// following encoding rules.
|
||||
//
|
||||
// Each exported struct field is encoded as a URL parameter unless
|
||||
//
|
||||
// - the field's tag is "-", or
|
||||
// - the field is empty and its tag specifies the "omitempty" option
|
||||
//
|
||||
// The empty values are false, 0, any nil pointer or interface value, any array
|
||||
// slice, map, or string of length zero, and any time.Time that returns true
|
||||
// for IsZero().
|
||||
//
|
||||
// The URL parameter name defaults to the struct field name but can be
|
||||
// specified in the struct field's tag value. The "url" key in the struct
|
||||
// field's tag value is the key name, followed by an optional comma and
|
||||
// options. For example:
|
||||
//
|
||||
// // Field is ignored by this package.
|
||||
// Field int `url:"-"`
|
||||
//
|
||||
// // Field appears as URL parameter "myName".
|
||||
// Field int `url:"myName"`
|
||||
//
|
||||
// // Field appears as URL parameter "myName" and the field is omitted if
|
||||
// // its value is empty
|
||||
// Field int `url:"myName,omitempty"`
|
||||
//
|
||||
// // Field appears as URL parameter "Field" (the default), but the field
|
||||
// // is skipped if empty. Note the leading comma.
|
||||
// Field int `url:",omitempty"`
|
||||
//
|
||||
// For encoding individual field values, the following type-dependent rules
|
||||
// apply:
|
||||
//
|
||||
// Boolean values default to encoding as the strings "true" or "false".
|
||||
// Including the "int" option signals that the field should be encoded as the
|
||||
// strings "1" or "0".
|
||||
//
|
||||
// time.Time values default to encoding as RFC3339 timestamps. Including the
|
||||
// "unix" option signals that the field should be encoded as a Unix time (see
|
||||
// time.Unix())
|
||||
//
|
||||
// Slice and Array values default to encoding as multiple URL values of the
|
||||
// same name. Including the "comma" option signals that the field should be
|
||||
// encoded as a single comma-delimited value. Including the "space" option
|
||||
// similarly encodes the value as a single space-delimited string. Including
|
||||
// the "semicolon" option will encode the value as a semicolon-delimited string.
|
||||
// Including the "brackets" option signals that the multiple URL values should
|
||||
// have "[]" appended to the value name. "numbered" will append a number to
|
||||
// the end of each incidence of the value name, example:
|
||||
// name0=value0&name1=value1, etc.
|
||||
//
|
||||
// Anonymous struct fields are usually encoded as if their inner exported
|
||||
// fields were fields in the outer struct, subject to the standard Go
|
||||
// visibility rules. An anonymous struct field with a name given in its URL
|
||||
// tag is treated as having that name, rather than being anonymous.
|
||||
//
|
||||
// Non-nil pointer values are encoded as the value pointed to.
|
||||
//
|
||||
// Nested structs are encoded including parent fields in value names for
|
||||
// scoping. e.g:
|
||||
//
|
||||
// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
|
||||
//
|
||||
// All other values are encoded using their default string representation.
|
||||
//
|
||||
// Multiple fields that encode to the same URL parameter name will be included
|
||||
// as multiple URL values of the same name.
|
||||
func Values(v interface{}) (url.Values, error) {
|
||||
values := make(url.Values)
|
||||
val := reflect.ValueOf(v)
|
||||
for val.Kind() == reflect.Ptr {
|
||||
if val.IsNil() {
|
||||
return values, nil
|
||||
}
|
||||
val = val.Elem()
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
return values, nil
|
||||
}
|
||||
|
||||
if val.Kind() != reflect.Struct {
|
||||
return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
|
||||
}
|
||||
|
||||
err := reflectValue(values, val, "")
|
||||
return values, err
|
||||
}
|
||||
|
||||
// reflectValue populates the values parameter from the struct fields in val.
|
||||
// Embedded structs are followed recursively (using the rules defined in the
|
||||
// Values function documentation) breadth-first.
|
||||
func reflectValue(values url.Values, val reflect.Value, scope string) error {
|
||||
var embedded []reflect.Value
|
||||
|
||||
typ := val.Type()
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
sf := typ.Field(i)
|
||||
if sf.PkgPath != "" && !sf.Anonymous { // unexported
|
||||
continue
|
||||
}
|
||||
|
||||
sv := val.Field(i)
|
||||
tag := sf.Tag.Get("url")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
name, opts := parseTag(tag)
|
||||
if name == "" {
|
||||
if sf.Anonymous && sv.Kind() == reflect.Struct {
|
||||
// save embedded struct for later processing
|
||||
embedded = append(embedded, sv)
|
||||
continue
|
||||
}
|
||||
|
||||
name = sf.Name
|
||||
}
|
||||
|
||||
if scope != "" {
|
||||
name = scope + "[" + name + "]"
|
||||
}
|
||||
|
||||
if opts.Contains("omitempty") && isEmptyValue(sv) {
|
||||
continue
|
||||
}
|
||||
|
||||
if sv.Type().Implements(encoderType) {
|
||||
if !reflect.Indirect(sv).IsValid() {
|
||||
sv = reflect.New(sv.Type().Elem())
|
||||
}
|
||||
|
||||
m := sv.Interface().(Encoder)
|
||||
if err := m.EncodeValues(name, &values); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
|
||||
var del byte
|
||||
if opts.Contains("comma") {
|
||||
del = ','
|
||||
} else if opts.Contains("space") {
|
||||
del = ' '
|
||||
} else if opts.Contains("semicolon") {
|
||||
del = ';'
|
||||
} else if opts.Contains("brackets") {
|
||||
name = name + "[]"
|
||||
}
|
||||
|
||||
if del != 0 {
|
||||
s := new(bytes.Buffer)
|
||||
first := true
|
||||
for i := 0; i < sv.Len(); i++ {
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
s.WriteByte(del)
|
||||
}
|
||||
s.WriteString(valueString(sv.Index(i), opts))
|
||||
}
|
||||
values.Add(name, s.String())
|
||||
} else {
|
||||
for i := 0; i < sv.Len(); i++ {
|
||||
k := name
|
||||
if opts.Contains("numbered") {
|
||||
k = fmt.Sprintf("%s%d", name, i)
|
||||
}
|
||||
values.Add(k, valueString(sv.Index(i), opts))
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for sv.Kind() == reflect.Ptr {
|
||||
if sv.IsNil() {
|
||||
break
|
||||
}
|
||||
sv = sv.Elem()
|
||||
}
|
||||
|
||||
if sv.Type() == timeType {
|
||||
values.Add(name, valueString(sv, opts))
|
||||
continue
|
||||
}
|
||||
|
||||
if sv.Kind() == reflect.Struct {
|
||||
reflectValue(values, sv, name)
|
||||
continue
|
||||
}
|
||||
|
||||
values.Add(name, valueString(sv, opts))
|
||||
}
|
||||
|
||||
for _, f := range embedded {
|
||||
if err := reflectValue(values, f, scope); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// valueString returns the string representation of a value.
|
||||
func valueString(v reflect.Value, opts tagOptions) string {
|
||||
for v.Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
return ""
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Bool && opts.Contains("int") {
|
||||
if v.Bool() {
|
||||
return "1"
|
||||
}
|
||||
return "0"
|
||||
}
|
||||
|
||||
if v.Type() == timeType {
|
||||
t := v.Interface().(time.Time)
|
||||
if opts.Contains("unix") {
|
||||
return strconv.FormatInt(t.Unix(), 10)
|
||||
}
|
||||
return t.Format(time.RFC3339)
|
||||
}
|
||||
|
||||
return fmt.Sprint(v.Interface())
|
||||
}
|
||||
|
||||
// isEmptyValue checks if a value should be considered empty for the purposes
|
||||
// of omitting fields with the "omitempty" option.
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
|
||||
if v.Type() == timeType {
|
||||
return v.Interface().(time.Time).IsZero()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// tagOptions is the string following a comma in a struct field's "url" tag, or
|
||||
// the empty string. It does not include the leading comma.
|
||||
type tagOptions []string
|
||||
|
||||
// parseTag splits a struct field's url tag into its name and comma-separated
|
||||
// options.
|
||||
func parseTag(tag string) (string, tagOptions) {
|
||||
s := strings.Split(tag, ",")
|
||||
return s[0], s[1:]
|
||||
}
|
||||
|
||||
// Contains checks whether the tagOptions contains the specified option.
|
||||
func (o tagOptions) Contains(option string) bool {
|
||||
for _, s := range o {
|
||||
if s == option {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
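A short sketch exercising the url tag options described in the Values() documentation above (comma-joined slices, unix timestamps, omitempty); the Options type and its values are made up for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-querystring/query"
)

// Hypothetical options struct using the documented tag options.
type Options struct {
	Query string    `url:"q"`
	Tags  []string  `url:"tag,comma"`
	Since time.Time `url:"since,unix"`
	Page  int       `url:"page,omitempty"`
}

func main() {
	v, err := query.Values(Options{
		Query: "ingress",
		Tags:  []string{"nginx", "k8s"},
		Since: time.Unix(1580000000, 0),
	})
	if err != nil {
		panic(err)
	}
	// Page is zero, so it is omitted; Tags encode as "nginx,k8s";
	// Since encodes as a Unix timestamp.
	fmt.Println(v.Encode())
}
```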
25  vendor/github.com/gorilla/websocket/.gitignore  generated  vendored  Normal file
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.idea/
*.iml
19  vendor/github.com/gorilla/websocket/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,19 @@
language: go
sudo: false

matrix:
  include:
    - go: 1.7.x
    - go: 1.8.x
    - go: 1.9.x
    - go: 1.10.x
    - go: 1.11.x
    - go: tip
  allow_failures:
    - go: tip

script:
  - go get -t -v ./...
  - diff -u <(echo -n) <(gofmt -d .)
  - go vet $(go list ./... | grep -v /vendor/)
  - go test -v -race ./...
9  vendor/github.com/gorilla/websocket/AUTHORS  generated  vendored  Normal file
@@ -0,0 +1,9 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>

22  vendor/github.com/gorilla/websocket/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
64  vendor/github.com/gorilla/websocket/README.md  generated  vendored  Normal file
@@ -0,0 +1,64 @@
# Gorilla WebSocket
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
[](https://travis-ci.org/gorilla/websocket)
|
||||
[](https://godoc.org/github.com/gorilla/websocket)
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](http://godoc.org/github.com/gorilla/websocket)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
|
||||
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</tr></td>
|
||||
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
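A minimal client sketch against the Dialer/Conn API declared in client.go below; the echo endpoint URL is only a placeholder:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// DefaultDialer performs the opening handshake; the URL is a placeholder.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	// Send a single text frame.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}

	// Read the next complete message from the peer.
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("received: %s", msg)
}
```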
395  vendor/github.com/gorilla/websocket/client.go  generated  vendored  Normal file
@@ -0,0 +1,395 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is the dialer to use when the receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
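As an illustrative sketch only (not part of the vendored file), this is how an application might use the Dialer and DialContext documented above; the URL and header values are hypothetical.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{
		HandshakeTimeout:  10 * time.Second,
		Proxy:             http.ProxyFromEnvironment,
		EnableCompression: true,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	header := http.Header{}
	header.Set("Origin", "https://example.com") // hypothetical origin

	conn, resp, err := d.DialContext(ctx, "wss://example.com/ws", header)
	if err != nil {
		// On a failed handshake a non-nil *http.Response may still be
		// returned so the status can be inspected.
		if err == websocket.ErrBadHandshake && resp != nil {
			log.Printf("handshake failed with status %d", resp.StatusCode)
		}
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal(err)
	}
}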
16 vendor/github.com/gorilla/websocket/client_clone.go (generated, vendored, new file)
@@ -0,0 +1,16 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}
38 vendor/github.com/gorilla/websocket/client_clone_legacy.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}
148 vendor/github.com/gorilla/websocket/compression.go (generated, vendored, new file)
@@ -0,0 +1,148 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
|
||||
maxCompressionLevel = flate.BestCompression
|
||||
defaultCompressionLevel = 1
|
||||
)
|
||||
|
||||
var (
|
||||
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
|
||||
flateReaderPool = sync.Pool{New: func() interface{} {
|
||||
return flate.NewReader(nil)
|
||||
}}
|
||||
)
|
||||
|
||||
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
|
||||
const tail =
|
||||
// Add four bytes as specified in RFC
|
||||
"\x00\x00\xff\xff" +
|
||||
// Add final block to squelch unexpected EOF error from flate reader.
|
||||
"\x01\x00\x00\xff\xff"
|
||||
|
||||
fr, _ := flateReaderPool.Get().(io.ReadCloser)
|
||||
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
|
||||
return &flateReadWrapper{fr}
|
||||
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
|
||||
p := &flateWriterPools[level-minCompressionLevel]
|
||||
tw := &truncWriter{w: w}
|
||||
fw, _ := p.Get().(*flate.Writer)
|
||||
if fw == nil {
|
||||
fw, _ = flate.NewWriter(tw, level)
|
||||
} else {
|
||||
fw.Reset(tw)
|
||||
}
|
||||
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
|
||||
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
|
||||
// stream to another io.Writer.
|
||||
type truncWriter struct {
|
||||
w io.WriteCloser
|
||||
n int
|
||||
p [4]byte
|
||||
}
|
||||
|
||||
func (w *truncWriter) Write(p []byte) (int, error) {
|
||||
n := 0
|
||||
|
||||
// fill buffer first for simplicity.
|
||||
if w.n < len(w.p) {
|
||||
n = copy(w.p[w.n:], p)
|
||||
p = p[n:]
|
||||
w.n += n
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
m := len(p)
|
||||
if m > len(w.p) {
|
||||
m = len(w.p)
|
||||
}
|
||||
|
||||
if nn, err := w.w.Write(w.p[:m]); err != nil {
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
copy(w.p[:], w.p[m:])
|
||||
copy(w.p[len(w.p)-m:], p[len(p)-m:])
|
||||
nn, err := w.w.Write(p[:len(p)-m])
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
type flateWriteWrapper struct {
|
||||
fw *flate.Writer
|
||||
tw *truncWriter
|
||||
p *sync.Pool
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Close() error {
|
||||
if w.fw == nil {
|
||||
return errWriteClosed
|
||||
}
|
||||
err1 := w.fw.Flush()
|
||||
w.p.Put(w.fw)
|
||||
w.fw = nil
|
||||
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
|
||||
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
|
||||
}
|
||||
err2 := w.tw.w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
type flateReadWrapper struct {
|
||||
fr io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
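For illustration only (not part of the vendored source), enabling the per-message deflate negotiation implemented above might look like this on the server side; the handler path, port, and compression level are arbitrary choices.

package main

import (
	"compress/flate"
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// Compression is negotiated, not forced: EnableCompression only advertises
// support during the handshake.
var upgrader = websocket.Upgrader{EnableCompression: true}

func handler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	defer conn.Close()

	// Per-connection knobs: toggle write compression and pick a level within
	// the range accepted by isValidCompressionLevel.
	conn.EnableWriteCompression(true)
	if err := conn.SetCompressionLevel(flate.BestSpeed); err != nil {
		log.Println(err)
	}
	_ = conn.WriteMessage(websocket.TextMessage, []byte("compressed if negotiated"))
}

func main() {
	http.HandleFunc("/ws", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}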
1165 vendor/github.com/gorilla/websocket/conn.go (generated, vendored, new file)
Diff suppressed because it is too large.
15 vendor/github.com/gorilla/websocket/conn_write.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "net"

func (c *Conn) writeBufs(bufs ...[]byte) error {
	b := net.Buffers(bufs)
	_, err := b.WriteTo(c.conn)
	return err
}
18 vendor/github.com/gorilla/websocket/conn_write_legacy.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

func (c *Conn) writeBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) > 0 {
			if _, err := c.conn.Write(buf); err != nil {
				return err
			}
		}
	}
	return nil
}
180 vendor/github.com/gorilla/websocket/doc.go (generated, vendored, new file)
@@ -0,0 +1,180 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
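As a hedged, illustrative complement to the control-message guidance in the package documentation above (not part of the vendored file), an application that sends pings might pair a pong handler with a read deadline roughly like this; the package name, interval values, and helper name are invented for the sketch.

package example

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

const (
	pongWait   = 60 * time.Second
	pingPeriod = 50 * time.Second // must be shorter than pongWait
)

// keepAlive sends periodic pings and extends the read deadline whenever a
// pong arrives. The caller must also be reading the connection (for example
// the readLoop shown in the package documentation), otherwise control frames
// are never processed.
func keepAlive(conn *websocket.Conn, done <-chan struct{}) {
	conn.SetReadDeadline(time.Now().Add(pongWait))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(pongWait))
	})

	ticker := time.NewTicker(pingPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			deadline := time.Now().Add(10 * time.Second)
			if err := conn.WriteControl(websocket.PingMessage, nil, deadline); err != nil {
				log.Println("ping:", err)
				return
			}
		case <-done:
			return
		}
	}
}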
60 vendor/github.com/gorilla/websocket/json.go (generated, vendored, new file)
@@ -0,0 +1,60 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"encoding/json"
	"io"
)

// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}

// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
func (c *Conn) WriteJSON(v interface{}) error {
	w, err := c.NextWriter(TextMessage)
	if err != nil {
		return err
	}
	err1 := json.NewEncoder(w).Encode(v)
	err2 := w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
	_, r, err := c.NextReader()
	if err != nil {
		return err
	}
	err = json.NewDecoder(r).Decode(v)
	if err == io.EOF {
		// One value is expected in the message.
		err = io.ErrUnexpectedEOF
	}
	return err
}
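A small, illustrative sketch (not part of the vendored file) of the WriteJSON/ReadJSON helpers above; the package name, Message type, and field names are invented for the example, and an established connection is assumed.

package example

import "github.com/gorilla/websocket"

// Message is a hypothetical payload type used only for this sketch.
type Message struct {
	Kind string `json:"kind"`
	Body string `json:"body"`
}

// sendAndReceive writes one JSON message and then decodes the next JSON
// message from the peer into a Message value.
func sendAndReceive(conn *websocket.Conn) (Message, error) {
	if err := conn.WriteJSON(Message{Kind: "greeting", Body: "hello"}); err != nil {
		return Message{}, err
	}
	var reply Message
	// ReadJSON decodes a single JSON value from the next message; an empty
	// message yields io.ErrUnexpectedEOF.
	err := conn.ReadJSON(&reply)
	return reply, err
}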
54 vendor/github.com/gorilla/websocket/mask.go (generated, vendored, new file)
@@ -0,0 +1,54 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
|
||||
// this source code is governed by a BSD-style license that can be found in the
|
||||
// LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package websocket
|
||||
|
||||
import "unsafe"
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
// Mask one byte at a time for small buffers.
|
||||
if len(b) < 2*wordSize {
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
return pos & 3
|
||||
}
|
||||
|
||||
// Mask one byte at a time to word boundary.
|
||||
if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
|
||||
n = wordSize - n
|
||||
for i := range b[:n] {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
b = b[n:]
|
||||
}
|
||||
|
||||
// Create aligned word size key.
|
||||
var k [wordSize]byte
|
||||
for i := range k {
|
||||
k[i] = key[(pos+i)&3]
|
||||
}
|
||||
kw := *(*uintptr)(unsafe.Pointer(&k))
|
||||
|
||||
// Mask one word at a time.
|
||||
n := (len(b) / wordSize) * wordSize
|
||||
for i := 0; i < n; i += wordSize {
|
||||
*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
|
||||
}
|
||||
|
||||
// Mask one byte at a time for remaining bytes.
|
||||
b = b[n:]
|
||||
for i := range b {
|
||||
b[i] ^= key[pos&3]
|
||||
pos++
|
||||
}
|
||||
|
||||
return pos & 3
|
||||
}
15 vendor/github.com/gorilla/websocket/mask_safe.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build appengine

package websocket

func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
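For illustration (not part of the vendored source), the masking transform above is a position-keyed XOR, so applying it twice with the same key restores the original bytes; this standalone sketch re-implements the same loop rather than calling the unexported function.

package main

import (
	"bytes"
	"fmt"
)

// xorMask applies the RFC 6455 client masking transform in place and returns
// the updated key position, mirroring the portable maskBytes loop above.
func xorMask(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}

func main() {
	key := [4]byte{0x12, 0x34, 0x56, 0x78}
	payload := []byte("masking is an involution")
	original := append([]byte(nil), payload...)

	xorMask(key, 0, payload) // mask
	xorMask(key, 0, payload) // unmask
	fmt.Println(bytes.Equal(payload, original)) // prints: true
}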
102 vendor/github.com/gorilla/websocket/prepared.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
|
||||
// it to connection using WritePreparedMessage method. Valid wire
|
||||
// representation will be calculated lazily only once for a set of current
|
||||
// connection options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan bool, 1)
|
||||
mu <- true
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
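An illustrative sketch (not part of the vendored file) of the broadcast use case described in the PreparedMessage comment above; the package name and the assumption that conns holds already established connections are both hypothetical.

package example

import (
	"log"

	"github.com/gorilla/websocket"
)

// broadcast encodes the payload once and reuses the prepared frames for every
// connection, so framing (and compression, when negotiated) costs are paid a
// single time per set of connection options.
func broadcast(conns []*websocket.Conn, payload []byte) {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		log.Println("prepare:", err)
		return
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Println("write:", err)
		}
	}
}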
77 vendor/github.com/gorilla/websocket/proxy.go (generated, vendored, new file)
@@ -0,0 +1,77 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(network, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
fowardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.fowardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard buffered reader here because
|
||||
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
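For illustration only (not part of the vendored source), routing the dial through the HTTP CONNECT support above is a matter of setting Dialer.Proxy; the proxy address and target URL here are hypothetical.

package main

import (
	"log"
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

func main() {
	// A fixed CONNECT proxy; credentials in the URL become a
	// Proxy-Authorization header as implemented above.
	proxyURL, err := url.Parse("http://user:pass@proxy.example.com:3128")
	if err != nil {
		log.Fatal(err)
	}

	d := websocket.Dialer{
		Proxy: http.ProxyURL(proxyURL), // or http.ProxyFromEnvironment
	}
	conn, _, err := d.Dial("wss://example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}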
363 vendor/github.com/gorilla/websocket/server.go (generated, vendored, new file)
@@ -0,0 +1,363 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it via
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
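As an illustrative sketch only (not part of the vendored file), a handler wiring up the Upgrader options documented above might look like this; the allowed origin, subprotocol names, and port are hypothetical.

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	// Server-preferred subprotocols; the first one also requested by the
	// client is selected and echoed in Sec-WebSocket-Protocol.
	Subprotocols: []string{"chat.v2", "chat.v1"},
	// Replace the same-origin default with an explicit allow list.
	CheckOrigin: func(r *http.Request) bool {
		return r.Header.Get("Origin") == "https://app.example.com"
	},
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	log.Println("negotiated subprotocol:", conn.Subprotocol())
}

func main() {
	http.HandleFunc("/ws", wsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}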
19 vendor/github.com/gorilla/websocket/trace.go (generated, vendored, new file)
@@ -0,0 +1,19 @@
// +build go1.8

package websocket

import (
	"crypto/tls"
	"net/http/httptrace"
)

func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	if trace.TLSHandshakeStart != nil {
		trace.TLSHandshakeStart()
	}
	err := doHandshake(tlsConn, cfg)
	if trace.TLSHandshakeDone != nil {
		trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
	}
	return err
}
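A hedged, illustrative sketch (not part of the vendored file) showing how the httptrace hooks used above can be observed from application code on Go 1.8+; the URL is hypothetical.

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http/httptrace"

	"github.com/gorilla/websocket"
)

func main() {
	trace := &httptrace.ClientTrace{
		GetConn:           func(hostPort string) { log.Println("dialing", hostPort) },
		TLSHandshakeStart: func() { log.Println("TLS handshake started") },
		TLSHandshakeDone: func(cs tls.ConnectionState, err error) {
			log.Println("TLS handshake done:", cs.Version, err)
		},
	}
	ctx := httptrace.WithClientTrace(context.Background(), trace)

	conn, _, err := websocket.DefaultDialer.DialContext(ctx, "wss://example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}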
12 vendor/github.com/gorilla/websocket/trace_17.go (generated, vendored, new file)
@@ -0,0 +1,12 @@
// +build !go1.8

package websocket

import (
	"crypto/tls"
	"net/http/httptrace"
)

func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
	return doHandshake(tlsConn, cfg)
}
237 vendor/github.com/gorilla/websocket/util.go (generated, vendored, new file)
@@ -0,0 +1,237 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
var octetTypes [256]byte
|
||||
|
||||
const (
|
||||
isTokenOctet = 1 << iota
|
||||
isSpaceOctet
|
||||
)
|
||||
|
||||
func init() {
|
||||
// From RFC 2616
|
||||
//
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t byte
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||
t |= isSpaceOctet
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isTokenOctet
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpaceOctet == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isTokenOctet == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
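As a rough illustration of what parseExtensions above produces, here is a hedged sketch; parseExtensions is unexported, so the snippet would have to live inside the package (for example in a _test.go file), and the header value and expected shape are made up for illustration:

```go
package websocket

import (
	"fmt"
	"net/http"
)

// demoParseExtensions is illustrative only; it is not part of the vendored file.
func demoParseExtensions() {
	h := http.Header{}
	h.Add("Sec-WebSocket-Extensions", `permessage-deflate; client_max_window_bits, mux; max-channels=4`)

	for _, ext := range parseExtensions(h) {
		// The extension name sits under the empty key; parameters sit under
		// their own names, with an empty value when none was given.
		fmt.Println(ext[""], ext["client_max_window_bits"], ext["max-channels"])
	}
	// Expected shape (inferred from the code above): one map for
	// "permessage-deflate" and one for "mux".
}
```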
473
vendor/github.com/gorilla/websocket/x_net_proxy.go
generated
vendored
Normal file
|
@ -0,0 +1,473 @@
|
|||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
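The file above is golang.org/x/net/proxy bundled into package websocket with a proxy_ prefix, so nothing in it is exported. A minimal sketch of the same flow using the public golang.org/x/net/proxy package (assuming ALL_PROXY/NO_PROXY are set in the environment and example.com:80 is reachable):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// FromEnvironment reads ALL_PROXY / NO_PROXY, mirroring the bundled
	// proxy_FromEnvironment above; it falls back to a direct dialer.
	dialer := proxy.FromEnvironment()

	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println("connected via", conn.RemoteAddr())
}
```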
1
vendor/github.com/imkira/go-interpol/.codebeatignore
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
examples/*
|
8
vendor/github.com/imkira/go-interpol/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
# MacOSX
|
||||
.DS_Store
|
||||
|
||||
# vim
|
||||
*.swp
|
||||
|
||||
# test
|
||||
coverage.*
|
21
vendor/github.com/imkira/go-interpol/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.7
|
||||
- tip
|
||||
|
||||
before_install:
|
||||
if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then make deps; fi
|
||||
|
||||
script:
|
||||
- if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then make gometalinter; fi
|
||||
- make test
|
||||
- make cover
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
20
vendor/github.com/imkira/go-interpol/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
Copyright (c) 2016 Mario Freitas (imkira@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
21
vendor/github.com/imkira/go-interpol/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
.PHONY: all deps gometalinter test cover
|
||||
|
||||
all: gometalinter test cover
|
||||
|
||||
deps:
|
||||
go get -u github.com/alecthomas/gometalinter
|
||||
gometalinter --install
|
||||
|
||||
gometalinter:
|
||||
gometalinter --vendor --deadline=1m --tests \
|
||||
--enable=gofmt \
|
||||
--enable=goimports \
|
||||
--enable=lll \
|
||||
--enable=misspell \
|
||||
--enable=unused
|
||||
|
||||
test:
|
||||
go test -v -race -cpu=1,2,4 -coverprofile=coverage.txt -covermode=atomic -benchmem -bench .
|
||||
|
||||
cover:
|
||||
go tool cover -html=coverage.txt -o coverage.html
|
74
vendor/github.com/imkira/go-interpol/README.md
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
# interpol
|
||||
|
||||
[](https://github.com/imkira/go-interpol/blob/master/LICENSE.txt)
|
||||
[](https://godoc.org/github.com/imkira/go-interpol)
|
||||
[](https://travis-ci.org/imkira/go-interpol)
|
||||
[](https://codecov.io/gh/imkira/go-interpol)
|
||||
[](https://codebeat.co/projects/github-com-imkira-go-interpol)
|
||||
[](https://goreportcard.com/report/github.com/imkira/go-interpol)
|
||||
|
||||
interpol is a [Go](http://golang.org) package for doing format-string like
|
||||
string interpolation using named parameters.
|
||||
|
||||
Currently, a template only accepts variable placeholders delimited by brace
|
||||
characters (eg. "Hello {foo} {bar}").
|
||||
|
||||
## Install
|
||||
|
||||
First, you need to install the package:
|
||||
|
||||
```sh
|
||||
go get -u github.com/imkira/go-interpol
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
For advanced usage, make sure to check the
|
||||
[available documentation here](http://godoc.org/github.com/imkira/go-interpol).
|
||||
|
||||
## Example
|
||||
|
||||
The following code uses the `interpol.WithMap` function, which simply
|
||||
replaces every key with the corresponding value of the specified map.
|
||||
When run, it should output `Hello World!!!`.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/imkira/go-interpol"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m := map[string]string{
|
||||
"foo": "Hello",
|
||||
"bar": "World",
|
||||
}
|
||||
str, err := interpol.WithMap("{foo} {bar}!!!", m)
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(str)
|
||||
}
|
||||
```
|
||||
|
||||
## Contribute
|
||||
|
||||
Found a bug? Want to contribute and add a new feature?
|
||||
|
||||
Please fork this project and send me a pull request!
|
||||
|
||||
## License
|
||||
|
||||
go-interpol is licensed under the MIT license:
|
||||
|
||||
www.opensource.org/licenses/MIT
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright (c) 2016 Mario Freitas. See
|
||||
[LICENSE](http://github.com/imkira/go-interpol/blob/master/LICENSE)
|
||||
for further details.
|
171
vendor/github.com/imkira/go-interpol/interpol.go
generated
vendored
Normal file
|
@ -0,0 +1,171 @@
|
|||
// Package interpol provides utility functions for doing format-string like
|
||||
// string interpolation using named parameters.
|
||||
// Currently, a template only accepts variable placeholders delimited by brace
|
||||
// characters (eg. "Hello {foo} {bar}").
|
||||
package interpol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Errors returned when formatting templates.
|
||||
var (
|
||||
ErrUnexpectedClose = errors.New("interpol: unexpected close in template")
|
||||
ErrExpectingClose = errors.New("interpol: expecting close in template")
|
||||
ErrKeyNotFound = errors.New("interpol: key not found")
|
||||
ErrReadByteFailed = errors.New("interpol: read byte failed")
|
||||
)
|
||||
|
||||
// Func receives the placeholder key and writes to the io.Writer. If an error
|
||||
// happens, the function can return an error, in which case the interpolation
|
||||
// will be aborted.
|
||||
type Func func(key string, w io.Writer) error
|
||||
|
||||
// New creates a new interpolator with the given list of options.
|
||||
// You can use options such as the ones returned by WithTemplate, WithFormat
|
||||
// and WithOutput.
|
||||
func New(opts ...Option) *Interpolator {
|
||||
opts2 := &Options{}
|
||||
setOptions(opts, newOptionSetter(opts2))
|
||||
return NewWithOptions(opts2)
|
||||
}
|
||||
|
||||
// NewWithOptions creates a new interpolator with the given options.
|
||||
func NewWithOptions(opts *Options) *Interpolator {
|
||||
return &Interpolator{
|
||||
template: templateReader(opts),
|
||||
output: outputWriter(opts),
|
||||
format: opts.Format,
|
||||
rb: make([]rune, 0, 64),
|
||||
start: -1,
|
||||
closing: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Interpolator interpolates Template to Output, according to Format.
|
||||
type Interpolator struct {
|
||||
template io.RuneReader
|
||||
output runeWriter
|
||||
format Func
|
||||
rb []rune
|
||||
start int
|
||||
closing bool
|
||||
}
|
||||
|
||||
// Interpolate reads runes from Template and writes them to Output, with the
|
||||
// exception of placeholders which are passed to Format.
|
||||
func (i *Interpolator) Interpolate() error {
|
||||
for pos := 0; ; pos++ {
|
||||
r, _, err := i.template.ReadRune()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := i.parse(r, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return i.finish()
|
||||
}
|
||||
|
||||
func (i *Interpolator) parse(r rune, pos int) error {
|
||||
switch r {
|
||||
case '{':
|
||||
return i.open(pos)
|
||||
case '}':
|
||||
return i.close()
|
||||
default:
|
||||
return i.append(r)
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Interpolator) open(pos int) error {
|
||||
if i.closing {
|
||||
return ErrUnexpectedClose
|
||||
}
|
||||
if i.start >= 0 {
|
||||
if _, err := i.output.WriteRune('{'); err != nil {
|
||||
return err
|
||||
}
|
||||
i.start = -1
|
||||
} else {
|
||||
i.start = pos + 1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Interpolator) close() error {
|
||||
if i.start >= 0 {
|
||||
if err := i.format(string(i.rb), i.output); err != nil {
|
||||
return err
|
||||
}
|
||||
i.rb = i.rb[:0]
|
||||
i.start = -1
|
||||
} else if i.closing {
|
||||
i.closing = false
|
||||
if _, err := i.output.WriteRune('}'); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
i.closing = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Interpolator) append(r rune) error {
|
||||
if i.closing {
|
||||
return ErrUnexpectedClose
|
||||
}
|
||||
if i.start < 0 {
|
||||
_, err := i.output.WriteRune(r)
|
||||
return err
|
||||
}
|
||||
i.rb = append(i.rb, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Interpolator) finish() error {
|
||||
if i.start >= 0 {
|
||||
return ErrExpectingClose
|
||||
}
|
||||
if i.closing {
|
||||
return ErrUnexpectedClose
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithFunc interpolates the specified template with replacements using the
|
||||
// given function.
|
||||
func WithFunc(template string, format Func) (string, error) {
|
||||
buffer := bytes.NewBuffer(make([]byte, 0, len(template)))
|
||||
opts := &Options{
|
||||
Template: strings.NewReader(template),
|
||||
Output: buffer,
|
||||
Format: format,
|
||||
}
|
||||
i := NewWithOptions(opts)
|
||||
if err := i.Interpolate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buffer.String(), nil
|
||||
}
|
||||
|
||||
// WithMap interpolates the specified template with replacements using the
|
||||
// given map. If a placeholder is used for which a value is not found, an error
|
||||
// is returned.
|
||||
func WithMap(template string, m map[string]string) (string, error) {
|
||||
format := func(key string, w io.Writer) error {
|
||||
value, ok := m[key]
|
||||
if !ok {
|
||||
return ErrKeyNotFound
|
||||
}
|
||||
_, err := w.Write([]byte(value))
|
||||
return err
|
||||
}
|
||||
return WithFunc(template, format)
|
||||
}
|
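The README above demonstrates WithMap; a small hedged sketch of WithFunc from the same file follows, with a made-up formatter that upper-cases each placeholder key:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	interpol "github.com/imkira/go-interpol"
)

func main() {
	// Format callback: replace each placeholder with its upper-cased key
	// instead of looking it up in a map.
	format := func(key string, w io.Writer) error {
		_, err := io.WriteString(w, strings.ToUpper(key))
		return err
	}

	out, err := interpol.WithFunc("Hello {foo} {bar}!", format)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(out) // Hello FOO BAR!
}
```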
52
vendor/github.com/imkira/go-interpol/io.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
package interpol
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type runeWriter interface {
|
||||
io.Writer
|
||||
WriteRune(r rune) (int, error)
|
||||
}
|
||||
|
||||
func templateReader(opts *Options) io.RuneReader {
|
||||
if rr, ok := opts.Template.(io.RuneReader); ok {
|
||||
return rr
|
||||
}
|
||||
return bufio.NewReaderSize(opts.Template, utf8.UTFMax)
|
||||
}
|
||||
|
||||
func outputWriter(opts *Options) runeWriter {
|
||||
if rw, ok := opts.Output.(runeWriter); ok {
|
||||
return rw
|
||||
}
|
||||
return &simpleRuneWriter{w: opts.Output}
|
||||
}
|
||||
|
||||
type simpleRuneWriter struct {
|
||||
runeEncoder
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (rw *simpleRuneWriter) Write(b []byte) (int, error) {
|
||||
return rw.w.Write(b)
|
||||
}
|
||||
|
||||
func (rw *simpleRuneWriter) WriteRune(r rune) (int, error) {
|
||||
return rw.w.Write(rw.encode(r))
|
||||
}
|
||||
|
||||
type runeEncoder struct {
|
||||
b [utf8.UTFMax]byte
|
||||
}
|
||||
|
||||
func (re *runeEncoder) encode(r rune) []byte {
|
||||
if r < utf8.RuneSelf {
|
||||
re.b[0] = byte(r)
|
||||
return re.b[:1]
|
||||
}
|
||||
n := utf8.EncodeRune(re.b[:], r)
|
||||
return re.b[:n]
|
||||
}
|
68
vendor/github.com/imkira/go-interpol/options.go
generated
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
package interpol
|
||||
|
||||
import "io"
|
||||
|
||||
// Options contains all options supported by an Interpolator.
|
||||
type Options struct {
|
||||
Template io.Reader
|
||||
Format Func
|
||||
Output io.Writer
|
||||
}
|
||||
|
||||
// Option is an option that can be applied to an Interpolator.
|
||||
type Option func(OptionSetter)
|
||||
|
||||
// OptionSetter is an interface that contains the setters for all options
|
||||
// supported by Interpolator.
|
||||
type OptionSetter interface {
|
||||
SetTemplate(template io.Reader)
|
||||
SetFormat(format Func)
|
||||
SetOutput(output io.Writer)
|
||||
}
|
||||
|
||||
// WithTemplate assigns Template to Options.
|
||||
func WithTemplate(template io.Reader) Option {
|
||||
return func(setter OptionSetter) {
|
||||
setter.SetTemplate(template)
|
||||
}
|
||||
}
|
||||
|
||||
// WithFormat assigns Format to Options.
|
||||
func WithFormat(format Func) Option {
|
||||
return func(setter OptionSetter) {
|
||||
setter.SetFormat(format)
|
||||
}
|
||||
}
|
||||
|
||||
// WithOutput assigns Output to Options.
|
||||
func WithOutput(output io.Writer) Option {
|
||||
return func(setter OptionSetter) {
|
||||
setter.SetOutput(output)
|
||||
}
|
||||
}
|
||||
|
||||
type optionSetter struct {
|
||||
opts *Options
|
||||
}
|
||||
|
||||
func newOptionSetter(opts *Options) *optionSetter {
|
||||
return &optionSetter{opts: opts}
|
||||
}
|
||||
|
||||
func (s *optionSetter) SetTemplate(template io.Reader) {
|
||||
s.opts.Template = template
|
||||
}
|
||||
|
||||
func (s *optionSetter) SetFormat(format Func) {
|
||||
s.opts.Format = format
|
||||
}
|
||||
|
||||
func (s *optionSetter) SetOutput(output io.Writer) {
|
||||
s.opts.Output = output
|
||||
}
|
||||
|
||||
func setOptions(opts []Option, setter OptionSetter) {
|
||||
for _, opt := range opts {
|
||||
opt(setter)
|
||||
}
|
||||
}
|
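A hedged sketch of the functional-options entry point these setters serve (New combined with WithTemplate, WithFormat and WithOutput); the template and formatter below are made up for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	interpol "github.com/imkira/go-interpol"
)

func main() {
	var out bytes.Buffer

	// Wrap each placeholder key in brackets so the result is easy to inspect.
	format := func(key string, w io.Writer) error {
		_, err := io.WriteString(w, "["+key+"]")
		return err
	}

	i := interpol.New(
		interpol.WithTemplate(strings.NewReader("x={x}, y={y}")),
		interpol.WithFormat(format),
		interpol.WithOutput(&out),
	)
	if err := i.Interpolate(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(out.String()) // x=[x], y=[y]
}
```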
27
vendor/github.com/klauspost/compress/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
32
vendor/github.com/klauspost/compress/flate/copy.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// forwardCopy is like the built-in copy function except that it always goes
|
||||
// forward from the start, even if the dst and src overlap.
|
||||
// It is equivalent to:
|
||||
// for i := 0; i < n; i++ {
|
||||
// mem[dst+i] = mem[src+i]
|
||||
// }
|
||||
func forwardCopy(mem []byte, dst, src, n int) {
|
||||
if dst <= src {
|
||||
copy(mem[dst:dst+n], mem[src:src+n])
|
||||
return
|
||||
}
|
||||
for {
|
||||
if dst >= src+n {
|
||||
copy(mem[dst:dst+n], mem[src:src+n])
|
||||
return
|
||||
}
|
||||
// There is some forward overlap. The destination
|
||||
// will be filled with a repeated pattern of mem[src:src+k].
|
||||
// We copy one instance of the pattern here, then repeat.
|
||||
// Each time around this loop k will double.
|
||||
k := dst - src
|
||||
copy(mem[dst:dst+k], mem[src:src+k])
|
||||
n -= k
|
||||
dst += k
|
||||
}
|
||||
}
|
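To illustrate the overlap behaviour the comment above describes, the sketch below re-implements the same loop outside the flate package (forwardCopy itself is unexported) and shows the repeated-pattern result an overlapping copy produces:

```go
package main

import "fmt"

// forwardCopyDemo mirrors the vendored forwardCopy: copy n bytes from src to
// dst inside mem, always moving forward even when the ranges overlap.
func forwardCopyDemo(mem []byte, dst, src, n int) {
	if dst <= src {
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	for {
		if dst >= src+n {
			copy(mem[dst:dst+n], mem[src:src+n])
			return
		}
		// Forward overlap: copy one instance of the pattern, then repeat;
		// k doubles each time around the loop.
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
}

func main() {
	mem := []byte("ab......")
	forwardCopyDemo(mem, 2, 0, 6) // overlapping ranges repeat the "ab" pattern
	fmt.Println(string(mem))      // abababab
}
```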
42
vendor/github.com/klauspost/compress/flate/crc32_amd64.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
//+build !noasm
|
||||
//+build !appengine
|
||||
//+build !gccgo
|
||||
|
||||
// Copyright 2015, Klaus Post, see LICENSE for details.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"github.com/klauspost/cpuid"
|
||||
)
|
||||
|
||||
// crc32sse returns a hash for the first 4 bytes of the slice
|
||||
// len(a) must be >= 4.
|
||||
//go:noescape
|
||||
func crc32sse(a []byte) uint32
|
||||
|
||||
// crc32sseAll calculates hashes for each 4-byte set in a.
|
||||
// dst must be at least len(a) - 4 in size.
|
||||
// The size is not checked by the assembly.
|
||||
//go:noescape
|
||||
func crc32sseAll(a []byte, dst []uint32)
|
||||
|
||||
// matchLenSSE4 returns the number of matching bytes in a and b
|
||||
// up to length 'max'. Both slices must be at least 'max'
|
||||
// bytes in size.
|
||||
//
|
||||
// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
|
||||
//
|
||||
//go:noescape
|
||||
func matchLenSSE4(a, b []byte, max int) int
|
||||
|
||||
// histogram accumulates a histogram of b in h.
|
||||
// h must be at least 256 entries in length,
|
||||
// and must be cleared before calling this function.
|
||||
//go:noescape
|
||||
func histogram(b []byte, h []int32)
|
||||
|
||||
// Detect SSE 4.2 feature.
|
||||
func init() {
|
||||
useSSE42 = cpuid.CPU.SSE42()
|
||||
}
|
214
vendor/github.com/klauspost/compress/flate/crc32_amd64.s
generated
vendored
Normal file
|
@ -0,0 +1,214 @@
|
|||
//+build !noasm
|
||||
//+build !appengine
|
||||
//+build !gccgo
|
||||
|
||||
// Copyright 2015, Klaus Post, see LICENSE for details.
|
||||
|
||||
// func crc32sse(a []byte) uint32
|
||||
TEXT ·crc32sse(SB), 4, $0
|
||||
MOVQ a+0(FP), R10
|
||||
XORQ BX, BX
|
||||
|
||||
// CRC32 dword (R10), EBX
|
||||
BYTE $0xF2; BYTE $0x41; BYTE $0x0f
|
||||
BYTE $0x38; BYTE $0xf1; BYTE $0x1a
|
||||
|
||||
MOVL BX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func crc32sseAll(a []byte, dst []uint32)
|
||||
TEXT ·crc32sseAll(SB), 4, $0
|
||||
MOVQ a+0(FP), R8 // R8: src
|
||||
MOVQ a_len+8(FP), R10 // input length
|
||||
MOVQ dst+24(FP), R9 // R9: dst
|
||||
SUBQ $4, R10
|
||||
JS end
|
||||
JZ one_crc
|
||||
MOVQ R10, R13
|
||||
SHRQ $2, R10 // len/4
|
||||
ANDQ $3, R13 // len&3
|
||||
XORQ BX, BX
|
||||
ADDQ $1, R13
|
||||
TESTQ R10, R10
|
||||
JZ rem_loop
|
||||
|
||||
crc_loop:
|
||||
MOVQ (R8), R11
|
||||
XORQ BX, BX
|
||||
XORQ DX, DX
|
||||
XORQ DI, DI
|
||||
MOVQ R11, R12
|
||||
SHRQ $8, R11
|
||||
MOVQ R12, AX
|
||||
MOVQ R11, CX
|
||||
SHRQ $16, R12
|
||||
SHRQ $16, R11
|
||||
MOVQ R12, SI
|
||||
|
||||
// CRC32 EAX, EBX
|
||||
BYTE $0xF2; BYTE $0x0f
|
||||
BYTE $0x38; BYTE $0xf1; BYTE $0xd8
|
||||
|
||||
// CRC32 ECX, EDX
|
||||
BYTE $0xF2; BYTE $0x0f
|
||||
BYTE $0x38; BYTE $0xf1; BYTE $0xd1
|
||||
|
||||
// CRC32 ESI, EDI
|
||||
BYTE $0xF2; BYTE $0x0f
|
||||
BYTE $0x38; BYTE $0xf1; BYTE $0xfe
|
||||
MOVL BX, (R9)
|
||||
MOVL DX, 4(R9)
|
||||
MOVL DI, 8(R9)
|
||||
|
||||
XORQ BX, BX
|
||||
MOVL R11, AX
|
||||
|
||||
// CRC32 EAX, EBX
|
||||
BYTE $0xF2; BYTE $0x0f
|
||||
BYTE $0x38; BYTE $0xf1; BYTE $0xd8
|
||||
MOVL BX, 12(R9)
|
||||
|
||||
ADDQ $16, R9
|
||||
ADDQ $4, R8
|
||||
XORQ BX, BX
|
||||
SUBQ $1, R10
|
||||
JNZ crc_loop
|
||||
|
||||
rem_loop:
|
||||
MOVL (R8), AX
|
||||
|
||||
// CRC32 EAX, EBX
|
||||
BYTE $0xF2; BYTE $0x0f
|
||||
BYTE $0x38; BYTE $0xf1; BYTE $0xd8
|
||||
|
||||
MOVL BX, (R9)
|
||||
ADDQ $4, R9
|
||||
ADDQ $1, R8
|
||||
XORQ BX, BX
|
||||
SUBQ $1, R13
|
||||
JNZ rem_loop
|
||||
|
||||
end:
|
||||
RET
|
||||
|
||||
one_crc:
|
||||
MOVQ $1, R13
|
||||
XORQ BX, BX
|
||||
JMP rem_loop
|
||||
|
||||
// func matchLenSSE4(a, b []byte, max int) int
|
||||
TEXT ·matchLenSSE4(SB), 4, $0
|
||||
MOVQ a_base+0(FP), SI
|
||||
MOVQ b_base+24(FP), DI
|
||||
MOVQ DI, DX
|
||||
MOVQ max+48(FP), CX
|
||||
|
||||
cmp8:
|
||||
// As long as we are 8 or more bytes before the end of max, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ CX, $8
|
||||
JLT cmp1
|
||||
MOVQ (SI), AX
|
||||
MOVQ (DI), BX
|
||||
CMPQ AX, BX
|
||||
JNE bsf
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
SUBQ $8, CX
|
||||
JMP cmp8
|
||||
|
||||
bsf:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, DI
|
||||
|
||||
// Subtract off &b[0] to convert from &b[ret] to ret, and return.
|
||||
SUBQ DX, DI
|
||||
MOVQ DI, ret+56(FP)
|
||||
RET
|
||||
|
||||
cmp1:
|
||||
// In the slices' tail, compare 1 byte at a time.
|
||||
CMPQ CX, $0
|
||||
JEQ matchLenEnd
|
||||
MOVB (SI), AX
|
||||
MOVB (DI), BX
|
||||
CMPB AX, BX
|
||||
JNE matchLenEnd
|
||||
ADDQ $1, SI
|
||||
ADDQ $1, DI
|
||||
SUBQ $1, CX
|
||||
JMP cmp1
|
||||
|
||||
matchLenEnd:
|
||||
// Subtract off &b[0] to convert from &b[ret] to ret, and return.
|
||||
SUBQ DX, DI
|
||||
MOVQ DI, ret+56(FP)
|
||||
RET
|
||||
|
||||
// func histogram(b []byte, h []int32)
|
||||
TEXT ·histogram(SB), 4, $0
|
||||
MOVQ b+0(FP), SI // SI: &b
|
||||
MOVQ b_len+8(FP), R9 // R9: len(b)
|
||||
MOVQ h+24(FP), DI // DI: Histogram
|
||||
MOVQ R9, R8
|
||||
SHRQ $3, R8
|
||||
JZ hist1
|
||||
XORQ R11, R11
|
||||
|
||||
loop_hist8:
|
||||
MOVQ (SI), R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
MOVB R10, R11
|
||||
INCL (DI)(R11*4)
|
||||
SHRQ $8, R10
|
||||
|
||||
INCL (DI)(R10*4)
|
||||
|
||||
ADDQ $8, SI
|
||||
DECQ R8
|
||||
JNZ loop_hist8
|
||||
|
||||
hist1:
|
||||
ANDQ $7, R9
|
||||
JZ end_hist
|
||||
XORQ R10, R10
|
||||
|
||||
loop_hist1:
|
||||
MOVB (SI), R10
|
||||
INCL (DI)(R10*4)
|
||||
INCQ SI
|
||||
DECQ R9
|
||||
JNZ loop_hist1
|
||||
|
||||
end_hist:
|
||||
RET
|
35
vendor/github.com/klauspost/compress/flate/crc32_noasm.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
//+build !amd64 noasm appengine gccgo
|
||||
|
||||
// Copyright 2015, Klaus Post, see LICENSE for details.
|
||||
|
||||
package flate
|
||||
|
||||
func init() {
|
||||
useSSE42 = false
|
||||
}
|
||||
|
||||
// crc32sse should never be called.
|
||||
func crc32sse(a []byte) uint32 {
|
||||
panic("no assembler")
|
||||
}
|
||||
|
||||
// crc32sseAll should never be called.
|
||||
func crc32sseAll(a []byte, dst []uint32) {
|
||||
panic("no assembler")
|
||||
}
|
||||
|
||||
// matchLenSSE4 should never be called.
|
||||
func matchLenSSE4(a, b []byte, max int) int {
|
||||
panic("no assembler")
|
||||
return 0
|
||||
}
|
||||
|
||||
// histogram accumulates a histogram of b in h.
|
||||
//
|
||||
// len(h) must be >= 256, and h's elements must be all zeroes.
|
||||
func histogram(b []byte, h []int32) {
|
||||
h = h[:256]
|
||||
for _, t := range b {
|
||||
h[t]++
|
||||
}
|
||||
}
|
1353
vendor/github.com/klauspost/compress/flate/deflate.go
generated
vendored
Normal file
File diff suppressed because it is too large
184
vendor/github.com/klauspost/compress/flate/dict_decoder.go
generated
vendored
Normal file
|
@ -0,0 +1,184 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
|
||||
// LZ77 decompresses data through sequences of two forms of commands:
|
||||
//
|
||||
// * Literal insertions: Runs of one or more symbols are inserted into the data
|
||||
// stream as is. This is accomplished through the writeByte method for a
|
||||
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
|
||||
// Any valid stream must start with a literal insertion if no preset dictionary
|
||||
// is used.
|
||||
//
|
||||
// * Backward copies: Runs of one or more symbols are copied from previously
|
||||
// emitted data. Backward copies come as the tuple (dist, length) where dist
|
||||
// determines how far back in the stream to copy from and length determines how
|
||||
// many bytes to copy. Note that it is valid for the length to be greater than
|
||||
// the distance. Since LZ77 uses forward copies, that situation is used to
|
||||
// perform a form of run-length encoding on repeated runs of symbols.
|
||||
// The writeCopy and tryWriteCopy are used to implement this command.
|
||||
//
|
||||
// For performance reasons, this implementation performs little to no sanity
|
||||
// checks about the arguments. As such, the invariants documented for each
|
||||
// method call must be respected.
|
||||
type dictDecoder struct {
|
||||
hist []byte // Sliding window history
|
||||
|
||||
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
|
||||
wrPos int // Current output position in buffer
|
||||
rdPos int // Have emitted hist[:rdPos] already
|
||||
full bool // Has a full window length been written yet?
|
||||
}
|
||||
|
||||
// init initializes dictDecoder to have a sliding window dictionary of the given
|
||||
// size. If a preset dict is provided, it will initialize the dictionary with
|
||||
// the contents of dict.
|
||||
func (dd *dictDecoder) init(size int, dict []byte) {
|
||||
*dd = dictDecoder{hist: dd.hist}
|
||||
|
||||
if cap(dd.hist) < size {
|
||||
dd.hist = make([]byte, size)
|
||||
}
|
||||
dd.hist = dd.hist[:size]
|
||||
|
||||
if len(dict) > len(dd.hist) {
|
||||
dict = dict[len(dict)-len(dd.hist):]
|
||||
}
|
||||
dd.wrPos = copy(dd.hist, dict)
|
||||
if dd.wrPos == len(dd.hist) {
|
||||
dd.wrPos = 0
|
||||
dd.full = true
|
||||
}
|
||||
dd.rdPos = dd.wrPos
|
||||
}
|
||||
|
||||
// histSize reports the total amount of historical data in the dictionary.
|
||||
func (dd *dictDecoder) histSize() int {
|
||||
if dd.full {
|
||||
return len(dd.hist)
|
||||
}
|
||||
return dd.wrPos
|
||||
}
|
||||
|
||||
// availRead reports the number of bytes that can be flushed by readFlush.
|
||||
func (dd *dictDecoder) availRead() int {
|
||||
return dd.wrPos - dd.rdPos
|
||||
}
|
||||
|
||||
// availWrite reports the available amount of output buffer space.
|
||||
func (dd *dictDecoder) availWrite() int {
|
||||
return len(dd.hist) - dd.wrPos
|
||||
}
|
||||
|
||||
// writeSlice returns a slice of the available buffer to write data to.
|
||||
//
|
||||
// This invariant will be kept: len(s) <= availWrite()
|
||||
func (dd *dictDecoder) writeSlice() []byte {
|
||||
return dd.hist[dd.wrPos:]
|
||||
}
|
||||
|
||||
// writeMark advances the writer pointer by cnt.
|
||||
//
|
||||
// This invariant must be kept: 0 <= cnt <= availWrite()
|
||||
func (dd *dictDecoder) writeMark(cnt int) {
|
||||
dd.wrPos += cnt
|
||||
}
|
||||
|
||||
// writeByte writes a single byte to the dictionary.
|
||||
//
|
||||
// This invariant must be kept: 0 < availWrite()
|
||||
func (dd *dictDecoder) writeByte(c byte) {
|
||||
dd.hist[dd.wrPos] = c
|
||||
dd.wrPos++
|
||||
}
|
||||
|
||||
// writeCopy copies a string at a given (dist, length) to the output.
|
||||
// This returns the number of bytes copied and may be less than the requested
|
||||
// length if the available space in the output buffer is too small.
|
||||
//
|
||||
// This invariant must be kept: 0 < dist <= histSize()
|
||||
func (dd *dictDecoder) writeCopy(dist, length int) int {
|
||||
dstBase := dd.wrPos
|
||||
dstPos := dstBase
|
||||
srcPos := dstPos - dist
|
||||
endPos := dstPos + length
|
||||
if endPos > len(dd.hist) {
|
||||
endPos = len(dd.hist)
|
||||
}
|
||||
|
||||
// Copy non-overlapping section after destination position.
|
||||
//
|
||||
// This section is non-overlapping in that the copy length for this section
|
||||
// is always less than or equal to the backwards distance. This can occur
|
||||
// if a distance refers to data that wraps-around in the buffer.
|
||||
// Thus, a backwards copy is performed here; that is, the exact bytes in
|
||||
// the source prior to the copy is placed in the destination.
|
||||
if srcPos < 0 {
|
||||
srcPos += len(dd.hist)
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
|
||||
srcPos = 0
|
||||
}
|
||||
|
||||
// Copy possibly overlapping section before destination position.
|
||||
//
|
||||
// This section can overlap if the copy length for this section is larger
|
||||
// than the backwards distance. This is allowed by LZ77 so that repeated
|
||||
// strings can be succinctly represented using (dist, length) pairs.
|
||||
// Thus, a forwards copy is performed here; that is, the bytes copied is
|
||||
// possibly dependent on the resulting bytes in the destination as the copy
|
||||
// progresses along. This is functionally equivalent to the following:
|
||||
//
|
||||
// for i := 0; i < endPos-dstPos; i++ {
|
||||
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
|
||||
// }
|
||||
// dstPos = endPos
|
||||
//
|
||||
for dstPos < endPos {
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
|
||||
}
|
||||
|
||||
dd.wrPos = dstPos
|
||||
return dstPos - dstBase
|
||||
}
|
||||
|
||||
// tryWriteCopy tries to copy a string at a given (distance, length) to the
|
||||
// output. This specialized version is optimized for short distances.
|
||||
//
|
||||
// This method is designed to be inlined for performance reasons.
|
||||
//
|
||||
// This invariant must be kept: 0 < dist <= histSize()
|
||||
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
|
||||
dstPos := dd.wrPos
|
||||
endPos := dstPos + length
|
||||
if dstPos < dist || endPos > len(dd.hist) {
|
||||
return 0
|
||||
}
|
||||
dstBase := dstPos
|
||||
srcPos := dstPos - dist
|
||||
|
||||
// Copy possibly overlapping section before destination position.
|
||||
loop:
|
||||
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
|
||||
if dstPos < endPos {
|
||||
goto loop // Avoid for-loop so that this function can be inlined
|
||||
}
|
||||
|
||||
dd.wrPos = dstPos
|
||||
return dstPos - dstBase
|
||||
}
|
||||
|
||||
// readFlush returns a slice of the historical buffer that is ready to be
|
||||
// emitted to the user. The data returned by readFlush must be fully consumed
|
||||
// before calling any other dictDecoder methods.
|
||||
func (dd *dictDecoder) readFlush() []byte {
|
||||
toRead := dd.hist[dd.rdPos:dd.wrPos]
|
||||
dd.rdPos = dd.wrPos
|
||||
if dd.wrPos == len(dd.hist) {
|
||||
dd.wrPos, dd.rdPos = 0, 0
|
||||
dd.full = true
|
||||
}
|
||||
return toRead
|
||||
}
|
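The dictDecoder comment above notes that a backward copy may be longer than its distance, which amounts to run-length encoding of repeated symbols. A self-contained sketch of that forward, byte-by-byte copy semantics (the same idea as the loop shown in the writeCopy comment, not the vendored code itself):

```go
package main

import "fmt"

// lz77Copy appends `length` bytes taken from `dist` bytes back in hist,
// one byte at a time, so the copy may consume bytes it has just produced.
func lz77Copy(hist []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		hist = append(hist, hist[len(hist)-dist])
	}
	return hist
}

func main() {
	// "abc" followed by a (dist=3, length=9) copy expands to "abcabcabcabc".
	out := lz77Copy([]byte("abc"), 3, 9)
	fmt.Println(string(out))
}
```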
701
vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
generated
vendored
Normal file
|
@ -0,0 +1,701 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
// The largest offset code.
|
||||
offsetCodeCount = 30
|
||||
|
||||
// The special code used to mark the end of a block.
|
||||
endBlockMarker = 256
|
||||
|
||||
// The first length code.
|
||||
lengthCodesStart = 257
|
||||
|
||||
// The number of codegen codes.
|
||||
codegenCodeCount = 19
|
||||
badCode = 255
|
||||
|
||||
// bufferFlushSize indicates the buffer size
|
||||
// after which bytes are flushed to the writer.
|
||||
// Should preferably be a multiple of 6, since
|
||||
// we accumulate 6 bytes between writes to the buffer.
|
||||
bufferFlushSize = 240
|
||||
|
||||
// bufferSize is the actual output byte buffer size.
|
||||
// It must have additional headroom for a flush
|
||||
// which can contain up to 8 bytes.
|
||||
bufferSize = bufferFlushSize + 8
|
||||
)
|
||||
|
||||
// The number of extra bits needed by length code X - LENGTH_CODES_START.
|
||||
var lengthExtraBits = []int8{
|
||||
/* 257 */ 0, 0, 0,
|
||||
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
|
||||
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
|
||||
/* 280 */ 4, 5, 5, 5, 5, 0,
|
||||
}
|
||||
|
||||
// The length indicated by length code X - LENGTH_CODES_START.
|
||||
var lengthBase = []uint32{
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
|
||||
12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
|
||||
64, 80, 96, 112, 128, 160, 192, 224, 255,
|
||||
}
|
||||
|
||||
// offset code word extra bits.
|
||||
var offsetExtraBits = []int8{
|
||||
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
|
||||
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
|
||||
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
|
||||
/* extended window */
|
||||
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
|
||||
}
|
||||
|
||||
var offsetBase = []uint32{
|
||||
/* normal deflate */
|
||||
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
|
||||
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
|
||||
0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
|
||||
0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
|
||||
0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
|
||||
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
|
||||
|
||||
/* extended window */
|
||||
0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
|
||||
0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
|
||||
0x100000, 0x180000, 0x200000, 0x300000,
|
||||
}
|
||||
|
||||
// The odd order in which the codegen code sizes are written.
|
||||
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
|
||||
|
||||
type huffmanBitWriter struct {
|
||||
// writer is the underlying writer.
|
||||
// Do not use it directly; use the write method, which ensures
|
||||
// that Write errors are sticky.
|
||||
writer io.Writer
|
||||
|
||||
// Data waiting to be written is bytes[0:nbytes]
|
||||
// and then the low nbits of bits.
|
||||
bits uint64
|
||||
nbits uint
|
||||
bytes [bufferSize]byte
|
||||
codegenFreq [codegenCodeCount]int32
|
||||
nbytes int
|
||||
literalFreq []int32
|
||||
offsetFreq []int32
|
||||
codegen []uint8
|
||||
literalEncoding *huffmanEncoder
|
||||
offsetEncoding *huffmanEncoder
|
||||
codegenEncoding *huffmanEncoder
|
||||
err error
|
||||
}
|
||||
|
||||
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
|
||||
return &huffmanBitWriter{
|
||||
writer: w,
|
||||
literalFreq: make([]int32, maxNumLit),
|
||||
offsetFreq: make([]int32, offsetCodeCount),
|
||||
codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
|
||||
literalEncoding: newHuffmanEncoder(maxNumLit),
|
||||
codegenEncoding: newHuffmanEncoder(codegenCodeCount),
|
||||
offsetEncoding: newHuffmanEncoder(offsetCodeCount),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) reset(writer io.Writer) {
|
||||
w.writer = writer
|
||||
w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
|
||||
w.bytes = [bufferSize]byte{}
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) flush() {
|
||||
if w.err != nil {
|
||||
w.nbits = 0
|
||||
return
|
||||
}
|
||||
n := w.nbytes
|
||||
for w.nbits != 0 {
|
||||
w.bytes[n] = byte(w.bits)
|
||||
w.bits >>= 8
|
||||
if w.nbits > 8 { // Avoid underflow
|
||||
w.nbits -= 8
|
||||
} else {
|
||||
w.nbits = 0
|
||||
}
|
||||
n++
|
||||
}
|
||||
w.bits = 0
|
||||
w.write(w.bytes[:n])
|
||||
w.nbytes = 0
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) write(b []byte) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
_, w.err = w.writer.Write(b)
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
w.bits |= uint64(b) << w.nbits
|
||||
w.nbits += nb
|
||||
if w.nbits >= 48 {
|
||||
bits := w.bits
|
||||
w.bits >>= 48
|
||||
w.nbits -= 48
|
||||
n := w.nbytes
|
||||
bytes := w.bytes[n : n+6]
|
||||
bytes[0] = byte(bits)
|
||||
bytes[1] = byte(bits >> 8)
|
||||
bytes[2] = byte(bits >> 16)
|
||||
bytes[3] = byte(bits >> 24)
|
||||
bytes[4] = byte(bits >> 32)
|
||||
bytes[5] = byte(bits >> 40)
|
||||
n += 6
|
||||
if n >= bufferFlushSize {
|
||||
w.write(w.bytes[:n])
|
||||
n = 0
|
||||
}
|
||||
w.nbytes = n
|
||||
}
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
n := w.nbytes
|
||||
if w.nbits&7 != 0 {
|
||||
w.err = InternalError("writeBytes with unfinished bits")
|
||||
return
|
||||
}
|
||||
for w.nbits != 0 {
|
||||
w.bytes[n] = byte(w.bits)
|
||||
w.bits >>= 8
|
||||
w.nbits -= 8
|
||||
n++
|
||||
}
|
||||
if n != 0 {
|
||||
w.write(w.bytes[:n])
|
||||
}
|
||||
w.nbytes = 0
|
||||
w.write(bytes)
|
||||
}
|
||||
|
||||
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
|
||||
// the literal and offset lengths arrays (which are concatenated into a single
|
||||
// array). This method generates that run-length encoding.
|
||||
//
|
||||
// The result is written into the codegen array, and the frequencies
|
||||
// of each code are written into the codegenFreq array.
|
||||
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
|
||||
// information. Code badCode is an end marker
|
||||
//
|
||||
// numLiterals The number of literals in literalEncoding
|
||||
// numOffsets The number of offsets in offsetEncoding
|
||||
// litenc, offenc The literal and offset encoder to use
|
||||
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
|
||||
for i := range w.codegenFreq {
|
||||
w.codegenFreq[i] = 0
|
||||
}
|
||||
// Note that we are using codegen both as a temporary variable for holding
|
||||
// a copy of the frequencies, and as the place where we put the result.
|
||||
// This is fine because the output is always shorter than the input used
|
||||
// so far.
|
||||
codegen := w.codegen // cache
|
||||
// Copy the concatenated code sizes to codegen. Put a marker at the end.
|
||||
cgnl := codegen[:numLiterals]
|
||||
for i := range cgnl {
|
||||
cgnl[i] = uint8(litEnc.codes[i].len)
|
||||
}
|
||||
|
||||
cgnl = codegen[numLiterals : numLiterals+numOffsets]
|
||||
for i := range cgnl {
|
||||
cgnl[i] = uint8(offEnc.codes[i].len)
|
||||
}
|
||||
codegen[numLiterals+numOffsets] = badCode
|
||||
|
||||
size := codegen[0]
|
||||
count := 1
|
||||
outIndex := 0
|
||||
for inIndex := 1; size != badCode; inIndex++ {
|
||||
// INVARIANT: We have seen "count" copies of size that have not yet
|
||||
// had output generated for them.
|
||||
nextSize := codegen[inIndex]
|
||||
if nextSize == size {
|
||||
count++
|
||||
continue
|
||||
}
|
||||
// We need to generate codegen indicating "count" of size.
|
||||
if size != 0 {
|
||||
codegen[outIndex] = size
|
||||
outIndex++
|
||||
w.codegenFreq[size]++
|
||||
count--
|
||||
for count >= 3 {
|
||||
n := 6
|
||||
if n > count {
|
||||
n = count
|
||||
}
|
||||
codegen[outIndex] = 16
|
||||
outIndex++
|
||||
codegen[outIndex] = uint8(n - 3)
|
||||
outIndex++
|
||||
w.codegenFreq[16]++
|
||||
count -= n
|
||||
}
|
||||
} else {
|
||||
for count >= 11 {
|
||||
n := 138
|
||||
if n > count {
|
||||
n = count
|
||||
}
|
||||
codegen[outIndex] = 18
|
||||
outIndex++
|
||||
codegen[outIndex] = uint8(n - 11)
|
||||
outIndex++
|
||||
w.codegenFreq[18]++
|
||||
count -= n
|
||||
}
|
||||
if count >= 3 {
|
||||
// count >= 3 && count <= 10
|
||||
codegen[outIndex] = 17
|
||||
outIndex++
|
||||
codegen[outIndex] = uint8(count - 3)
|
||||
outIndex++
|
||||
w.codegenFreq[17]++
|
||||
count = 0
|
||||
}
|
||||
}
|
||||
count--
|
||||
for ; count >= 0; count-- {
|
||||
codegen[outIndex] = size
|
||||
outIndex++
|
||||
w.codegenFreq[size]++
|
||||
}
|
||||
// Set up invariant for next time through the loop.
|
||||
size = nextSize
|
||||
count = 1
|
||||
}
|
||||
// Marker indicating the end of the codegen.
|
||||
codegen[outIndex] = badCode
|
||||
}
|
||||
|
||||
// dynamicSize returns the size of dynamically encoded data in bits.
|
||||
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
|
||||
numCodegens = len(w.codegenFreq)
|
||||
for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
|
||||
numCodegens--
|
||||
}
|
||||
header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
|
||||
w.codegenEncoding.bitLength(w.codegenFreq[:]) +
|
||||
int(w.codegenFreq[16])*2 +
|
||||
int(w.codegenFreq[17])*3 +
|
||||
int(w.codegenFreq[18])*7
|
||||
size = header +
|
||||
litEnc.bitLength(w.literalFreq) +
|
||||
offEnc.bitLength(w.offsetFreq) +
|
||||
extraBits
|
||||
|
||||
return size, numCodegens
|
||||
}
|
||||
|
||||
// fixedSize returns the size of data encoded with fixed Huffman codes, in bits.
|
||||
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
|
||||
return 3 +
|
||||
fixedLiteralEncoding.bitLength(w.literalFreq) +
|
||||
fixedOffsetEncoding.bitLength(w.offsetFreq) +
|
||||
extraBits
|
||||
}
|
||||
|
||||
// storedSize calculates the stored size, including header.
|
||||
// The function returns the size in bits and whether the block
|
||||
// fits inside a single block.
|
||||
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
|
||||
if in == nil {
|
||||
return 0, false
|
||||
}
|
||||
if len(in) <= maxStoreBlockSize {
|
||||
return (len(in) + 5) * 8, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) writeCode(c hcode) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
w.bits |= uint64(c.code) << w.nbits
|
||||
w.nbits += uint(c.len)
|
||||
if w.nbits >= 48 {
|
||||
bits := w.bits
|
||||
w.bits >>= 48
|
||||
w.nbits -= 48
|
||||
n := w.nbytes
|
||||
bytes := w.bytes[n : n+6]
|
||||
bytes[0] = byte(bits)
|
||||
bytes[1] = byte(bits >> 8)
|
||||
bytes[2] = byte(bits >> 16)
|
||||
bytes[3] = byte(bits >> 24)
|
||||
bytes[4] = byte(bits >> 32)
|
||||
bytes[5] = byte(bits >> 40)
|
||||
n += 6
|
||||
if n >= bufferFlushSize {
|
||||
w.write(w.bytes[:n])
|
||||
n = 0
|
||||
}
|
||||
w.nbytes = n
|
||||
}
|
||||
}
|
||||
|
||||
// Write the header of a dynamic Huffman block to the output stream.
|
||||
//
|
||||
// numLiterals The number of literals specified in codegen
|
||||
// numOffsets The number of offsets specified in codegen
|
||||
// numCodegens The number of codegens used in codegen
|
||||
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
var firstBits int32 = 4
|
||||
if isEof {
|
||||
firstBits = 5
|
||||
}
|
||||
w.writeBits(firstBits, 3)
|
||||
w.writeBits(int32(numLiterals-257), 5)
|
||||
w.writeBits(int32(numOffsets-1), 5)
|
||||
w.writeBits(int32(numCodegens-4), 4)
|
||||
|
||||
for i := 0; i < numCodegens; i++ {
|
||||
value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
|
||||
w.writeBits(int32(value), 3)
|
||||
}
|
||||
|
||||
i := 0
|
||||
for {
|
||||
var codeWord int = int(w.codegen[i])
|
||||
i++
|
||||
if codeWord == badCode {
|
||||
break
|
||||
}
|
||||
w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
|
||||
|
||||
switch codeWord {
|
||||
case 16:
|
||||
w.writeBits(int32(w.codegen[i]), 2)
|
||||
i++
|
||||
break
|
||||
case 17:
|
||||
w.writeBits(int32(w.codegen[i]), 3)
|
||||
i++
|
||||
break
|
||||
case 18:
|
||||
w.writeBits(int32(w.codegen[i]), 7)
|
||||
i++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
var flag int32
|
||||
if isEof {
|
||||
flag = 1
|
||||
}
|
||||
w.writeBits(flag, 3)
|
||||
w.flush()
|
||||
w.writeBits(int32(length), 16)
|
||||
w.writeBits(int32(^uint16(length)), 16)
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
// Indicate that we are a fixed Huffman block
|
||||
var value int32 = 2
|
||||
if isEof {
|
||||
value = 3
|
||||
}
|
||||
w.writeBits(value, 3)
|
||||
}
|
||||
|
||||
// writeBlock will write a block of tokens with the smallest encoding.
|
||||
// The original input can be supplied, and if the huffman encoded data
|
||||
// is larger than the original bytes, the data will be written as a
|
||||
// stored block.
|
||||
// If the input is nil, the tokens will always be Huffman encoded.
|
||||
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tokens = append(tokens, endBlockMarker)
|
||||
numLiterals, numOffsets := w.indexTokens(tokens)
|
||||
|
||||
var extraBits int
|
||||
storedSize, storable := w.storedSize(input)
|
||||
if storable {
|
||||
// We only bother calculating the costs of the extra bits required by
|
||||
// the length of offset fields (which will be the same for both fixed
|
||||
// and dynamic encoding), if we need to compare those two encodings
|
||||
// against stored encoding.
|
||||
for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
|
||||
// First eight length codes have extra size = 0.
|
||||
extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
|
||||
}
|
||||
for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
|
||||
// First four offset codes have extra size = 0.
|
||||
extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
|
||||
}
|
||||
}
|
||||
|
||||
// Figure out smallest code.
|
||||
// Fixed Huffman baseline.
|
||||
var literalEncoding = fixedLiteralEncoding
|
||||
var offsetEncoding = fixedOffsetEncoding
|
||||
var size = w.fixedSize(extraBits)
|
||||
|
||||
// Dynamic Huffman?
|
||||
var numCodegens int
|
||||
|
||||
// Generate codegen and codegenFrequencies, which indicates how to encode
|
||||
// the literalEncoding and the offsetEncoding.
|
||||
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
|
||||
w.codegenEncoding.generate(w.codegenFreq[:], 7)
|
||||
dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
|
||||
|
||||
if dynamicSize < size {
|
||||
size = dynamicSize
|
||||
literalEncoding = w.literalEncoding
|
||||
offsetEncoding = w.offsetEncoding
|
||||
}
|
||||
|
||||
// Stored bytes?
|
||||
if storable && storedSize < size {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
return
|
||||
}
|
||||
|
||||
// Huffman.
|
||||
if literalEncoding == fixedLiteralEncoding {
|
||||
w.writeFixedHeader(eof)
|
||||
} else {
|
||||
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
|
||||
}
|
||||
|
||||
// Write the tokens.
|
||||
w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
|
||||
}
|
||||
|
||||
// writeBlockDynamic encodes a block using a dynamic Huffman table.
|
||||
// This should be used if the symbols used have a disproportionate
|
||||
// histogram distribution.
|
||||
// If input is supplied and the compression savings are below 1/16th of the
|
||||
// input size the block is stored.
|
||||
func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tokens = append(tokens, endBlockMarker)
|
||||
numLiterals, numOffsets := w.indexTokens(tokens)
|
||||
|
||||
// Generate codegen and codegenFrequencies, which indicates how to encode
|
||||
// the literalEncoding and the offsetEncoding.
|
||||
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
|
||||
w.codegenEncoding.generate(w.codegenFreq[:], 7)
|
||||
size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
|
||||
|
||||
// Store bytes, if we don't get a reasonable improvement.
|
||||
if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
return
|
||||
}
|
||||
|
||||
// Write Huffman table.
|
||||
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
|
||||
|
||||
// Write the tokens.
|
||||
w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
|
||||
}
|
||||
|
||||
// indexTokens indexes a slice of tokens, and updates
|
||||
// literalFreq and offsetFreq, and generates literalEncoding
|
||||
// and offsetEncoding.
|
||||
// The number of literal and offset tokens is returned.
|
||||
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
|
||||
for i := range w.literalFreq {
|
||||
w.literalFreq[i] = 0
|
||||
}
|
||||
for i := range w.offsetFreq {
|
||||
w.offsetFreq[i] = 0
|
||||
}
|
||||
|
||||
for _, t := range tokens {
|
||||
if t < matchType {
|
||||
w.literalFreq[t.literal()]++
|
||||
continue
|
||||
}
|
||||
length := t.length()
|
||||
offset := t.offset()
|
||||
w.literalFreq[lengthCodesStart+lengthCode(length)]++
|
||||
w.offsetFreq[offsetCode(offset)]++
|
||||
}
|
||||
|
||||
// get the number of literals
|
||||
numLiterals = len(w.literalFreq)
|
||||
for w.literalFreq[numLiterals-1] == 0 {
|
||||
numLiterals--
|
||||
}
|
||||
// get the number of offsets
|
||||
numOffsets = len(w.offsetFreq)
|
||||
for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
|
||||
numOffsets--
|
||||
}
|
||||
if numOffsets == 0 {
|
||||
// We haven't found a single match. If we want to go with the dynamic encoding,
|
||||
// we should count at least one offset to be sure that the offset huffman tree could be encoded.
|
||||
w.offsetFreq[0] = 1
|
||||
numOffsets = 1
|
||||
}
|
||||
w.literalEncoding.generate(w.literalFreq, 15)
|
||||
w.offsetEncoding.generate(w.offsetFreq, 15)
|
||||
return
|
||||
}
|
||||
|
||||
// writeTokens writes a slice of tokens to the output.
|
||||
// codes for literal and offset encoding must be supplied.
|
||||
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
for _, t := range tokens {
|
||||
if t < matchType {
|
||||
w.writeCode(leCodes[t.literal()])
|
||||
continue
|
||||
}
|
||||
// Write the length
|
||||
length := t.length()
|
||||
lengthCode := lengthCode(length)
|
||||
w.writeCode(leCodes[lengthCode+lengthCodesStart])
|
||||
extraLengthBits := uint(lengthExtraBits[lengthCode])
|
||||
if extraLengthBits > 0 {
|
||||
extraLength := int32(length - lengthBase[lengthCode])
|
||||
w.writeBits(extraLength, extraLengthBits)
|
||||
}
|
||||
// Write the offset
|
||||
offset := t.offset()
|
||||
offsetCode := offsetCode(offset)
|
||||
w.writeCode(oeCodes[offsetCode])
|
||||
extraOffsetBits := uint(offsetExtraBits[offsetCode])
|
||||
if extraOffsetBits > 0 {
|
||||
extraOffset := int32(offset - offsetBase[offsetCode])
|
||||
w.writeBits(extraOffset, extraOffsetBits)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// huffOffset is a static offset encoder used for huffman only encoding.
|
||||
// It can be reused since we will not be encoding offset values.
|
||||
var huffOffset *huffmanEncoder
|
||||
|
||||
func init() {
|
||||
w := newHuffmanBitWriter(nil)
|
||||
w.offsetFreq[0] = 1
|
||||
huffOffset = newHuffmanEncoder(offsetCodeCount)
|
||||
huffOffset.generate(w.offsetFreq, 15)
|
||||
}
|
||||
|
||||
// writeBlockHuff encodes a block of bytes as either
|
||||
// Huffman encoded literals or uncompressed bytes if the
|
||||
// result only gains very little from compression.
|
||||
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Clear histogram
|
||||
for i := range w.literalFreq {
|
||||
w.literalFreq[i] = 0
|
||||
}
|
||||
|
||||
// Add everything as literals
|
||||
histogram(input, w.literalFreq)
|
||||
|
||||
w.literalFreq[endBlockMarker] = 1
|
||||
|
||||
const numLiterals = endBlockMarker + 1
|
||||
const numOffsets = 1
|
||||
|
||||
w.literalEncoding.generate(w.literalFreq, 15)
|
||||
|
||||
// Figure out smallest code.
|
||||
// Always use dynamic Huffman or Store
|
||||
var numCodegens int
|
||||
|
||||
// Generate codegen and codegenFrequencies, which indicates how to encode
|
||||
// the literalEncoding and the offsetEncoding.
|
||||
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
|
||||
w.codegenEncoding.generate(w.codegenFreq[:], 7)
|
||||
size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
|
||||
|
||||
// Store bytes, if we don't get a reasonable improvement.
|
||||
if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
return
|
||||
}
|
||||
|
||||
// Huffman.
|
||||
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
|
||||
encoding := w.literalEncoding.codes[:257]
|
||||
n := w.nbytes
|
||||
for _, t := range input {
|
||||
// Bitwriting inlined, ~30% speedup
|
||||
c := encoding[t]
|
||||
w.bits |= uint64(c.code) << w.nbits
|
||||
w.nbits += uint(c.len)
|
||||
if w.nbits < 48 {
|
||||
continue
|
||||
}
|
||||
// Store 6 bytes
|
||||
bits := w.bits
|
||||
w.bits >>= 48
|
||||
w.nbits -= 48
|
||||
bytes := w.bytes[n : n+6]
|
||||
bytes[0] = byte(bits)
|
||||
bytes[1] = byte(bits >> 8)
|
||||
bytes[2] = byte(bits >> 16)
|
||||
bytes[3] = byte(bits >> 24)
|
||||
bytes[4] = byte(bits >> 32)
|
||||
bytes[5] = byte(bits >> 40)
|
||||
n += 6
|
||||
if n < bufferFlushSize {
|
||||
continue
|
||||
}
|
||||
w.write(w.bytes[:n])
|
||||
if w.err != nil {
|
||||
return // Return early in the event of write failures
|
||||
}
|
||||
n = 0
|
||||
}
|
||||
w.nbytes = n
|
||||
w.writeCode(encoding[endBlockMarker])
|
||||
}
|
344
vendor/github.com/klauspost/compress/flate/huffman_code.go
generated
vendored
Normal file
|
@@ -0,0 +1,344 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// hcode is a huffman code with a bit code and bit length.
|
||||
type hcode struct {
|
||||
code, len uint16
|
||||
}
|
||||
|
||||
type huffmanEncoder struct {
|
||||
codes []hcode
|
||||
freqcache []literalNode
|
||||
bitCount [17]int32
|
||||
lns byLiteral // stored to avoid repeated allocation in generate
|
||||
lfs byFreq // stored to avoid repeated allocation in generate
|
||||
}
|
||||
|
||||
type literalNode struct {
|
||||
literal uint16
|
||||
freq int32
|
||||
}
|
||||
|
||||
// A levelInfo describes the state of the constructed tree for a given depth.
|
||||
type levelInfo struct {
|
||||
// Our level. for better printing
|
||||
level int32
|
||||
|
||||
// The frequency of the last node at this level
|
||||
lastFreq int32
|
||||
|
||||
// The frequency of the next character to add to this level
|
||||
nextCharFreq int32
|
||||
|
||||
// The frequency of the next pair (from level below) to add to this level.
|
||||
// Only valid if the "needed" value of the next lower level is 0.
|
||||
nextPairFreq int32
|
||||
|
||||
// The number of chains remaining to generate for this level before moving
|
||||
// up to the next level
|
||||
needed int32
|
||||
}
|
||||
|
||||
// set sets the code and length of an hcode.
|
||||
func (h *hcode) set(code uint16, length uint16) {
|
||||
h.len = length
|
||||
h.code = code
|
||||
}
|
||||
|
||||
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
|
||||
|
||||
func newHuffmanEncoder(size int) *huffmanEncoder {
|
||||
return &huffmanEncoder{codes: make([]hcode, size)}
|
||||
}
|
||||
|
||||
// Generates a HuffmanCode corresponding to the fixed literal table
|
||||
func generateFixedLiteralEncoding() *huffmanEncoder {
|
||||
h := newHuffmanEncoder(maxNumLit)
|
||||
codes := h.codes
|
||||
var ch uint16
|
||||
for ch = 0; ch < maxNumLit; ch++ {
|
||||
var bits uint16
|
||||
var size uint16
|
||||
switch {
|
||||
case ch < 144:
|
||||
// size 8, 00110000 .. 10111111
|
||||
bits = ch + 48
|
||||
size = 8
|
||||
break
|
||||
case ch < 256:
|
||||
// size 9, 110010000 .. 111111111
|
||||
bits = ch + 400 - 144
|
||||
size = 9
|
||||
break
|
||||
case ch < 280:
|
||||
// size 7, 0000000 .. 0010111
|
||||
bits = ch - 256
|
||||
size = 7
|
||||
break
|
||||
default:
|
||||
// size 8, 11000000 .. 11000111
|
||||
bits = ch + 192 - 280
|
||||
size = 8
|
||||
}
|
||||
codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func generateFixedOffsetEncoding() *huffmanEncoder {
|
||||
h := newHuffmanEncoder(30)
|
||||
codes := h.codes
|
||||
for ch := range codes {
|
||||
codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
|
||||
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
|
||||
|
||||
func (h *huffmanEncoder) bitLength(freq []int32) int {
|
||||
var total int
|
||||
for i, f := range freq {
|
||||
if f != 0 {
|
||||
total += int(f) * int(h.codes[i].len)
|
||||
}
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
const maxBitsLimit = 16
|
||||
|
||||
// Return the number of literals assigned to each bit size in the Huffman encoding
|
||||
//
|
||||
// This method is only called when list.length >= 3
|
||||
// The cases of 0, 1, and 2 literals are handled by special case code.
|
||||
//
|
||||
// list An array of the literals with non-zero frequencies
|
||||
// and their associated frequencies. The array is in order of increasing
|
||||
// frequency, and has as its last element a special element with frequency
|
||||
// MaxInt32
|
||||
// maxBits The maximum number of bits that should be used to encode any literal.
|
||||
// Must be less than 16.
|
||||
// return An integer array in which array[i] indicates the number of literals
|
||||
// that should be encoded in i bits.
|
||||
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
||||
if maxBits >= maxBitsLimit {
|
||||
panic("flate: maxBits too large")
|
||||
}
|
||||
n := int32(len(list))
|
||||
list = list[0 : n+1]
|
||||
list[n] = maxNode()
|
||||
|
||||
// The tree can't have greater depth than n - 1, no matter what. This
|
||||
// saves a little bit of work in some small cases
|
||||
if maxBits > n-1 {
|
||||
maxBits = n - 1
|
||||
}
|
||||
|
||||
// Create information about each of the levels.
|
||||
// A bogus "Level 0" whose sole purpose is so that
|
||||
// level1.prev.needed==0. This makes level1.nextPairFreq
|
||||
// be a legitimate value that never gets chosen.
|
||||
var levels [maxBitsLimit]levelInfo
|
||||
// leafCounts[i] counts the number of literals at the left
|
||||
// of ancestors of the rightmost node at level i.
|
||||
// leafCounts[i][j] is the number of literals at the left
|
||||
// of the level j ancestor.
|
||||
var leafCounts [maxBitsLimit][maxBitsLimit]int32
|
||||
|
||||
for level := int32(1); level <= maxBits; level++ {
|
||||
// For every level, the first two items are the first two characters.
|
||||
// We initialize the levels as if we had already figured this out.
|
||||
levels[level] = levelInfo{
|
||||
level: level,
|
||||
lastFreq: list[1].freq,
|
||||
nextCharFreq: list[2].freq,
|
||||
nextPairFreq: list[0].freq + list[1].freq,
|
||||
}
|
||||
leafCounts[level][level] = 2
|
||||
if level == 1 {
|
||||
levels[level].nextPairFreq = math.MaxInt32
|
||||
}
|
||||
}
|
||||
|
||||
// We need a total of 2*n - 2 items at top level and have already generated 2.
|
||||
levels[maxBits].needed = 2*n - 4
|
||||
|
||||
level := maxBits
|
||||
for {
|
||||
l := &levels[level]
|
||||
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
|
||||
// We've run out of both leafs and pairs.
|
||||
// End all calculations for this level.
|
||||
// To make sure we never come back to this level or any lower level,
|
||||
// set nextPairFreq impossibly large.
|
||||
l.needed = 0
|
||||
levels[level+1].nextPairFreq = math.MaxInt32
|
||||
level++
|
||||
continue
|
||||
}
|
||||
|
||||
prevFreq := l.lastFreq
|
||||
if l.nextCharFreq < l.nextPairFreq {
|
||||
// The next item on this row is a leaf node.
|
||||
n := leafCounts[level][level] + 1
|
||||
l.lastFreq = l.nextCharFreq
|
||||
// Lower leafCounts are the same as the previous node.
|
||||
leafCounts[level][level] = n
|
||||
l.nextCharFreq = list[n].freq
|
||||
} else {
|
||||
// The next item on this row is a pair from the previous row.
|
||||
// nextPairFreq isn't valid until we generate two
|
||||
// more values in the level below
|
||||
l.lastFreq = l.nextPairFreq
|
||||
// Take leaf counts from the lower level, except counts[level] remains the same.
|
||||
copy(leafCounts[level][:level], leafCounts[level-1][:level])
|
||||
levels[l.level-1].needed = 2
|
||||
}
|
||||
|
||||
if l.needed--; l.needed == 0 {
|
||||
// We've done everything we need to do for this level.
|
||||
// Continue calculating one level up. Fill in nextPairFreq
|
||||
// of that level with the sum of the two nodes we've just calculated on
|
||||
// this level.
|
||||
if l.level == maxBits {
|
||||
// All done!
|
||||
break
|
||||
}
|
||||
levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
|
||||
level++
|
||||
} else {
|
||||
// If we stole from below, move down temporarily to replenish it.
|
||||
for levels[level-1].needed > 0 {
|
||||
level--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Something is wrong if at the end, the top level is null or hasn't used
|
||||
// all of the leaves.
|
||||
if leafCounts[maxBits][maxBits] != n {
|
||||
panic("leafCounts[maxBits][maxBits] != n")
|
||||
}
|
||||
|
||||
bitCount := h.bitCount[:maxBits+1]
|
||||
bits := 1
|
||||
counts := &leafCounts[maxBits]
|
||||
for level := maxBits; level > 0; level-- {
|
||||
// chain.leafCount gives the number of literals requiring at least "bits"
|
||||
// bits to encode.
|
||||
bitCount[bits] = counts[level] - counts[level-1]
|
||||
bits++
|
||||
}
|
||||
return bitCount
|
||||
}
|
||||
|
||||
// Look at the leaves and assign them a bit count and an encoding as specified
|
||||
// in RFC 1951 3.2.2
|
||||
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
|
||||
code := uint16(0)
|
||||
for n, bits := range bitCount {
|
||||
code <<= 1
|
||||
if n == 0 || bits == 0 {
|
||||
continue
|
||||
}
|
||||
// The literals list[len(list)-bits] .. list[len(list)-1]
|
||||
// are encoded using "bits" bits, and get the values
|
||||
// code, code + 1, .... The code values are
|
||||
// assigned in literal order (not frequency order).
|
||||
chunk := list[len(list)-int(bits):]
|
||||
|
||||
h.lns.sort(chunk)
|
||||
for _, node := range chunk {
|
||||
h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
|
||||
code++
|
||||
}
|
||||
list = list[0 : len(list)-int(bits)]
|
||||
}
|
||||
}
|
||||
|
||||
// Update this Huffman Code object to be the minimum code for the specified frequency count.
|
||||
//
|
||||
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
|
||||
// maxBits The maximum number of bits to use for any literal.
|
||||
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
|
||||
if h.freqcache == nil {
|
||||
// Allocate a reusable buffer with the longest possible frequency table.
|
||||
// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
|
||||
// The largest of these is maxNumLit, so we allocate for that case.
|
||||
h.freqcache = make([]literalNode, maxNumLit+1)
|
||||
}
|
||||
list := h.freqcache[:len(freq)+1]
|
||||
// Number of non-zero literals
|
||||
count := 0
|
||||
// Set list to be the set of all non-zero literals and their frequencies
|
||||
for i, f := range freq {
|
||||
if f != 0 {
|
||||
list[count] = literalNode{uint16(i), f}
|
||||
count++
|
||||
} else {
|
||||
list[count] = literalNode{}
|
||||
h.codes[i].len = 0
|
||||
}
|
||||
}
|
||||
list[len(freq)] = literalNode{}
|
||||
|
||||
list = list[:count]
|
||||
if count <= 2 {
|
||||
// Handle the small cases here, because they are awkward for the general case code. With
|
||||
// two or fewer literals, everything has bit length 1.
|
||||
for i, node := range list {
|
||||
// "list" is in order of increasing literal value.
|
||||
h.codes[node.literal].set(uint16(i), 1)
|
||||
}
|
||||
return
|
||||
}
|
||||
h.lfs.sort(list)
|
||||
|
||||
// Get the number of literals for each bit count
|
||||
bitCount := h.bitCounts(list, maxBits)
|
||||
// And do the assignment
|
||||
h.assignEncodingAndSize(bitCount, list)
|
||||
}
|
||||
|
||||
type byLiteral []literalNode
|
||||
|
||||
func (s *byLiteral) sort(a []literalNode) {
|
||||
*s = byLiteral(a)
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
func (s byLiteral) Len() int { return len(s) }
|
||||
|
||||
func (s byLiteral) Less(i, j int) bool {
|
||||
return s[i].literal < s[j].literal
|
||||
}
|
||||
|
||||
func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
type byFreq []literalNode
|
||||
|
||||
func (s *byFreq) sort(a []literalNode) {
|
||||
*s = byFreq(a)
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
func (s byFreq) Len() int { return len(s) }
|
||||
|
||||
func (s byFreq) Less(i, j int) bool {
|
||||
if s[i].freq == s[j].freq {
|
||||
return s[i].literal < s[j].literal
|
||||
}
|
||||
return s[i].freq < s[j].freq
|
||||
}
|
||||
|
||||
func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
880
vendor/github.com/klauspost/compress/flate/inflate.go
generated
vendored
Normal file
|
@@ -0,0 +1,880 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package flate implements the DEFLATE compressed data format, described in
|
||||
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
|
||||
// formats.
|
||||
package flate
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"math/bits"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
maxCodeLen = 16 // max length of Huffman code
|
||||
maxCodeLenMask = 15 // mask for max length of Huffman code
|
||||
// The next three numbers come from the RFC section 3.2.7, with the
|
||||
// additional proviso in section 3.2.5 which implies that distance codes
|
||||
// 30 and 31 should never occur in compressed data.
|
||||
maxNumLit = 286
|
||||
maxNumDist = 30
|
||||
numCodes = 19 // number of codes in Huffman meta-code
|
||||
)
|
||||
|
||||
// Initialize the fixedHuffmanDecoder only once upon first use.
|
||||
var fixedOnce sync.Once
|
||||
var fixedHuffmanDecoder huffmanDecoder
|
||||
|
||||
// A CorruptInputError reports the presence of corrupt input at a given offset.
|
||||
type CorruptInputError int64
|
||||
|
||||
func (e CorruptInputError) Error() string {
|
||||
return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
|
||||
}
|
||||
|
||||
// An InternalError reports an error in the flate code itself.
|
||||
type InternalError string
|
||||
|
||||
func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
|
||||
|
||||
// A ReadError reports an error encountered while reading input.
|
||||
//
|
||||
// Deprecated: No longer returned.
|
||||
type ReadError struct {
|
||||
Offset int64 // byte offset where error occurred
|
||||
Err error // error returned by underlying Read
|
||||
}
|
||||
|
||||
func (e *ReadError) Error() string {
|
||||
return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
// A WriteError reports an error encountered while writing output.
|
||||
//
|
||||
// Deprecated: No longer returned.
|
||||
type WriteError struct {
|
||||
Offset int64 // byte offset where error occurred
|
||||
Err error // error returned by underlying Write
|
||||
}
|
||||
|
||||
func (e *WriteError) Error() string {
|
||||
return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
|
||||
// switch to a new underlying Reader. This permits reusing a ReadCloser
|
||||
// instead of allocating a new one.
|
||||
type Resetter interface {
|
||||
// Reset discards any buffered data and resets the Resetter as if it was
|
||||
// newly initialized with the given reader.
|
||||
Reset(r io.Reader, dict []byte) error
|
||||
}
|
||||
|
||||
// The data structure for decoding Huffman tables is based on that of
|
||||
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
|
||||
// For codes smaller than the table width, there are multiple entries
|
||||
// (each combination of trailing bits has the same value). For codes
|
||||
// larger than the table width, the table contains a link to an overflow
|
||||
// table. The width of each entry in the link table is the maximum code
|
||||
// size minus the chunk width.
|
||||
//
|
||||
// Note that you can do a lookup in the table even without all bits
|
||||
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
|
||||
// have the property that shorter codes come before longer ones, the
|
||||
// bit length estimate in the result is a lower bound on the actual
|
||||
// number of bits.
|
||||
//
|
||||
// See the following:
|
||||
// http://www.gzip.org/algorithm.txt
|
||||
|
||||
// chunk & 15 is number of bits
|
||||
// chunk >> 4 is value, including table link
|
||||
|
||||
const (
|
||||
huffmanChunkBits = 9
|
||||
huffmanNumChunks = 1 << huffmanChunkBits
|
||||
huffmanCountMask = 15
|
||||
huffmanValueShift = 4
|
||||
)
|
||||
|
||||
type huffmanDecoder struct {
|
||||
min int // the minimum code length
|
||||
chunks *[huffmanNumChunks]uint32 // chunks as described above
|
||||
links [][]uint32 // overflow links
|
||||
linkMask uint32 // mask the width of the link table
|
||||
}
|
||||
|
||||
// Initialize Huffman decoding tables from array of code lengths.
|
||||
// Following this function, h is guaranteed to be initialized into a complete
|
||||
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
|
||||
// degenerate case where the tree has only a single symbol with length 1. Empty
|
||||
// trees are permitted.
|
||||
func (h *huffmanDecoder) init(lengths []int) bool {
|
||||
// Sanity enables additional runtime tests during Huffman
|
||||
// table construction. It's intended to be used during
|
||||
// development to supplement the currently ad-hoc unit tests.
|
||||
const sanity = false
|
||||
|
||||
if h.chunks == nil {
|
||||
h.chunks = &[huffmanNumChunks]uint32{}
|
||||
}
|
||||
if h.min != 0 {
|
||||
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
|
||||
}
|
||||
|
||||
// Count number of codes of each length,
|
||||
// compute min and max length.
|
||||
var count [maxCodeLen]int
|
||||
var min, max int
|
||||
for _, n := range lengths {
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
if min == 0 || n < min {
|
||||
min = n
|
||||
}
|
||||
if n > max {
|
||||
max = n
|
||||
}
|
||||
count[n&maxCodeLenMask]++
|
||||
}
|
||||
|
||||
// Empty tree. The decompressor.huffSym function will fail later if the tree
|
||||
// is used. Technically, an empty tree is only valid for the HDIST tree and
|
||||
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
|
||||
// is guaranteed to fail since it will attempt to use the tree to decode the
|
||||
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
|
||||
// guaranteed to fail later since the compressed data section must be
|
||||
// composed of at least one symbol (the end-of-block marker).
|
||||
if max == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
code := 0
|
||||
var nextcode [maxCodeLen]int
|
||||
for i := min; i <= max; i++ {
|
||||
code <<= 1
|
||||
nextcode[i&maxCodeLenMask] = code
|
||||
code += count[i&maxCodeLenMask]
|
||||
}
|
||||
|
||||
// Check that the coding is complete (i.e., that we've
|
||||
// assigned all 2-to-the-max possible bit sequences).
|
||||
// Exception: To be compatible with zlib, we also need to
|
||||
// accept degenerate single-code codings. See also
|
||||
// TestDegenerateHuffmanCoding.
|
||||
if code != 1<<uint(max) && !(code == 1 && max == 1) {
|
||||
return false
|
||||
}
|
||||
|
||||
h.min = min
|
||||
chunks := h.chunks[:]
|
||||
for i := range chunks {
|
||||
chunks[i] = 0
|
||||
}
|
||||
|
||||
if max > huffmanChunkBits {
|
||||
numLinks := 1 << (uint(max) - huffmanChunkBits)
|
||||
h.linkMask = uint32(numLinks - 1)
|
||||
|
||||
// create link tables
|
||||
link := nextcode[huffmanChunkBits+1] >> 1
|
||||
if cap(h.links) < huffmanNumChunks-link {
|
||||
h.links = make([][]uint32, huffmanNumChunks-link)
|
||||
} else {
|
||||
h.links = h.links[:huffmanNumChunks-link]
|
||||
}
|
||||
for j := uint(link); j < huffmanNumChunks; j++ {
|
||||
reverse := int(bits.Reverse16(uint16(j)))
|
||||
reverse >>= uint(16 - huffmanChunkBits)
|
||||
off := j - uint(link)
|
||||
if sanity && h.chunks[reverse] != 0 {
|
||||
panic("impossible: overwriting existing chunk")
|
||||
}
|
||||
h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
|
||||
if cap(h.links[off]) < numLinks {
|
||||
h.links[off] = make([]uint32, numLinks)
|
||||
} else {
|
||||
links := h.links[off][:0]
|
||||
h.links[off] = links[:numLinks]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
h.links = h.links[:0]
|
||||
}
|
||||
|
||||
for i, n := range lengths {
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
code := nextcode[n]
|
||||
nextcode[n]++
|
||||
chunk := uint32(i<<huffmanValueShift | n)
|
||||
reverse := int(bits.Reverse16(uint16(code)))
|
||||
reverse >>= uint(16 - n)
|
||||
if n <= huffmanChunkBits {
|
||||
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
|
||||
// We should never need to overwrite
|
||||
// an existing chunk. Also, 0 is
|
||||
// never a valid chunk, because the
|
||||
// lower 4 "count" bits should be
|
||||
// between 1 and 15.
|
||||
if sanity && h.chunks[off] != 0 {
|
||||
panic("impossible: overwriting existing chunk")
|
||||
}
|
||||
h.chunks[off] = chunk
|
||||
}
|
||||
} else {
|
||||
j := reverse & (huffmanNumChunks - 1)
|
||||
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
|
||||
// Longer codes should have been
|
||||
// associated with a link table above.
|
||||
panic("impossible: not an indirect chunk")
|
||||
}
|
||||
value := h.chunks[j] >> huffmanValueShift
|
||||
linktab := h.links[value]
|
||||
reverse >>= huffmanChunkBits
|
||||
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
|
||||
if sanity && linktab[off] != 0 {
|
||||
panic("impossible: overwriting existing chunk")
|
||||
}
|
||||
linktab[off] = chunk
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sanity {
|
||||
// Above we've sanity checked that we never overwrote
|
||||
// an existing entry. Here we additionally check that
|
||||
// we filled the tables completely.
|
||||
for i, chunk := range h.chunks {
|
||||
if chunk == 0 {
|
||||
// As an exception, in the degenerate
|
||||
// single-code case, we allow odd
|
||||
// chunks to be missing.
|
||||
if code == 1 && i%2 == 1 {
|
||||
continue
|
||||
}
|
||||
panic("impossible: missing chunk")
|
||||
}
|
||||
}
|
||||
for _, linktab := range h.links {
|
||||
for _, chunk := range linktab {
|
||||
if chunk == 0 {
|
||||
panic("impossible: missing chunk")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// The actual read interface needed by NewReader.
|
||||
// If the passed in io.Reader does not also have ReadByte,
|
||||
// the NewReader will introduce its own buffering.
|
||||
type Reader interface {
|
||||
io.Reader
|
||||
io.ByteReader
|
||||
}
|
||||
|
||||
// Decompress state.
|
||||
type decompressor struct {
|
||||
// Input source.
|
||||
r Reader
|
||||
roffset int64
|
||||
|
||||
// Input bits, in top of b.
|
||||
b uint32
|
||||
nb uint
|
||||
|
||||
// Huffman decoders for literal/length, distance.
|
||||
h1, h2 huffmanDecoder
|
||||
|
||||
// Length arrays used to define Huffman codes.
|
||||
bits *[maxNumLit + maxNumDist]int
|
||||
codebits *[numCodes]int
|
||||
|
||||
// Output history, buffer.
|
||||
dict dictDecoder
|
||||
|
||||
// Temporary buffer (avoids repeated allocation).
|
||||
buf [4]byte
|
||||
|
||||
// Next step in the decompression,
|
||||
// and decompression state.
|
||||
step func(*decompressor)
|
||||
stepState int
|
||||
final bool
|
||||
err error
|
||||
toRead []byte
|
||||
hl, hd *huffmanDecoder
|
||||
copyLen int
|
||||
copyDist int
|
||||
}
|
||||
|
||||
func (f *decompressor) nextBlock() {
|
||||
for f.nb < 1+2 {
|
||||
if f.err = f.moreBits(); f.err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
f.final = f.b&1 == 1
|
||||
f.b >>= 1
|
||||
typ := f.b & 3
|
||||
f.b >>= 2
|
||||
f.nb -= 1 + 2
|
||||
switch typ {
|
||||
case 0:
|
||||
f.dataBlock()
|
||||
case 1:
|
||||
// compressed, fixed Huffman tables
|
||||
f.hl = &fixedHuffmanDecoder
|
||||
f.hd = nil
|
||||
f.huffmanBlock()
|
||||
case 2:
|
||||
// compressed, dynamic Huffman tables
|
||||
if f.err = f.readHuffman(); f.err != nil {
|
||||
break
|
||||
}
|
||||
f.hl = &f.h1
|
||||
f.hd = &f.h2
|
||||
f.huffmanBlock()
|
||||
default:
|
||||
// 3 is reserved.
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *decompressor) Read(b []byte) (int, error) {
|
||||
for {
|
||||
if len(f.toRead) > 0 {
|
||||
n := copy(b, f.toRead)
|
||||
f.toRead = f.toRead[n:]
|
||||
if len(f.toRead) == 0 {
|
||||
return n, f.err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
if f.err != nil {
|
||||
return 0, f.err
|
||||
}
|
||||
f.step(f)
|
||||
if f.err != nil && len(f.toRead) == 0 {
|
||||
f.toRead = f.dict.readFlush() // Flush what's left in case of error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Support the io.WriteTo interface for io.Copy and friends.
|
||||
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
|
||||
total := int64(0)
|
||||
flushed := false
|
||||
for {
|
||||
if len(f.toRead) > 0 {
|
||||
n, err := w.Write(f.toRead)
|
||||
total += int64(n)
|
||||
if err != nil {
|
||||
f.err = err
|
||||
return total, err
|
||||
}
|
||||
if n != len(f.toRead) {
|
||||
return total, io.ErrShortWrite
|
||||
}
|
||||
f.toRead = f.toRead[:0]
|
||||
}
|
||||
if f.err != nil && flushed {
|
||||
if f.err == io.EOF {
|
||||
return total, nil
|
||||
}
|
||||
return total, f.err
|
||||
}
|
||||
if f.err == nil {
|
||||
f.step(f)
|
||||
}
|
||||
if len(f.toRead) == 0 && f.err != nil && !flushed {
|
||||
f.toRead = f.dict.readFlush() // Flush what's left in case of error
|
||||
flushed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *decompressor) Close() error {
|
||||
if f.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return f.err
|
||||
}
|
||||
|
||||
// RFC 1951 section 3.2.7.
|
||||
// Compression with dynamic Huffman codes
|
||||
|
||||
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
|
||||
|
||||
func (f *decompressor) readHuffman() error {
|
||||
// HLIT[5], HDIST[5], HCLEN[4].
|
||||
for f.nb < 5+5+4 {
|
||||
if err := f.moreBits(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
nlit := int(f.b&0x1F) + 257
|
||||
if nlit > maxNumLit {
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
f.b >>= 5
|
||||
ndist := int(f.b&0x1F) + 1
|
||||
if ndist > maxNumDist {
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
f.b >>= 5
|
||||
nclen := int(f.b&0xF) + 4
|
||||
// numCodes is 19, so nclen is always valid.
|
||||
f.b >>= 4
|
||||
f.nb -= 5 + 5 + 4
|
||||
|
||||
// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
|
||||
for i := 0; i < nclen; i++ {
|
||||
for f.nb < 3 {
|
||||
if err := f.moreBits(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
f.codebits[codeOrder[i]] = int(f.b & 0x7)
|
||||
f.b >>= 3
|
||||
f.nb -= 3
|
||||
}
|
||||
for i := nclen; i < len(codeOrder); i++ {
|
||||
f.codebits[codeOrder[i]] = 0
|
||||
}
|
||||
if !f.h1.init(f.codebits[0:]) {
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
|
||||
// HLIT + 257 code lengths, HDIST + 1 code lengths,
|
||||
// using the code length Huffman code.
|
||||
for i, n := 0, nlit+ndist; i < n; {
|
||||
x, err := f.huffSym(&f.h1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if x < 16 {
|
||||
// Actual length.
|
||||
f.bits[i] = x
|
||||
i++
|
||||
continue
|
||||
}
|
||||
// Repeat previous length or zero.
|
||||
var rep int
|
||||
var nb uint
|
||||
var b int
|
||||
switch x {
|
||||
default:
|
||||
return InternalError("unexpected length code")
|
||||
case 16:
|
||||
rep = 3
|
||||
nb = 2
|
||||
if i == 0 {
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
b = f.bits[i-1]
|
||||
case 17:
|
||||
rep = 3
|
||||
nb = 3
|
||||
b = 0
|
||||
case 18:
|
||||
rep = 11
|
||||
nb = 7
|
||||
b = 0
|
||||
}
|
||||
for f.nb < nb {
|
||||
if err := f.moreBits(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
rep += int(f.b & uint32(1<<nb-1))
|
||||
f.b >>= nb
|
||||
f.nb -= nb
|
||||
if i+rep > n {
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
for j := 0; j < rep; j++ {
|
||||
f.bits[i] = b
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
|
||||
return CorruptInputError(f.roffset)
|
||||
}
|
||||
|
||||
// As an optimization, we can initialize the min bits to read at a time
|
||||
// for the HLIT tree to the length of the EOB marker since we know that
|
||||
// every block must terminate with one. This preserves the property that
|
||||
// we never read any extra bytes after the end of the DEFLATE stream.
|
||||
if f.h1.min < f.bits[endBlockMarker] {
|
||||
f.h1.min = f.bits[endBlockMarker]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a single Huffman block from f.
|
||||
// hl and hd are the Huffman states for the lit/length values
|
||||
// and the distance values, respectively. If hd == nil, use the
|
||||
// fixed distance encoding associated with fixed Huffman blocks.
|
||||
func (f *decompressor) huffmanBlock() {
|
||||
const (
|
||||
stateInit = iota // Zero value must be stateInit
|
||||
stateDict
|
||||
)
|
||||
|
||||
switch f.stepState {
|
||||
case stateInit:
|
||||
goto readLiteral
|
||||
case stateDict:
|
||||
goto copyHistory
|
||||
}
|
||||
|
||||
readLiteral:
|
||||
// Read literal and/or (length, distance) according to RFC section 3.2.3.
|
||||
{
|
||||
v, err := f.huffSym(f.hl)
|
||||
if err != nil {
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
var n uint // number of bits extra
|
||||
var length int
|
||||
switch {
|
||||
case v < 256:
|
||||
f.dict.writeByte(byte(v))
|
||||
if f.dict.availWrite() == 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBlock
|
||||
f.stepState = stateInit
|
||||
return
|
||||
}
|
||||
goto readLiteral
|
||||
case v == 256:
|
||||
f.finishBlock()
|
||||
return
|
||||
// otherwise, reference to older data
|
||||
case v < 265:
|
||||
length = v - (257 - 3)
|
||||
n = 0
|
||||
case v < 269:
|
||||
length = v*2 - (265*2 - 11)
|
||||
n = 1
|
||||
case v < 273:
|
||||
length = v*4 - (269*4 - 19)
|
||||
n = 2
|
||||
case v < 277:
|
||||
length = v*8 - (273*8 - 35)
|
||||
n = 3
|
||||
case v < 281:
|
||||
length = v*16 - (277*16 - 67)
|
||||
n = 4
|
||||
case v < 285:
|
||||
length = v*32 - (281*32 - 131)
|
||||
n = 5
|
||||
case v < maxNumLit:
|
||||
length = 258
|
||||
n = 0
|
||||
default:
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
if n > 0 {
|
||||
for f.nb < n {
|
||||
if err = f.moreBits(); err != nil {
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
length += int(f.b & uint32(1<<n-1))
|
||||
f.b >>= n
|
||||
f.nb -= n
|
||||
}
|
||||
|
||||
var dist int
|
||||
if f.hd == nil {
|
||||
for f.nb < 5 {
|
||||
if err = f.moreBits(); err != nil {
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
|
||||
f.b >>= 5
|
||||
f.nb -= 5
|
||||
} else {
|
||||
if dist, err = f.huffSym(f.hd); err != nil {
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case dist < 4:
|
||||
dist++
|
||||
case dist < maxNumDist:
|
||||
nb := uint(dist-2) >> 1
|
||||
// have 1 bit in bottom of dist, need nb more.
|
||||
extra := (dist & 1) << nb
|
||||
for f.nb < nb {
|
||||
if err = f.moreBits(); err != nil {
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
extra |= int(f.b & uint32(1<<nb-1))
|
||||
f.b >>= nb
|
||||
f.nb -= nb
|
||||
dist = 1<<(nb+1) + 1 + extra
|
||||
default:
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
|
||||
// No check on length; encoding can be prescient.
|
||||
if dist > f.dict.histSize() {
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
|
||||
f.copyLen, f.copyDist = length, dist
|
||||
goto copyHistory
|
||||
}
|
||||
|
||||
copyHistory:
|
||||
// Perform a backwards copy according to RFC section 3.2.3.
|
||||
{
|
||||
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
|
||||
if cnt == 0 {
|
||||
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
|
||||
}
|
||||
f.copyLen -= cnt
|
||||
|
||||
if f.dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBlock // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
return
|
||||
}
|
||||
goto readLiteral
|
||||
}
|
||||
}
|
||||
|
||||
// Copy a single uncompressed data block from input to output.
|
||||
func (f *decompressor) dataBlock() {
|
||||
// Uncompressed.
|
||||
// Discard current half-byte.
|
||||
f.nb = 0
|
||||
f.b = 0
|
||||
|
||||
// Length then ones-complement of length.
|
||||
nr, err := io.ReadFull(f.r, f.buf[0:4])
|
||||
f.roffset += int64(nr)
|
||||
if err != nil {
|
||||
f.err = noEOF(err)
|
||||
return
|
||||
}
|
||||
n := int(f.buf[0]) | int(f.buf[1])<<8
|
||||
nn := int(f.buf[2]) | int(f.buf[3])<<8
|
||||
if uint16(nn) != uint16(^n) {
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.finishBlock()
|
||||
return
|
||||
}
|
||||
|
||||
f.copyLen = n
|
||||
f.copyData()
|
||||
}
|
||||
|
||||
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
|
||||
// It pauses for reads when f.hist is full.
|
||||
func (f *decompressor) copyData() {
|
||||
buf := f.dict.writeSlice()
|
||||
if len(buf) > f.copyLen {
|
||||
buf = buf[:f.copyLen]
|
||||
}
|
||||
|
||||
cnt, err := io.ReadFull(f.r, buf)
|
||||
f.roffset += int64(cnt)
|
||||
f.copyLen -= cnt
|
||||
f.dict.writeMark(cnt)
|
||||
if err != nil {
|
||||
f.err = noEOF(err)
|
||||
return
|
||||
}
|
||||
|
||||
if f.dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = (*decompressor).copyData
|
||||
return
|
||||
}
|
||||
f.finishBlock()
|
||||
}
|
||||
|
||||
func (f *decompressor) finishBlock() {
|
||||
if f.final {
|
||||
if f.dict.availRead() > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
f.err = io.EOF
|
||||
}
|
||||
f.step = (*decompressor).nextBlock
|
||||
}
|
||||
|
||||
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
|
||||
func noEOF(e error) error {
|
||||
if e == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (f *decompressor) moreBits() error {
|
||||
c, err := f.r.ReadByte()
|
||||
if err != nil {
|
||||
return noEOF(err)
|
||||
}
|
||||
f.roffset++
|
||||
f.b |= uint32(c) << f.nb
|
||||
f.nb += 8
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the next Huffman-encoded symbol from f according to h.
|
||||
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
|
||||
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
|
||||
// with a single element, huffSym must error on these two edge cases. In both
|
||||
// cases, the chunks slice will be 0 for the invalid sequence, leading it to
|
||||
// satisfy the n == 0 check below.
|
||||
n := uint(h.min)
|
||||
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
|
||||
// but is smart enough to keep local variables in registers, so use nb and b,
|
||||
// inline call to moreBits and reassign b,nb back to f on return.
|
||||
nb, b := f.nb, f.b
|
||||
for {
|
||||
for nb < n {
|
||||
c, err := f.r.ReadByte()
|
||||
if err != nil {
|
||||
f.b = b
|
||||
f.nb = nb
|
||||
return 0, noEOF(err)
|
||||
}
|
||||
f.roffset++
|
||||
b |= uint32(c) << (nb & 31)
|
||||
nb += 8
|
||||
}
|
||||
chunk := h.chunks[b&(huffmanNumChunks-1)]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
if n > huffmanChunkBits {
|
||||
chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
}
|
||||
if n <= nb {
|
||||
if n == 0 {
|
||||
f.b = b
|
||||
f.nb = nb
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return 0, f.err
|
||||
}
|
||||
f.b = b >> (n & 31)
|
||||
f.nb = nb - n
|
||||
return int(chunk >> huffmanValueShift), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeReader(r io.Reader) Reader {
|
||||
if rr, ok := r.(Reader); ok {
|
||||
return rr
|
||||
}
|
||||
return bufio.NewReader(r)
|
||||
}
|
||||
|
||||
func fixedHuffmanDecoderInit() {
|
||||
fixedOnce.Do(func() {
|
||||
// These come from the RFC section 3.2.6.
|
||||
var bits [288]int
|
||||
for i := 0; i < 144; i++ {
|
||||
bits[i] = 8
|
||||
}
|
||||
for i := 144; i < 256; i++ {
|
||||
bits[i] = 9
|
||||
}
|
||||
for i := 256; i < 280; i++ {
|
||||
bits[i] = 7
|
||||
}
|
||||
for i := 280; i < 288; i++ {
|
||||
bits[i] = 8
|
||||
}
|
||||
fixedHuffmanDecoder.init(bits[:])
|
||||
})
|
||||
}
|
||||
|
||||
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
|
||||
*f = decompressor{
|
||||
r: makeReader(r),
|
||||
bits: f.bits,
|
||||
codebits: f.codebits,
|
||||
h1: f.h1,
|
||||
h2: f.h2,
|
||||
dict: f.dict,
|
||||
step: (*decompressor).nextBlock,
|
||||
}
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewReader returns a new ReadCloser that can be used
|
||||
// to read the uncompressed version of r.
|
||||
// If r does not also implement io.ByteReader,
|
||||
// the decompressor may read more data than necessary from r.
|
||||
// It is the caller's responsibility to call Close on the ReadCloser
|
||||
// when finished reading.
|
||||
//
|
||||
// The ReadCloser returned by NewReader also implements Resetter.
|
||||
func NewReader(r io.Reader) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = (*decompressor).nextBlock
|
||||
f.dict.init(maxMatchOffset, nil)
|
||||
return &f
|
||||
}
|
||||
|
||||
// NewReaderDict is like NewReader but initializes the reader
|
||||
// with a preset dictionary. The returned Reader behaves as if
|
||||
// the uncompressed data stream started with the given dictionary,
|
||||
// which has already been read. NewReaderDict is typically used
|
||||
// to read data compressed by NewWriterDict.
|
||||
//
|
||||
// The ReadCloser returned by NewReaderDict also implements Resetter.
|
||||
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = (*decompressor).nextBlock
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return &f
|
||||
}
|
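// Illustrative sketch, not part of the vendored file: basic use of the
// decompressor defined above through the public NewReader / Resetter API.
// The compressed input is produced with flate.NewWriter purely so the
// example is self-contained.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Produce a small DEFLATE stream to read back.
	var buf bytes.Buffer
	fw, _ := flate.NewWriter(&buf, flate.DefaultCompression)
	fw.Write([]byte("hello, deflate"))
	fw.Close()

	// NewReader returns an io.ReadCloser that also implements Resetter.
	fr := flate.NewReader(bytes.NewReader(buf.Bytes()))
	out, _ := ioutil.ReadAll(fr)
	fmt.Println(string(out)) // hello, deflate

	// Reuse the same decompressor for a second stream instead of allocating.
	fr.(flate.Resetter).Reset(bytes.NewReader(buf.Bytes()), nil)
	out, _ = ioutil.ReadAll(fr)
	fmt.Println(string(out)) // hello, deflate
	fr.Close()
}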
48 vendor/github.com/klauspost/compress/flate/reverse_bits.go generated vendored Normal file
@@ -0,0 +1,48 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
var reverseByte = [256]byte{
|
||||
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
|
||||
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
|
||||
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
|
||||
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
|
||||
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
|
||||
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
|
||||
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
|
||||
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
|
||||
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
|
||||
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
|
||||
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
|
||||
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
|
||||
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
|
||||
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
|
||||
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
|
||||
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
|
||||
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
|
||||
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
|
||||
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
|
||||
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
|
||||
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
|
||||
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
|
||||
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
|
||||
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
|
||||
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
|
||||
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
|
||||
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
|
||||
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
|
||||
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
|
||||
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
|
||||
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
|
||||
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
|
||||
}
|
||||
|
||||
func reverseUint16(v uint16) uint16 {
|
||||
return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
|
||||
}
|
||||
|
||||
func reverseBits(number uint16, bitLength byte) uint16 {
|
||||
return reverseUint16(number << uint8(16-bitLength))
|
||||
}
|
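// Illustrative sketch, not part of the vendored file: DEFLATE's canonical
// Huffman codes are defined most-significant-bit first, while the decoder
// consumes bits least-significant-bit first, so each code is bit-reversed
// within its length. reverseByte/reverseBits above are unexported, so this
// standalone demo re-implements the same operation with a loop instead of
// the lookup table.
package main

import "fmt"

// reverse returns v with its lowest n bits reversed, like reverseBits above.
func reverse(v uint16, n byte) uint16 {
	var r uint16
	for i := byte(0); i < n; i++ {
		r = r<<1 | v&1
		v >>= 1
	}
	return r
}

func main() {
	// A 4-bit code 1011 becomes 1101 when bit-reversed.
	fmt.Printf("%04b\n", reverse(0b1011, 4)) // 1101
}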
900 vendor/github.com/klauspost/compress/flate/snappy.go generated vendored Normal file
@@ -0,0 +1,900 @@
|
|||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Modified for deflate by Klaus Post (c) 2015.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// emitLiteral writes a literal chunk to dst.
|
||||
func emitLiteral(dst *tokens, lit []byte) {
|
||||
ol := int(dst.n)
|
||||
for i, v := range lit {
|
||||
dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
|
||||
}
|
||||
dst.n += uint16(len(lit))
|
||||
}
|
||||
|
||||
// emitCopy writes a copy chunk to dst.
|
||||
func emitCopy(dst *tokens, offset, length int) {
|
||||
dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
|
||||
dst.n++
|
||||
}
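// Conceptual sketch, not part of the vendored file: emitLiteral and emitCopy
// above build a stream of literals and (offset, length) copies. This
// hypothetical decoder shows what such a stream means; it is not the flate
// token format itself, only the LZ77 idea behind it.
package main

import "fmt"

type cmd struct {
	lit    []byte // non-empty for a literal
	off, n int    // used when lit is nil: copy n bytes starting off bytes back
}

func decode(cmds []cmd) []byte {
	var out []byte
	for _, c := range cmds {
		if c.lit != nil {
			out = append(out, c.lit...)
			continue
		}
		for i := 0; i < c.n; i++ { // byte-by-byte so overlapping copies work
			out = append(out, out[len(out)-c.off])
		}
	}
	return out
}

func main() {
	// "abcabcabc" = literal "abc" + copy 6 bytes from 3 back.
	fmt.Println(string(decode([]cmd{{lit: []byte("abc")}, {off: 3, n: 6}})))
}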
|
||||
|
||||
type snappyEnc interface {
|
||||
Encode(dst *tokens, src []byte)
|
||||
Reset()
|
||||
}
|
||||
|
||||
func newSnappy(level int) snappyEnc {
|
||||
switch level {
|
||||
case 1:
|
||||
return &snappyL1{}
|
||||
case 2:
|
||||
return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
|
||||
case 3:
|
||||
return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
|
||||
case 4:
|
||||
return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
|
||||
default:
|
||||
panic("invalid level specified")
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
tableBits = 14 // Bits used in the table
|
||||
tableSize = 1 << tableBits // Size of the table
|
||||
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
|
||||
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
|
||||
baseMatchOffset = 1 // The smallest match offset
|
||||
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
|
||||
maxMatchOffset = 1 << 15 // The largest match offset
|
||||
)
|
||||
|
||||
func load32(b []byte, i int) uint32 {
|
||||
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func load64(b []byte, i int) uint64 {
|
||||
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
func hash(u uint32) uint32 {
|
||||
return (u * 0x1e35a7bd) >> tableShift
|
||||
}
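// Illustrative sketch, not part of the vendored file: the multiplicative hash
// above maps any 4-byte window to one of tableSize buckets, so equal windows
// always land in the same bucket and an earlier occurrence can be found with a
// single table lookup. The constants are copied from this file; the rest is a
// hypothetical standalone demo.
package main

import "fmt"

const (
	tableBits  = 14
	tableSize  = 1 << tableBits
	tableShift = 32 - tableBits
)

func hash(u uint32) uint32 { return (u * 0x1e35a7bd) >> tableShift }

func load32(b []byte, i int) uint32 {
	return uint32(b[i]) | uint32(b[i+1])<<8 | uint32(b[i+2])<<16 | uint32(b[i+3])<<24
}

func main() {
	src := []byte("abcdabcd")
	h0 := hash(load32(src, 0))            // bucket for the window "abcd" at offset 0
	h4 := hash(load32(src, 4))            // same contents at offset 4, same bucket
	fmt.Println(h0 == h4, h0 < tableSize) // true true
}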
|
||||
|
||||
// snappyL1 encapsulates level 1 compression
|
||||
type snappyL1 struct{}
|
||||
|
||||
func (e *snappyL1) Reset() {}
|
||||
|
||||
func (e *snappyL1) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 16 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Initialize the hash table.
|
||||
//
|
||||
// The table element type is uint16, as s < sLimit and sLimit < len(src)
|
||||
// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
|
||||
var table [tableSize]uint16
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
nextHash := hash(load32(src, s))
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := 32
|
||||
|
||||
nextS := s
|
||||
candidate := 0
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidate = int(table[nextHash&tableMask])
|
||||
table[nextHash&tableMask] = uint16(s)
|
||||
nextHash = hash(load32(src, nextS))
|
||||
if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
base := s
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
// This is an inlined version of Snappy's:
|
||||
// s = extendMatch(src, candidate+4, s+4)
|
||||
s += 4
|
||||
s1 := base + maxMatchLength
|
||||
if s1 > len(src) {
|
||||
s1 = len(src)
|
||||
}
|
||||
a := src[s:s1]
|
||||
b := src[candidate+4:]
|
||||
b = b[:len(a)]
|
||||
l := len(a)
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
l = i
|
||||
break
|
||||
}
|
||||
}
|
||||
s += l
|
||||
|
||||
// matchToken is flate's equivalent of Snappy's emitCopy.
|
||||
dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
|
||||
dst.n++
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load64(src, s-1)
|
||||
prevHash := hash(uint32(x >> 0))
|
||||
table[prevHash&tableMask] = uint16(s - 1)
|
||||
currHash := hash(uint32(x >> 8))
|
||||
candidate = int(table[currHash&tableMask])
|
||||
table[currHash&tableMask] = uint16(s)
|
||||
if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
|
||||
nextHash = hash(uint32(x >> 16))
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
||||
|
||||
type tableEntry struct {
|
||||
val uint32
|
||||
offset int32
|
||||
}
|
||||
|
||||
func load3232(b []byte, i int32) uint32 {
|
||||
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func load6432(b []byte, i int32) uint64 {
|
||||
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
// snappyGen maintains the table for matches,
|
||||
// and the previous byte block for level 2.
|
||||
// This is the generic implementation.
|
||||
type snappyGen struct {
|
||||
prev []byte
|
||||
cur int32
|
||||
}
|
||||
|
||||
// snappyL2 adds a hash table of recent matches to snappyGen and can
// match across block boundaries. This is the level 2 implementation.
|
||||
type snappyL2 struct {
|
||||
snappyGen
|
||||
table [tableSize]tableEntry
|
||||
}
|
||||
|
||||
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks, giving better compression at a small slowdown.
|
||||
func (e *snappyL2) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 8 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur > 1<<30 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
e.cur = maxStoreBlockSize
|
||||
}
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
e.cur += maxStoreBlockSize
|
||||
e.prev = e.prev[:0]
|
||||
return
|
||||
}
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := int32(0)
|
||||
s := int32(0)
|
||||
cv := load3232(src, s)
|
||||
nextHash := hash(cv)
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := int32(32)
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidate = e.table[nextHash&tableMask]
|
||||
now := load3232(src, nextS)
|
||||
e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
|
||||
nextHash = hash(now)
|
||||
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset > maxMatchOffset || cv != candidate.val {
|
||||
// Out of range or not matched.
|
||||
cv = now
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
s += 4
|
||||
t := candidate.offset - e.cur + 4
|
||||
l := e.matchlen(s, t, src)
|
||||
|
||||
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
|
||||
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
|
||||
dst.n++
|
||||
s += l
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
t += l
|
||||
// Index first pair after match end.
|
||||
if int(t+4) < len(src) && t > 0 {
|
||||
cv := load3232(src, t)
|
||||
e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6432(src, s-1)
|
||||
prevHash := hash(uint32(x))
|
||||
e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
|
||||
x >>= 8
|
||||
currHash := hash(uint32(x))
|
||||
candidate = e.table[currHash&tableMask]
|
||||
e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
|
||||
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset > maxMatchOffset || uint32(x) != candidate.val {
|
||||
cv = uint32(x >> 8)
|
||||
nextHash = hash(cv)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
e.cur += int32(len(src))
|
||||
e.prev = e.prev[:len(src)]
|
||||
copy(e.prev, src)
|
||||
}
|
||||
|
||||
type tableEntryPrev struct {
|
||||
Cur tableEntry
|
||||
Prev tableEntry
|
||||
}
|
||||
|
||||
// snappyL3 is the level 3 implementation; it keeps the current and the
// previous candidate for every hash bucket.
|
||||
type snappyL3 struct {
|
||||
snappyGen
|
||||
table [tableSize]tableEntryPrev
|
||||
}
|
||||
|
||||
// Encode uses a similar algorithm to level 2, but will check up to two candidates.
|
||||
func (e *snappyL3) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 8 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur > 1<<30 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntryPrev{}
|
||||
}
|
||||
e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
|
||||
}
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
e.cur += maxStoreBlockSize
|
||||
e.prev = e.prev[:0]
|
||||
return
|
||||
}
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := int32(0)
|
||||
s := int32(0)
|
||||
cv := load3232(src, s)
|
||||
nextHash := hash(cv)
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := int32(32)
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidates := e.table[nextHash&tableMask]
|
||||
now := load3232(src, nextS)
|
||||
e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
|
||||
nextHash = hash(now)
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
cv = now
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
s += 4
|
||||
t := candidate.offset - e.cur + 4
|
||||
l := e.matchlen(s, t, src)
|
||||
|
||||
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
|
||||
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
|
||||
dst.n++
|
||||
s += l
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
t += l
|
||||
// Index first pair after match end.
|
||||
if int(t+4) < len(src) && t > 0 {
|
||||
cv := load3232(src, t)
|
||||
nextHash = hash(cv)
|
||||
e.table[nextHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[nextHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + t, val: cv},
|
||||
}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-3 to s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6432(src, s-3)
|
||||
prevHash := hash(uint32(x))
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
currHash := hash(uint32(x))
|
||||
candidates := e.table[currHash&tableMask]
|
||||
cv = uint32(x)
|
||||
e.table[currHash&tableMask] = tableEntryPrev{
|
||||
Prev: candidates.Cur,
|
||||
Cur: tableEntry{offset: s + e.cur, val: cv},
|
||||
}
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
cv = uint32(x >> 8)
|
||||
nextHash = hash(cv)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
e.cur += int32(len(src))
|
||||
e.prev = e.prev[:len(src)]
|
||||
copy(e.prev, src)
|
||||
}
|
||||
|
||||
// snappyL4 is the level 4 implementation; it extends level 3 by falling back
// to the second candidate when the first match is short.
|
||||
type snappyL4 struct {
|
||||
snappyL3
|
||||
}
|
||||
|
||||
// Encode uses a similar algorithm to level 3,
// but will check a second candidate if the first match isn't long enough.
|
||||
func (e *snappyL4) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 8 - 3
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
matchLenGood = 12
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur > 1<<30 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntryPrev{}
|
||||
}
|
||||
e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
|
||||
}
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
e.cur += maxStoreBlockSize
|
||||
e.prev = e.prev[:0]
|
||||
return
|
||||
}
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := int32(0)
|
||||
s := int32(0)
|
||||
cv := load3232(src, s)
|
||||
nextHash := hash(cv)
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := int32(32)
|
||||
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
var candidateAlt tableEntry
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidates := e.table[nextHash&tableMask]
|
||||
now := load3232(src, nextS)
|
||||
e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
|
||||
nextHash = hash(now)
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset {
|
||||
offset = s - (candidates.Prev.offset - e.cur)
|
||||
if cv == candidates.Prev.val && offset < maxMatchOffset {
|
||||
candidateAlt = candidates.Prev
|
||||
}
|
||||
break
|
||||
}
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset < maxMatchOffset {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
cv = now
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
s += 4
|
||||
t := candidate.offset - e.cur + 4
|
||||
l := e.matchlen(s, t, src)
|
||||
// Try alternative candidate if match length < matchLenGood.
|
||||
if l < matchLenGood-4 && candidateAlt.offset != 0 {
|
||||
t2 := candidateAlt.offset - e.cur + 4
|
||||
l2 := e.matchlen(s, t2, src)
|
||||
if l2 > l {
|
||||
l = l2
|
||||
t = t2
|
||||
}
|
||||
}
|
||||
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
|
||||
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
|
||||
dst.n++
|
||||
s += l
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
t += l
|
||||
// Index first pair after match end.
|
||||
if int(t+4) < len(src) && t > 0 {
|
||||
cv := load3232(src, t)
|
||||
nextHash = hash(cv)
|
||||
e.table[nextHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[nextHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + t, val: cv},
|
||||
}
|
||||
}
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-3 to s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load6432(src, s-3)
|
||||
prevHash := hash(uint32(x))
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
|
||||
e.table[prevHash&tableMask] = tableEntryPrev{
|
||||
Prev: e.table[prevHash&tableMask].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
|
||||
}
|
||||
x >>= 8
|
||||
currHash := hash(uint32(x))
|
||||
candidates := e.table[currHash&tableMask]
|
||||
cv = uint32(x)
|
||||
e.table[currHash&tableMask] = tableEntryPrev{
|
||||
Prev: candidates.Cur,
|
||||
Cur: tableEntry{offset: s + e.cur, val: cv},
|
||||
}
|
||||
|
||||
// Check both candidates
|
||||
candidate = candidates.Cur
|
||||
candidateAlt = tableEntry{}
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
offset = s - (candidates.Prev.offset - e.cur)
|
||||
if cv == candidates.Prev.val && offset <= maxMatchOffset {
|
||||
candidateAlt = candidates.Prev
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
candidate = candidates.Prev
|
||||
if cv == candidate.val {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
cv = uint32(x >> 8)
|
||||
nextHash = hash(cv)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
e.cur += int32(len(src))
|
||||
e.prev = e.prev[:len(src)]
|
||||
copy(e.prev, src)
|
||||
}
|
||||
|
||||
func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
|
||||
s1 := int(s) + maxMatchLength - 4
|
||||
if s1 > len(src) {
|
||||
s1 = len(src)
|
||||
}
|
||||
|
||||
// If we are inside the current block
|
||||
if t >= 0 {
|
||||
b := src[t:]
|
||||
a := src[s:s1]
|
||||
b = b[:len(a)]
|
||||
// Extend the match to be as long as possible.
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return int32(i)
|
||||
}
|
||||
}
|
||||
return int32(len(a))
|
||||
}
|
||||
|
||||
// We found a match in the previous block.
|
||||
tp := int32(len(e.prev)) + t
|
||||
if tp < 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Extend the match to be as long as possible.
|
||||
a := src[s:s1]
|
||||
b := e.prev[tp:]
|
||||
if len(b) > len(a) {
|
||||
b = b[:len(a)]
|
||||
}
|
||||
a = a[:len(b)]
|
||||
for i := range b {
|
||||
if a[i] != b[i] {
|
||||
return int32(i)
|
||||
}
|
||||
}
|
||||
|
||||
// If we reached our limit, we matched everything we are
|
||||
// allowed to in the previous block and we return.
|
||||
n := int32(len(b))
|
||||
if int(s+n) == s1 {
|
||||
return n
|
||||
}
|
||||
|
||||
// Continue looking for more matches in the current block.
|
||||
a = src[s+n : s1]
|
||||
b = src[:len(a)]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return int32(i) + n
|
||||
}
|
||||
}
|
||||
return int32(len(a)) + n
|
||||
}
|
||||
|
||||
// Reset the encoding table.
|
||||
func (e *snappyGen) Reset() {
|
||||
e.prev = e.prev[:0]
|
||||
e.cur += maxMatchOffset
|
||||
}
|
115 vendor/github.com/klauspost/compress/flate/token.go generated vendored Normal file
@@ -0,0 +1,115 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
	// 2 bits:  type    0 = literal, 1 = EOF, 2 = match, 3 = unused
	// 8 bits:  xlength = length - MIN_MATCH_LENGTH
	// 22 bits: xoffset = offset - MIN_OFFSET_SIZE, or literal
|
||||
lengthShift = 22
|
||||
offsetMask = 1<<lengthShift - 1
|
||||
typeMask = 3 << 30
|
||||
literalType = 0 << 30
|
||||
matchType = 1 << 30
|
||||
)
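// Illustrative sketch, not part of the vendored file: how a match token packs
// its fields using the layout described above (2-bit type, 8-bit xlength,
// 22-bit xoffset), and how length()/offset() unpack them. The constants are
// copied from this file; the demo itself is hypothetical.
package main

import "fmt"

const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

type token uint32

func matchToken(xlength, xoffset uint32) token {
	return token(matchType + xlength<<lengthShift + xoffset)
}

func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
func (t token) offset() uint32 { return uint32(t) & offsetMask }

func main() {
	t := matchToken(5, 100)             // xlength = length-3, xoffset = offset-1
	fmt.Println(t.length(), t.offset()) // 5 100
}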
|
||||
|
||||
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
|
||||
// is lengthCodes[length - MIN_MATCH_LENGTH]
|
||||
var lengthCodes = [...]uint32{
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
|
||||
9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
|
||||
13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
|
||||
15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
|
||||
17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
|
||||
18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
|
||||
19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
|
||||
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
|
||||
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
|
||||
21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
|
||||
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
|
||||
22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
|
||||
23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
|
||||
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
|
||||
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
|
||||
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
|
||||
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
|
||||
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
|
||||
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
|
||||
25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
|
||||
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
|
||||
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
|
||||
26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
|
||||
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
|
||||
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
|
||||
27, 27, 27, 27, 27, 28,
|
||||
}
|
||||
|
||||
var offsetCodes = [...]uint32{
|
||||
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
|
||||
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
|
||||
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
|
||||
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
|
||||
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
|
||||
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
|
||||
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
|
||||
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
|
||||
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
|
||||
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
|
||||
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
|
||||
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
|
||||
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
|
||||
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
|
||||
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
|
||||
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
|
||||
}
|
||||
|
||||
type token uint32
|
||||
|
||||
type tokens struct {
|
||||
tokens [maxStoreBlockSize + 1]token
|
||||
n uint16 // Must be able to contain maxStoreBlockSize
|
||||
}
|
||||
|
||||
// Convert a literal into a literal token.
|
||||
func literalToken(literal uint32) token { return token(literalType + literal) }
|
||||
|
||||
// Convert a < xlength, xoffset > pair into a match token.
|
||||
func matchToken(xlength uint32, xoffset uint32) token {
|
||||
return token(matchType + xlength<<lengthShift + xoffset)
|
||||
}
|
||||
|
||||
func matchTokend(xlength uint32, xoffset uint32) token {
|
||||
if xlength > maxMatchLength || xoffset > maxMatchOffset {
|
||||
panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
|
||||
return token(matchType)
|
||||
}
|
||||
return token(matchType + xlength<<lengthShift + xoffset)
|
||||
}
|
||||
|
||||
// Returns the type of a token
|
||||
func (t token) typ() uint32 { return uint32(t) & typeMask }
|
||||
|
||||
// Returns the literal of a literal token
|
||||
func (t token) literal() uint32 { return uint32(t - literalType) }
|
||||
|
||||
// Returns the extra offset of a match token
|
||||
func (t token) offset() uint32 { return uint32(t) & offsetMask }
|
||||
|
||||
func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
|
||||
|
||||
func lengthCode(len uint32) uint32 { return lengthCodes[len] }
|
||||
|
||||
// Returns the offset code corresponding to a specific offset
|
||||
func offsetCode(off uint32) uint32 {
|
||||
if off < uint32(len(offsetCodes)) {
|
||||
return offsetCodes[off]
|
||||
} else if off>>7 < uint32(len(offsetCodes)) {
|
||||
return offsetCodes[off>>7] + 14
|
||||
} else {
|
||||
return offsetCodes[off>>14] + 28
|
||||
}
|
||||
}
|
344 vendor/github.com/klauspost/compress/gzip/gunzip.go generated vendored Normal file
@@ -0,0 +1,344 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gzip implements reading and writing of gzip format compressed files,
|
||||
// as specified in RFC 1952.
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/klauspost/compress/flate"
|
||||
)
|
||||
|
||||
const (
|
||||
gzipID1 = 0x1f
|
||||
gzipID2 = 0x8b
|
||||
gzipDeflate = 8
|
||||
flagText = 1 << 0
|
||||
flagHdrCrc = 1 << 1
|
||||
flagExtra = 1 << 2
|
||||
flagName = 1 << 3
|
||||
flagComment = 1 << 4
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
|
||||
ErrChecksum = errors.New("gzip: invalid checksum")
|
||||
// ErrHeader is returned when reading GZIP data that has an invalid header.
|
||||
ErrHeader = errors.New("gzip: invalid header")
|
||||
)
|
||||
|
||||
var le = binary.LittleEndian
|
||||
|
||||
// noEOF converts io.EOF to io.ErrUnexpectedEOF.
|
||||
func noEOF(err error) error {
|
||||
if err == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// The gzip file stores a header giving metadata about the compressed file.
|
||||
// That header is exposed as the fields of the Writer and Reader structs.
|
||||
//
|
||||
// Strings must be UTF-8 encoded and may only contain Unicode code points
|
||||
// U+0001 through U+00FF, due to limitations of the GZIP file format.
|
||||
type Header struct {
|
||||
Comment string // comment
|
||||
Extra []byte // "extra data"
|
||||
ModTime time.Time // modification time
|
||||
Name string // file name
|
||||
OS byte // operating system type
|
||||
}
|
||||
|
||||
// A Reader is an io.Reader that can be read to retrieve
|
||||
// uncompressed data from a gzip-format compressed file.
|
||||
//
|
||||
// In general, a gzip file can be a concatenation of gzip files,
|
||||
// each with its own header. Reads from the Reader
|
||||
// return the concatenation of the uncompressed data of each.
|
||||
// Only the first header is recorded in the Reader fields.
|
||||
//
|
||||
// Gzip files store a length and checksum of the uncompressed data.
|
||||
// The Reader will return an ErrChecksum when Read
|
||||
// reaches the end of the uncompressed data if it does not
|
||||
// have the expected length or checksum. Clients should treat data
|
||||
// returned by Read as tentative until they receive the io.EOF
|
||||
// marking the end of the data.
|
||||
type Reader struct {
|
||||
Header // valid after NewReader or Reader.Reset
|
||||
r flate.Reader
|
||||
decompressor io.ReadCloser
|
||||
digest uint32 // CRC-32, IEEE polynomial (section 8)
|
||||
size uint32 // Uncompressed size (section 2.3.1)
|
||||
buf [512]byte
|
||||
err error
|
||||
multistream bool
|
||||
}
|
||||
|
||||
// NewReader creates a new Reader reading the given reader.
|
||||
// If r does not also implement io.ByteReader,
|
||||
// the decompressor may read more data than necessary from r.
|
||||
//
|
||||
// It is the caller's responsibility to call Close on the Reader when done.
|
||||
//
|
||||
// The Reader.Header fields will be valid in the Reader returned.
|
||||
func NewReader(r io.Reader) (*Reader, error) {
|
||||
z := new(Reader)
|
||||
if err := z.Reset(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return z, nil
|
||||
}
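// Illustrative sketch, not part of the vendored file: a round trip through the
// Writer (defined in gzip.go below) and the Reader above, showing that header
// fields set before the first Write become available on the Reader after
// NewReader.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Name = "greeting.txt" // Header fields must be set before the first Write
	zw.Write([]byte("hello, gzip"))
	zw.Close()

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	body, _ := ioutil.ReadAll(zr)
	fmt.Println(zr.Name, string(body)) // greeting.txt hello, gzip
}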
|
||||
|
||||
// Reset discards the Reader z's state and makes it equivalent to the
|
||||
// result of its original state from NewReader, but reading from r instead.
|
||||
// This permits reusing a Reader rather than allocating a new one.
|
||||
func (z *Reader) Reset(r io.Reader) error {
|
||||
*z = Reader{
|
||||
decompressor: z.decompressor,
|
||||
multistream: true,
|
||||
}
|
||||
if rr, ok := r.(flate.Reader); ok {
|
||||
z.r = rr
|
||||
} else {
|
||||
z.r = bufio.NewReader(r)
|
||||
}
|
||||
z.Header, z.err = z.readHeader()
|
||||
return z.err
|
||||
}
|
||||
|
||||
// Multistream controls whether the reader supports multistream files.
|
||||
//
|
||||
// If enabled (the default), the Reader expects the input to be a sequence
|
||||
// of individually gzipped data streams, each with its own header and
|
||||
// trailer, ending at EOF. The effect is that the concatenation of a sequence
|
||||
// of gzipped files is treated as equivalent to the gzip of the concatenation
|
||||
// of the sequence. This is standard behavior for gzip readers.
|
||||
//
|
||||
// Calling Multistream(false) disables this behavior; disabling the behavior
|
||||
// can be useful when reading file formats that distinguish individual gzip
|
||||
// data streams or mix gzip data streams with other data streams.
|
||||
// In this mode, when the Reader reaches the end of the data stream,
|
||||
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
|
||||
// it will be left positioned just after the gzip stream.
|
||||
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
|
||||
// If there is no next stream, z.Reset(r) will return io.EOF.
|
||||
func (z *Reader) Multistream(ok bool) {
|
||||
z.multistream = ok
|
||||
}
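// Illustrative sketch, not part of the vendored file: reading two concatenated
// gzip members one at a time with Multistream(false), as described in the
// comment above. The members are built inline so the example is self-contained.
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/klauspost/compress/gzip"
)

func member(s string) []byte {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte(s))
	zw.Close()
	return buf.Bytes()
}

func main() {
	r := bytes.NewReader(append(member("first"), member("second")...))

	zr, err := gzip.NewReader(r)
	if err != nil {
		panic(err)
	}
	for {
		zr.Multistream(false)
		data, err := ioutil.ReadAll(zr)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(data)) // "first", then "second"

		// Advance to the next member; Reset returns io.EOF when none is left.
		if err := zr.Reset(r); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
	}
}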
|
||||
|
||||
// readString reads a NUL-terminated string from z.r.
|
||||
// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
|
||||
// will output a string encoded using UTF-8.
|
||||
// This method always updates z.digest with the data read.
|
||||
func (z *Reader) readString() (string, error) {
|
||||
var err error
|
||||
needConv := false
|
||||
for i := 0; ; i++ {
|
||||
if i >= len(z.buf) {
|
||||
return "", ErrHeader
|
||||
}
|
||||
z.buf[i], err = z.r.ReadByte()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if z.buf[i] > 0x7f {
|
||||
needConv = true
|
||||
}
|
||||
if z.buf[i] == 0 {
|
||||
// Digest covers the NUL terminator.
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
|
||||
|
||||
// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
|
||||
if needConv {
|
||||
s := make([]rune, 0, i)
|
||||
for _, v := range z.buf[:i] {
|
||||
s = append(s, rune(v))
|
||||
}
|
||||
return string(s), nil
|
||||
}
|
||||
return string(z.buf[:i]), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readHeader reads the GZIP header according to section 2.3.1.
|
||||
// This method does not set z.err.
|
||||
func (z *Reader) readHeader() (hdr Header, err error) {
|
||||
if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
|
||||
// RFC 1952, section 2.2, says the following:
|
||||
// A gzip file consists of a series of "members" (compressed data sets).
|
||||
//
|
||||
// Other than this, the specification does not clarify whether a
|
||||
// "series" is defined as "one or more" or "zero or more". To err on the
|
||||
// side of caution, Go interprets this to mean "zero or more".
|
||||
// Thus, it is okay to return io.EOF here.
|
||||
return hdr, err
|
||||
}
|
||||
if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
|
||||
return hdr, ErrHeader
|
||||
}
|
||||
flg := z.buf[3]
|
||||
hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
|
||||
// z.buf[8] is XFL and is currently ignored.
|
||||
hdr.OS = z.buf[9]
|
||||
z.digest = crc32.ChecksumIEEE(z.buf[:10])
|
||||
|
||||
if flg&flagExtra != 0 {
|
||||
if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
|
||||
return hdr, noEOF(err)
|
||||
}
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
|
||||
data := make([]byte, le.Uint16(z.buf[:2]))
|
||||
if _, err = io.ReadFull(z.r, data); err != nil {
|
||||
return hdr, noEOF(err)
|
||||
}
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
|
||||
hdr.Extra = data
|
||||
}
|
||||
|
||||
var s string
|
||||
if flg&flagName != 0 {
|
||||
if s, err = z.readString(); err != nil {
|
||||
return hdr, err
|
||||
}
|
||||
hdr.Name = s
|
||||
}
|
||||
|
||||
if flg&flagComment != 0 {
|
||||
if s, err = z.readString(); err != nil {
|
||||
return hdr, err
|
||||
}
|
||||
hdr.Comment = s
|
||||
}
|
||||
|
||||
if flg&flagHdrCrc != 0 {
|
||||
if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
|
||||
return hdr, noEOF(err)
|
||||
}
|
||||
digest := le.Uint16(z.buf[:2])
|
||||
if digest != uint16(z.digest) {
|
||||
return hdr, ErrHeader
|
||||
}
|
||||
}
|
||||
|
||||
z.digest = 0
|
||||
if z.decompressor == nil {
|
||||
z.decompressor = flate.NewReader(z.r)
|
||||
} else {
|
||||
z.decompressor.(flate.Resetter).Reset(z.r, nil)
|
||||
}
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
|
||||
func (z *Reader) Read(p []byte) (n int, err error) {
|
||||
if z.err != nil {
|
||||
return 0, z.err
|
||||
}
|
||||
|
||||
n, z.err = z.decompressor.Read(p)
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
|
||||
z.size += uint32(n)
|
||||
if z.err != io.EOF {
|
||||
// In the normal case we return here.
|
||||
return n, z.err
|
||||
}
|
||||
|
||||
// Finished file; check checksum and size.
|
||||
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
|
||||
z.err = noEOF(err)
|
||||
return n, z.err
|
||||
}
|
||||
digest := le.Uint32(z.buf[:4])
|
||||
size := le.Uint32(z.buf[4:8])
|
||||
if digest != z.digest || size != z.size {
|
||||
z.err = ErrChecksum
|
||||
return n, z.err
|
||||
}
|
||||
z.digest, z.size = 0, 0
|
||||
|
||||
// File is ok; check if there is another.
|
||||
if !z.multistream {
|
||||
return n, io.EOF
|
||||
}
|
||||
z.err = nil // Remove io.EOF
|
||||
|
||||
if _, z.err = z.readHeader(); z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
|
||||
// Read from next file, if necessary.
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
return z.Read(p)
|
||||
}
|
||||
|
||||
// Support the io.WriterTo interface for io.Copy and friends.
|
||||
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
|
||||
total := int64(0)
|
||||
crcWriter := crc32.NewIEEE()
|
||||
for {
|
||||
if z.err != nil {
|
||||
if z.err == io.EOF {
|
||||
return total, nil
|
||||
}
|
||||
return total, z.err
|
||||
}
|
||||
|
||||
// We write both to output and digest.
|
||||
mw := io.MultiWriter(w, crcWriter)
|
||||
n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
|
||||
total += n
|
||||
z.size += uint32(n)
|
||||
if err != nil {
|
||||
z.err = err
|
||||
return total, z.err
|
||||
}
|
||||
|
||||
// Finished file; check checksum + size.
|
||||
if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
z.err = err
|
||||
return total, err
|
||||
}
|
||||
z.digest = crcWriter.Sum32()
|
||||
digest := le.Uint32(z.buf[:4])
|
||||
size := le.Uint32(z.buf[4:8])
|
||||
if digest != z.digest || size != z.size {
|
||||
z.err = ErrChecksum
|
||||
return total, z.err
|
||||
}
|
||||
z.digest, z.size = 0, 0
|
||||
|
||||
// File is ok; check if there is another.
|
||||
if !z.multistream {
|
||||
return total, nil
|
||||
}
|
||||
crcWriter.Reset()
|
||||
z.err = nil // Remove io.EOF
|
||||
|
||||
if _, z.err = z.readHeader(); z.err != nil {
|
||||
if z.err == io.EOF {
|
||||
return total, nil
|
||||
}
|
||||
return total, z.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the Reader. It does not close the underlying io.Reader.
|
||||
// In order for the GZIP checksum to be verified, the reader must be
|
||||
// fully consumed until the io.EOF.
|
||||
func (z *Reader) Close() error { return z.decompressor.Close() }
|
251 vendor/github.com/klauspost/compress/gzip/gzip.go generated vendored Normal file
@@ -0,0 +1,251 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
|
||||
"github.com/klauspost/compress/flate"
|
||||
)
|
||||
|
||||
// These constants are copied from the flate package, so that code that imports
|
||||
// "compress/gzip" does not also have to import "compress/flate".
|
||||
const (
|
||||
NoCompression = flate.NoCompression
|
||||
BestSpeed = flate.BestSpeed
|
||||
BestCompression = flate.BestCompression
|
||||
DefaultCompression = flate.DefaultCompression
|
||||
ConstantCompression = flate.ConstantCompression
|
||||
HuffmanOnly = flate.HuffmanOnly
|
||||
)
|
||||
|
||||
// A Writer is an io.WriteCloser.
|
||||
// Writes to a Writer are compressed and written to w.
|
||||
type Writer struct {
|
||||
Header // written at first call to Write, Flush, or Close
|
||||
w io.Writer
|
||||
level int
|
||||
wroteHeader bool
|
||||
compressor *flate.Writer
|
||||
digest uint32 // CRC-32, IEEE polynomial (section 8)
|
||||
size uint32 // Uncompressed size (section 2.3.1)
|
||||
closed bool
|
||||
buf [10]byte
|
||||
err error
|
||||
}
|
||||
|
||||
// NewWriter returns a new Writer.
|
||||
// Writes to the returned writer are compressed and written to w.
|
||||
//
|
||||
// It is the caller's responsibility to call Close on the WriteCloser when done.
|
||||
// Writes may be buffered and not flushed until Close.
|
||||
//
|
||||
// Callers that wish to set the fields in Writer.Header must do so before
|
||||
// the first call to Write, Flush, or Close.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
z, _ := NewWriterLevel(w, DefaultCompression)
|
||||
return z
|
||||
}
|
||||
|
||||
// NewWriterLevel is like NewWriter but specifies the compression level instead
|
||||
// of assuming DefaultCompression.
|
||||
//
|
||||
// The compression level can be DefaultCompression, NoCompression, or any
|
||||
// integer value between BestSpeed and BestCompression inclusive. The error
|
||||
// returned will be nil if the level is valid.
|
||||
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
|
||||
if level < HuffmanOnly || level > BestCompression {
|
||||
return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
|
||||
}
|
||||
z := new(Writer)
|
||||
z.init(w, level)
|
||||
return z, nil
|
||||
}
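// Illustrative sketch, not part of the vendored file: NewWriterLevel above
// validates the level and returns an error for out-of-range values, unlike
// NewWriter, which always uses DefaultCompression.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	if _, err := gzip.NewWriterLevel(&buf, 42); err != nil {
		fmt.Println(err) // gzip: invalid compression level: 42
	}

	zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		panic(err)
	}
	zw.Write([]byte("data"))
	zw.Close()
	fmt.Println(buf.Len() > 0) // true
}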
|
||||
|
||||
func (z *Writer) init(w io.Writer, level int) {
|
||||
compressor := z.compressor
|
||||
if compressor != nil {
|
||||
compressor.Reset(w)
|
||||
}
|
||||
*z = Writer{
|
||||
Header: Header{
|
||||
OS: 255, // unknown
|
||||
},
|
||||
w: w,
|
||||
level: level,
|
||||
compressor: compressor,
|
||||
}
|
||||
}
|
||||
|
||||
// Reset discards the Writer z's state and makes it equivalent to the
|
||||
// result of its original state from NewWriter or NewWriterLevel, but
|
||||
// writing to w instead. This permits reusing a Writer rather than
|
||||
// allocating a new one.
|
||||
func (z *Writer) Reset(w io.Writer) {
|
||||
z.init(w, z.level)
|
||||
}
|
||||
|
||||
// writeBytes writes a length-prefixed byte slice to z.w.
|
||||
func (z *Writer) writeBytes(b []byte) error {
|
||||
if len(b) > 0xffff {
|
||||
return errors.New("gzip.Write: Extra data is too large")
|
||||
}
|
||||
le.PutUint16(z.buf[:2], uint16(len(b)))
|
||||
_, err := z.w.Write(z.buf[:2])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = z.w.Write(b)
|
||||
return err
|
||||
}
|
||||
|
||||
// writeString writes a UTF-8 string s in GZIP's format to z.w.
|
||||
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
|
||||
func (z *Writer) writeString(s string) (err error) {
|
||||
// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
|
||||
needconv := false
|
||||
for _, v := range s {
|
||||
if v == 0 || v > 0xff {
|
||||
return errors.New("gzip.Write: non-Latin-1 header string")
|
||||
}
|
||||
if v > 0x7f {
|
||||
needconv = true
|
||||
}
|
||||
}
|
||||
if needconv {
|
||||
b := make([]byte, 0, len(s))
|
||||
for _, v := range s {
|
||||
b = append(b, byte(v))
|
||||
}
|
||||
_, err = z.w.Write(b)
|
||||
} else {
|
||||
_, err = io.WriteString(z.w, s)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// GZIP strings are NUL-terminated.
|
||||
z.buf[0] = 0
|
||||
_, err = z.w.Write(z.buf[:1])
|
||||
return err
|
||||
}
|
||||
|
||||
// Write writes a compressed form of p to the underlying io.Writer. The
|
||||
// compressed bytes are not necessarily flushed until the Writer is closed.
|
||||
func (z *Writer) Write(p []byte) (int, error) {
|
||||
if z.err != nil {
|
||||
return 0, z.err
|
||||
}
|
||||
var n int
|
||||
// Write the GZIP header lazily.
|
||||
if !z.wroteHeader {
|
||||
z.wroteHeader = true
|
||||
z.buf[0] = gzipID1
|
||||
z.buf[1] = gzipID2
|
||||
z.buf[2] = gzipDeflate
|
||||
z.buf[3] = 0
|
||||
if z.Extra != nil {
|
||||
z.buf[3] |= 0x04
|
||||
}
|
||||
if z.Name != "" {
|
||||
z.buf[3] |= 0x08
|
||||
}
|
||||
if z.Comment != "" {
|
||||
z.buf[3] |= 0x10
|
||||
}
|
||||
le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
|
||||
if z.level == BestCompression {
|
||||
z.buf[8] = 2
|
||||
} else if z.level == BestSpeed {
|
||||
z.buf[8] = 4
|
||||
} else {
|
||||
z.buf[8] = 0
|
||||
}
|
||||
z.buf[9] = z.OS
|
||||
n, z.err = z.w.Write(z.buf[:10])
|
||||
if z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
if z.Extra != nil {
|
||||
z.err = z.writeBytes(z.Extra)
|
||||
if z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
}
|
||||
if z.Name != "" {
|
||||
z.err = z.writeString(z.Name)
|
||||
if z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
}
|
||||
if z.Comment != "" {
|
||||
z.err = z.writeString(z.Comment)
|
||||
if z.err != nil {
|
||||
return n, z.err
|
||||
}
|
||||
}
|
||||
if z.compressor == nil {
|
||||
z.compressor, _ = flate.NewWriter(z.w, z.level)
|
||||
}
|
||||
}
|
||||
z.size += uint32(len(p))
|
||||
z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
|
||||
n, z.err = z.compressor.Write(p)
|
||||
return n, z.err
|
||||
}
|
||||
|
||||
// Flush flushes any pending compressed data to the underlying writer.
|
||||
//
|
||||
// It is useful mainly in compressed network protocols, to ensure that
|
||||
// a remote reader has enough data to reconstruct a packet. Flush does
|
||||
// not return until the data has been written. If the underlying
|
||||
// writer returns an error, Flush returns that error.
|
||||
//
|
||||
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
|
||||
func (z *Writer) Flush() error {
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
if z.closed {
|
||||
return nil
|
||||
}
|
||||
if !z.wroteHeader {
|
||||
z.Write(nil)
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
}
|
||||
z.err = z.compressor.Flush()
|
||||
return z.err
|
||||
}
|
||||
|
||||
// Close closes the Writer, flushing any unwritten data to the underlying
|
||||
// io.Writer, but does not close the underlying io.Writer.
|
||||
func (z *Writer) Close() error {
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
if z.closed {
|
||||
return nil
|
||||
}
|
||||
z.closed = true
|
||||
if !z.wroteHeader {
|
||||
z.Write(nil)
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
}
|
||||
z.err = z.compressor.Close()
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
le.PutUint32(z.buf[:4], z.digest)
|
||||
le.PutUint32(z.buf[4:8], z.size)
|
||||
_, z.err = z.w.Write(z.buf[:8])
|
||||
return z.err
|
||||
}
|
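For orientation (editorial sketch, not part of the vendored sources): the writer above mirrors the standard library's compress/gzip API, so a minimal round trip through this package looks like the sketch below. It assumes only the stdlib-compatible identifiers visible in this file (NewWriterLevel, the embedded Header, Close) plus the package's matching gzip.NewReader.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Header fields must be set before the first Write, Flush, or Close,
	// because the gzip header is emitted lazily on the first write.
	zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	zw.Name = "hello.txt"
	if _, err := zw.Write([]byte("hello, gzip\n")); err != nil {
		log.Fatal(err)
	}
	// Close flushes the compressor and appends the CRC-32 and size trailer.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	plain, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %s", zr.Name, plain)
}
```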
vendor/github.com/klauspost/compress/zlib/reader.go (new file, 183 lines, generated, vendored)
@@ -0,0 +1,183 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package zlib implements reading and writing of zlib format compressed data,
|
||||
as specified in RFC 1950.
|
||||
|
||||
The implementation provides filters that uncompress during reading
|
||||
and compress during writing. For example, to write compressed data
|
||||
to a buffer:
|
||||
|
||||
var b bytes.Buffer
|
||||
w := zlib.NewWriter(&b)
|
||||
w.Write([]byte("hello, world\n"))
|
||||
w.Close()
|
||||
|
||||
and to read that data back:
|
||||
|
||||
r, err := zlib.NewReader(&b)
|
||||
io.Copy(os.Stdout, r)
|
||||
r.Close()
|
||||
*/
|
||||
package zlib
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"hash"
|
||||
"hash/adler32"
|
||||
"io"
|
||||
|
||||
"github.com/klauspost/compress/flate"
|
||||
)
|
||||
|
||||
const zlibDeflate = 8
|
||||
|
||||
var (
|
||||
// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
|
||||
ErrChecksum = errors.New("zlib: invalid checksum")
|
||||
// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
|
||||
ErrDictionary = errors.New("zlib: invalid dictionary")
|
||||
// ErrHeader is returned when reading ZLIB data that has an invalid header.
|
||||
ErrHeader = errors.New("zlib: invalid header")
|
||||
)
|
||||
|
||||
type reader struct {
|
||||
r flate.Reader
|
||||
decompressor io.ReadCloser
|
||||
digest hash.Hash32
|
||||
err error
|
||||
scratch [4]byte
|
||||
}
|
||||
|
||||
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
|
||||
type Resetter interface {
|
||||
// Reset discards any buffered data and resets the Resetter as if it was
|
||||
// newly initialized with the given reader.
|
||||
Reset(r io.Reader, dict []byte) error
|
||||
}
|
||||
|
||||
// NewReader creates a new ReadCloser.
|
||||
// Reads from the returned ReadCloser read and decompress data from r.
|
||||
// If r does not implement io.ByteReader, the decompressor may read more
|
||||
// data than necessary from r.
|
||||
// It is the caller's responsibility to call Close on the ReadCloser when done.
|
||||
//
|
||||
// The ReadCloser returned by NewReader also implements Resetter.
|
||||
func NewReader(r io.Reader) (io.ReadCloser, error) {
|
||||
return NewReaderDict(r, nil)
|
||||
}
|
||||
|
||||
// NewReaderDict is like NewReader but uses a preset dictionary.
|
||||
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
|
||||
// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary.
|
||||
//
|
||||
// The ReadCloser returned by NewReaderDict also implements Resetter.
|
||||
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
|
||||
z := new(reader)
|
||||
err := z.Reset(r, dict)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return z, nil
|
||||
}
|
||||
|
||||
func (z *reader) Read(p []byte) (int, error) {
|
||||
if z.err != nil {
|
||||
return 0, z.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, z.err = z.decompressor.Read(p)
|
||||
z.digest.Write(p[0:n])
|
||||
if z.err != io.EOF {
|
||||
// In the normal case we return here.
|
||||
return n, z.err
|
||||
}
|
||||
|
||||
// Finished file; check checksum.
|
||||
if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
z.err = err
|
||||
return n, z.err
|
||||
}
|
||||
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
|
||||
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
|
||||
if checksum != z.digest.Sum32() {
|
||||
z.err = ErrChecksum
|
||||
return n, z.err
|
||||
}
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
|
||||
// In order for the ZLIB checksum to be verified, the reader must be
|
||||
// fully consumed until the io.EOF.
|
||||
func (z *reader) Close() error {
|
||||
if z.err != nil && z.err != io.EOF {
|
||||
return z.err
|
||||
}
|
||||
z.err = z.decompressor.Close()
|
||||
return z.err
|
||||
}
|
||||
|
||||
func (z *reader) Reset(r io.Reader, dict []byte) error {
|
||||
*z = reader{decompressor: z.decompressor, digest: z.digest}
|
||||
if fr, ok := r.(flate.Reader); ok {
|
||||
z.r = fr
|
||||
} else {
|
||||
z.r = bufio.NewReader(r)
|
||||
}
|
||||
|
||||
// Read the header (RFC 1950 section 2.2.).
|
||||
_, z.err = io.ReadFull(z.r, z.scratch[0:2])
|
||||
if z.err != nil {
|
||||
if z.err == io.EOF {
|
||||
z.err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return z.err
|
||||
}
|
||||
h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
|
||||
if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
|
||||
z.err = ErrHeader
|
||||
return z.err
|
||||
}
|
||||
haveDict := z.scratch[1]&0x20 != 0
|
||||
if haveDict {
|
||||
_, z.err = io.ReadFull(z.r, z.scratch[0:4])
|
||||
if z.err != nil {
|
||||
if z.err == io.EOF {
|
||||
z.err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return z.err
|
||||
}
|
||||
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
|
||||
if checksum != adler32.Checksum(dict) {
|
||||
z.err = ErrDictionary
|
||||
return z.err
|
||||
}
|
||||
}
|
||||
|
||||
if z.decompressor == nil {
|
||||
if haveDict {
|
||||
z.decompressor = flate.NewReaderDict(z.r, dict)
|
||||
} else {
|
||||
z.decompressor = flate.NewReader(z.r)
|
||||
}
|
||||
} else {
|
||||
z.decompressor.(flate.Resetter).Reset(z.r, dict)
|
||||
}
|
||||
|
||||
if z.digest != nil {
|
||||
z.digest.Reset()
|
||||
} else {
|
||||
z.digest = adler32.New()
|
||||
}
|
||||
return nil
|
||||
}
|
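A quick usage note (editorial sketch, not vendored code): because the ReadCloser returned by NewReader also implements Resetter, one decompressor can be reused across several independent zlib streams. The sketch below assumes only the identifiers defined in this package (NewReader, Resetter, NewWriter from writer.go).

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zlib"
)

func main() {
	// Two independently zlib-compressed payloads.
	var a, b bytes.Buffer
	for _, pair := range []struct {
		dst *bytes.Buffer
		msg string
	}{{&a, "first payload\n"}, {&b, "second payload\n"}} {
		w := zlib.NewWriter(pair.dst)
		w.Write([]byte(pair.msg))
		w.Close()
	}

	// NewReader returns a ReadCloser that also implements zlib.Resetter,
	// so the same decompressor can be reused for the second stream.
	r, err := zlib.NewReader(&a)
	if err != nil {
		log.Fatal(err)
	}
	io.Copy(os.Stdout, r)

	if err := r.(zlib.Resetter).Reset(&b, nil); err != nil {
		log.Fatal(err)
	}
	io.Copy(os.Stdout, r)
	r.Close()
	fmt.Println("done")
}
```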
vendor/github.com/klauspost/compress/zlib/writer.go (new file, 201 lines, generated, vendored)
@@ -0,0 +1,201 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package zlib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/adler32"
|
||||
"io"
|
||||
|
||||
"github.com/klauspost/compress/flate"
|
||||
)
|
||||
|
||||
// These constants are copied from the flate package, so that code that imports
|
||||
// "compress/zlib" does not also have to import "compress/flate".
|
||||
const (
|
||||
NoCompression = flate.NoCompression
|
||||
BestSpeed = flate.BestSpeed
|
||||
BestCompression = flate.BestCompression
|
||||
DefaultCompression = flate.DefaultCompression
|
||||
ConstantCompression = flate.ConstantCompression
|
||||
HuffmanOnly = flate.HuffmanOnly
|
||||
)
|
||||
|
||||
// A Writer takes data written to it and writes the compressed
|
||||
// form of that data to an underlying writer (see NewWriter).
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
level int
|
||||
dict []byte
|
||||
compressor *flate.Writer
|
||||
digest hash.Hash32
|
||||
err error
|
||||
scratch [4]byte
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer.
|
||||
// Writes to the returned Writer are compressed and written to w.
|
||||
//
|
||||
// It is the caller's responsibility to call Close on the WriteCloser when done.
|
||||
// Writes may be buffered and not flushed until Close.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
z, _ := NewWriterLevelDict(w, DefaultCompression, nil)
|
||||
return z
|
||||
}
|
||||
|
||||
// NewWriterLevel is like NewWriter but specifies the compression level instead
|
||||
// of assuming DefaultCompression.
|
||||
//
|
||||
// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
|
||||
// or any integer value between BestSpeed and BestCompression inclusive.
|
||||
// The error returned will be nil if the level is valid.
|
||||
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
|
||||
return NewWriterLevelDict(w, level, nil)
|
||||
}
|
||||
|
||||
// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
|
||||
// compress with.
|
||||
//
|
||||
// The dictionary may be nil. If not, its contents should not be modified until
|
||||
// the Writer is closed.
|
||||
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
|
||||
if level < HuffmanOnly || level > BestCompression {
|
||||
return nil, fmt.Errorf("zlib: invalid compression level: %d", level)
|
||||
}
|
||||
return &Writer{
|
||||
w: w,
|
||||
level: level,
|
||||
dict: dict,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Reset clears the state of the Writer z such that it is equivalent to its
|
||||
// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing
|
||||
// to w.
|
||||
func (z *Writer) Reset(w io.Writer) {
|
||||
z.w = w
|
||||
// z.level and z.dict left unchanged.
|
||||
if z.compressor != nil {
|
||||
z.compressor.Reset(w)
|
||||
}
|
||||
if z.digest != nil {
|
||||
z.digest.Reset()
|
||||
}
|
||||
z.err = nil
|
||||
z.scratch = [4]byte{}
|
||||
z.wroteHeader = false
|
||||
}
|
||||
|
||||
// writeHeader writes the ZLIB header.
|
||||
func (z *Writer) writeHeader() (err error) {
|
||||
z.wroteHeader = true
|
||||
// ZLIB has a two-byte header (as documented in RFC 1950).
|
||||
// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
|
||||
// The next four bits is the CM (compression method), which is 8 for deflate.
|
||||
z.scratch[0] = 0x78
|
||||
// The next two bits is the FLEVEL (compression level). The four values are:
|
||||
// 0=fastest, 1=fast, 2=default, 3=best.
|
||||
// The next bit, FDICT, is set if a dictionary is given.
|
||||
// The final five FCHECK bits form a mod-31 checksum.
|
||||
switch z.level {
|
||||
case -2, 0, 1:
|
||||
z.scratch[1] = 0 << 6
|
||||
case 2, 3, 4, 5:
|
||||
z.scratch[1] = 1 << 6
|
||||
case 6, -1:
|
||||
z.scratch[1] = 2 << 6
|
||||
case 7, 8, 9:
|
||||
z.scratch[1] = 3 << 6
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
if z.dict != nil {
|
||||
z.scratch[1] |= 1 << 5
|
||||
}
|
||||
z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
|
||||
if _, err = z.w.Write(z.scratch[0:2]); err != nil {
|
||||
return err
|
||||
}
|
||||
if z.dict != nil {
|
||||
// The next four bytes are the Adler-32 checksum of the dictionary.
|
||||
checksum := adler32.Checksum(z.dict)
|
||||
z.scratch[0] = uint8(checksum >> 24)
|
||||
z.scratch[1] = uint8(checksum >> 16)
|
||||
z.scratch[2] = uint8(checksum >> 8)
|
||||
z.scratch[3] = uint8(checksum >> 0)
|
||||
if _, err = z.w.Write(z.scratch[0:4]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if z.compressor == nil {
|
||||
// Initialize deflater unless the Writer is being reused
|
||||
// after a Reset call.
|
||||
z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
z.digest = adler32.New()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes a compressed form of p to the underlying io.Writer. The
|
||||
// compressed bytes are not necessarily flushed until the Writer is closed or
|
||||
// explicitly flushed.
|
||||
func (z *Writer) Write(p []byte) (n int, err error) {
|
||||
if !z.wroteHeader {
|
||||
z.err = z.writeHeader()
|
||||
}
|
||||
if z.err != nil {
|
||||
return 0, z.err
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
n, err = z.compressor.Write(p)
|
||||
if err != nil {
|
||||
z.err = err
|
||||
return
|
||||
}
|
||||
z.digest.Write(p)
|
||||
return
|
||||
}
|
||||
|
||||
// Flush flushes the Writer to its underlying io.Writer.
|
||||
func (z *Writer) Flush() error {
|
||||
if !z.wroteHeader {
|
||||
z.err = z.writeHeader()
|
||||
}
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
z.err = z.compressor.Flush()
|
||||
return z.err
|
||||
}
|
||||
|
||||
// Close closes the Writer, flushing any unwritten data to the underlying
|
||||
// io.Writer, but does not close the underlying io.Writer.
|
||||
func (z *Writer) Close() error {
|
||||
if !z.wroteHeader {
|
||||
z.err = z.writeHeader()
|
||||
}
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
z.err = z.compressor.Close()
|
||||
if z.err != nil {
|
||||
return z.err
|
||||
}
|
||||
checksum := z.digest.Sum32()
|
||||
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
|
||||
z.scratch[0] = uint8(checksum >> 24)
|
||||
z.scratch[1] = uint8(checksum >> 16)
|
||||
z.scratch[2] = uint8(checksum >> 8)
|
||||
z.scratch[3] = uint8(checksum >> 0)
|
||||
_, z.err = z.w.Write(z.scratch[0:4])
|
||||
return z.err
|
||||
}
|
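Editorial sketch, not vendored code: NewWriterLevelDict pairs with NewReaderDict from reader.go, and both sides must agree on the preset dictionary or the reader fails with ErrDictionary. A minimal round trip, using only identifiers defined in this package:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/zlib"
)

func main() {
	// A preset dictionary helps when many small messages share common substrings.
	dict := []byte(`{"level":"info","msg":"`)

	var buf bytes.Buffer
	w, err := zlib.NewWriterLevelDict(&buf, zlib.BestCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte(`{"level":"info","msg":"hello"}`)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// The reader must be given the same dictionary, otherwise it returns ErrDictionary.
	r, err := zlib.NewReaderDict(&buf, dict)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```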
vendor/moul.io/http2curl/.gitignore → vendor/github.com/klauspost/cpuid/.gitignore (renamed, 0 lines changed, generated, vendored)
vendor/github.com/klauspost/cpuid/.travis.yml (new file, 23 lines, generated, vendored)
@@ -0,0 +1,23 @@
language: go

sudo: false

os:
  - linux
  - osx
go:
  - 1.8.x
  - 1.9.x
  - 1.10.x
  - master

script:
  - go vet ./...
  - go test -v ./...
  - go test -race ./...
  - diff <(gofmt -d .) <("")

matrix:
  allow_failures:
    - go: 'master'
  fast_finish: true
vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
generated
vendored
Normal file
35
vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2015- Klaus Post & Contributors.
|
||||
Email: klauspost@gmail.com
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
22
vendor/github.com/klauspost/cpuid/LICENSE
generated
vendored
Normal file
22
vendor/github.com/klauspost/cpuid/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Klaus Post
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
vendor/github.com/klauspost/cpuid/README.md (new file, 145 lines, generated, vendored)
@@ -0,0 +1,145 @@
# cpuid
|
||||
Package cpuid provides information about the CPU running the current program.
|
||||
|
||||
CPU features are detected on startup, and kept for fast access through the life of the application.
|
||||
Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
|
||||
|
||||
You can access the CPU information by accessing the shared CPU variable of the cpuid library.
|
||||
|
||||
Package home: https://github.com/klauspost/cpuid
|
||||
|
||||
[![GoDoc][1]][2] [![Build Status][3]][4]
|
||||
|
||||
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
|
||||
[2]: https://godoc.org/github.com/klauspost/cpuid
|
||||
[3]: https://travis-ci.org/klauspost/cpuid.svg
|
||||
[4]: https://travis-ci.org/klauspost/cpuid
|
||||
|
||||
# features
|
||||
## CPU Instructions
|
||||
* **CMOV** (i686 CMOV)
|
||||
* **NX** (NX (No-Execute) bit)
|
||||
* **AMD3DNOW** (AMD 3DNOW)
|
||||
* **AMD3DNOWEXT** (AMD 3DNowExt)
|
||||
* **MMX** (standard MMX)
|
||||
* **MMXEXT** (SSE integer functions or AMD MMX ext)
|
||||
* **SSE** (SSE functions)
|
||||
* **SSE2** (P4 SSE functions)
|
||||
* **SSE3** (Prescott SSE3 functions)
|
||||
* **SSSE3** (Conroe SSSE3 functions)
|
||||
* **SSE4** (Penryn SSE4.1 functions)
|
||||
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
|
||||
* **SSE42** (Nehalem SSE4.2 functions)
|
||||
* **AVX** (AVX functions)
|
||||
* **AVX2** (AVX2 functions)
|
||||
* **FMA3** (Intel FMA 3)
|
||||
* **FMA4** (Bulldozer FMA4 functions)
|
||||
* **XOP** (Bulldozer XOP functions)
|
||||
* **F16C** (Half-precision floating-point conversion)
|
||||
* **BMI1** (Bit Manipulation Instruction Set 1)
|
||||
* **BMI2** (Bit Manipulation Instruction Set 2)
|
||||
* **TBM** (AMD Trailing Bit Manipulation)
|
||||
* **LZCNT** (LZCNT instruction)
|
||||
* **POPCNT** (POPCNT instruction)
|
||||
* **AESNI** (Advanced Encryption Standard New Instructions)
|
||||
* **CLMUL** (Carry-less Multiplication)
|
||||
* **HTT** (Hyperthreading (enabled))
|
||||
* **HLE** (Hardware Lock Elision)
|
||||
* **RTM** (Restricted Transactional Memory)
|
||||
* **RDRAND** (RDRAND instruction is available)
|
||||
* **RDSEED** (RDSEED instruction is available)
|
||||
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
|
||||
* **SHA** (Intel SHA Extensions)
|
||||
* **AVX512F** (AVX-512 Foundation)
|
||||
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
|
||||
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
|
||||
* **AVX512PF** (AVX-512 Prefetch Instructions)
|
||||
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
|
||||
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
|
||||
* **AVX512BW** (AVX-512 Byte and Word Instructions)
|
||||
* **AVX512VL** (AVX-512 Vector Length Extensions)
|
||||
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
|
||||
* **MPX** (Intel MPX (Memory Protection Extensions))
|
||||
* **ERMS** (Enhanced REP MOVSB/STOSB)
|
||||
* **RDTSCP** (RDTSCP Instruction)
|
||||
* **CX16** (CMPXCHG16B Instruction)
|
||||
* **SGX** (Software Guard Extensions, with activation details)
|
||||
|
||||
## Performance
|
||||
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
|
||||
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
|
||||
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
|
||||
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
|
||||
* **Cache line** (Probable size of a cache line).
|
||||
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
|
||||
|
||||
## Cpu Vendor/VM
|
||||
* **Intel**
|
||||
* **AMD**
|
||||
* **VIA**
|
||||
* **Transmeta**
|
||||
* **NSC**
|
||||
* **KVM** (Kernel-based Virtual Machine)
|
||||
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
|
||||
* **VMware**
|
||||
* **XenHVM**
|
||||
|
||||
# installing
|
||||
|
||||
```go get github.com/klauspost/cpuid```
|
||||
|
||||
# example
|
||||
|
||||
```Go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/klauspost/cpuid"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Print basic CPU information:
|
||||
fmt.Println("Name:", cpuid.CPU.BrandName)
|
||||
fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
|
||||
fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
|
||||
fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
|
||||
fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
|
||||
fmt.Println("Features:", cpuid.CPU.Features)
|
||||
fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
|
||||
fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
|
||||
fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
|
||||
fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
|
||||
fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
|
||||
|
||||
// Test if we have a specific feature:
|
||||
if cpuid.CPU.SSE() {
|
||||
fmt.Println("We have Streaming SIMD Extensions")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Sample output:
|
||||
```
|
||||
>go run main.go
|
||||
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
|
||||
PhysicalCores: 2
|
||||
ThreadsPerCore: 2
|
||||
LogicalCores: 4
|
||||
Family 6 Model: 42
|
||||
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
|
||||
Cacheline bytes: 64
|
||||
We have Streaming SIMD Extensions
|
||||
```
|
||||
|
||||
# private package
|
||||
|
||||
In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
|
||||
|
||||
For this purpose all exports are removed, and functions and constants are lowercased.
|
||||
|
||||
This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
|
||||
|
||||
# license
|
||||
|
||||
This code is published under an MIT license. See LICENSE file for more information.
|
vendor/github.com/klauspost/cpuid/cpuid.go (new file, 1040 lines, generated, vendored)
File diff suppressed because it is too large.
vendor/github.com/klauspost/cpuid/cpuid_386.s (new file, 42 lines, generated, vendored)
@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
// +build 386,!gccgo
|
||||
|
||||
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuid(SB), 7, $0
|
||||
XORL CX, CX
|
||||
MOVL op+0(FP), AX
|
||||
CPUID
|
||||
MOVL AX, eax+4(FP)
|
||||
MOVL BX, ebx+8(FP)
|
||||
MOVL CX, ecx+12(FP)
|
||||
MOVL DX, edx+16(FP)
|
||||
RET
|
||||
|
||||
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuidex(SB), 7, $0
|
||||
MOVL op+0(FP), AX
|
||||
MOVL op2+4(FP), CX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func xgetbv(index uint32) (eax, edx uint32)
|
||||
TEXT ·asmXgetbv(SB), 7, $0
|
||||
MOVL index+0(FP), CX
|
||||
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
|
||||
MOVL AX, eax+4(FP)
|
||||
MOVL DX, edx+8(FP)
|
||||
RET
|
||||
|
||||
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmRdtscpAsm(SB), 7, $0
|
||||
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
|
||||
MOVL AX, eax+0(FP)
|
||||
MOVL BX, ebx+4(FP)
|
||||
MOVL CX, ecx+8(FP)
|
||||
MOVL DX, edx+12(FP)
|
||||
RET
|
vendor/github.com/klauspost/cpuid/cpuid_amd64.s (new file, 42 lines, generated, vendored)
@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build amd64,!gccgo
|
||||
|
||||
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuid(SB), 7, $0
|
||||
XORQ CX, CX
|
||||
MOVL op+0(FP), AX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuidex(SB), 7, $0
|
||||
MOVL op+0(FP), AX
|
||||
MOVL op2+4(FP), CX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func asmXgetbv(index uint32) (eax, edx uint32)
|
||||
TEXT ·asmXgetbv(SB), 7, $0
|
||||
MOVL index+0(FP), CX
|
||||
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL DX, edx+12(FP)
|
||||
RET
|
||||
|
||||
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmRdtscpAsm(SB), 7, $0
|
||||
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
|
||||
MOVL AX, eax+0(FP)
|
||||
MOVL BX, ebx+4(FP)
|
||||
MOVL CX, ecx+8(FP)
|
||||
MOVL DX, edx+12(FP)
|
||||
RET
|
vendor/github.com/klauspost/cpuid/detect_intel.go (new file, 17 lines, generated, vendored)
@@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo amd64,!gccgo

package cpuid

func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)

func initCPU() {
	cpuid = asmCpuid
	cpuidex = asmCpuidex
	xgetbv = asmXgetbv
	rdtscpAsm = asmRdtscpAsm
}
vendor/github.com/klauspost/cpuid/detect_ref.go
generated
vendored
Normal file
23
vendor/github.com/klauspost/cpuid/detect_ref.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
// +build !amd64,!386 gccgo
|
||||
|
||||
package cpuid
|
||||
|
||||
func initCPU() {
|
||||
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
|
||||
return 0, 0, 0, 0
|
||||
}
|
||||
|
||||
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
|
||||
return 0, 0, 0, 0
|
||||
}
|
||||
|
||||
xgetbv = func(index uint32) (eax, edx uint32) {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
|
||||
return 0, 0, 0, 0
|
||||
}
|
||||
}
|
vendor/github.com/klauspost/cpuid/generate.go (new file, 4 lines, generated, vendored)
@@ -0,0 +1,4 @@
package cpuid

//go:generate go run private-gen.go
//go:generate gofmt -w ./private
vendor/github.com/moul/http2curl/.gitignore (new file, 24 lines, generated, vendored)
@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
vendor/moul.io/http2curl/.travis.yml → vendor/github.com/moul/http2curl/.travis.yml
generated
vendored
0
vendor/moul.io/http2curl/.travis.yml → vendor/github.com/moul/http2curl/.travis.yml
generated
vendored
0
vendor/moul.io/http2curl/LICENSE → vendor/github.com/moul/http2curl/LICENSE
generated
vendored
0
vendor/moul.io/http2curl/LICENSE → vendor/github.com/moul/http2curl/LICENSE
generated
vendored
0
vendor/moul.io/http2curl/Makefile → vendor/github.com/moul/http2curl/Makefile
generated
vendored
0
vendor/moul.io/http2curl/Makefile → vendor/github.com/moul/http2curl/Makefile
generated
vendored
0
vendor/moul.io/http2curl/README.md → vendor/github.com/moul/http2curl/README.md
generated
vendored
0
vendor/moul.io/http2curl/README.md → vendor/github.com/moul/http2curl/README.md
generated
vendored
18
vendor/github.com/onsi/ginkgo/.travis.yml
generated
vendored
18
vendor/github.com/onsi/ginkgo/.travis.yml
generated
vendored
|
@ -4,14 +4,22 @@ go:
|
|||
- 1.13.x
|
||||
- tip
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg/mod
|
||||
|
||||
# allow internal package imports, necessary for forked repositories
|
||||
go_import_path: github.com/onsi/ginkgo
|
||||
|
||||
install:
|
||||
- go get -v -t ./...
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/onsi/gomega
|
||||
- go install github.com/onsi/ginkgo/ginkgo
|
||||
- GO111MODULE="off" go get -v -t ./...
|
||||
- GO111MODULE="off" go get golang.org/x/tools/cmd/cover
|
||||
- GO111MODULE="off" go get github.com/onsi/gomega
|
||||
- GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
|
||||
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
|
||||
script:
|
||||
- GO111MODULE="on" go mod tidy
|
||||
- diff -u <(echo -n) <(git diff go.mod)
|
||||
- diff -u <(echo -n) <(git diff go.sum)
|
||||
- $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
|
||||
|
|
vendor/github.com/onsi/ginkgo/CHANGELOG.md (5 lines changed, generated, vendored)
@@ -1,3 +1,8 @@
## 1.12.0

### Features
- Add module definition (#630) [78916ab]

## 1.11.0

### Features
vendor/github.com/onsi/ginkgo/config/config.go (2 lines changed, generated, vendored)
@@ -20,7 +20,7 @@ import (
	"fmt"
)

const VERSION = "1.11.0"
const VERSION = "1.12.0"

type GinkgoConfigType struct {
	RandomSeed int64
vendor/github.com/onsi/ginkgo/go.mod (new file, 9 lines, generated, vendored)
@@ -0,0 +1,9 @@
module github.com/onsi/ginkgo

require (
	github.com/hpcloud/tail v1.0.0
	github.com/onsi/gomega v1.7.1
	golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e
)

go 1.12
vendor/github.com/onsi/ginkgo/go.sum (new file, 26 lines, generated, vendored)
@@ -0,0 +1,26 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
vendor/github.com/onsi/gomega/.travis.yml (3 lines changed, generated, vendored)
@@ -1,8 +1,9 @@
language: go

go:
  - 1.10.x
  - 1.11.x
  - 1.12.x
  - 1.13.x
  - gotip

env:
vendor/github.com/onsi/gomega/CHANGELOG.md (11 lines changed, generated, vendored)
@@ -1,14 +1,3 @@
## 1.8.1

### Fixes
- Fix unexpected MatchError() behaviour (#375) [8ae7b2f]

## 1.8.0

### Features
- Allow optional description to be lazily evaluated function (#364) [bf64010]
- Support wrapped errors (#359) [0a981cb]

## 1.7.1

### Fixes
vendor/github.com/onsi/gomega/go.mod (1 line changed, generated, vendored)
@@ -9,7 +9,6 @@ require (
	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
	golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect
	golang.org/x/text v0.3.0 // indirect
	golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
	gopkg.in/fsnotify.v1 v1.4.7 // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	gopkg.in/yaml.v2 v2.2.4
vendor/github.com/onsi/gomega/go.sum (2 lines changed, generated, vendored)
@@ -14,8 +14,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUk
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
vendor/github.com/onsi/gomega/gomega_dsl.go (19 lines changed, generated, vendored)
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
const GOMEGA_VERSION = "1.8.1"
|
||||
const GOMEGA_VERSION = "1.7.1"
|
||||
|
||||
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
|
||||
If you're using Ginkgo then you probably forgot to put your assertion in an It().
|
||||
|
@ -293,18 +293,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
|
|||
// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
|
||||
// the matcher passed to the Should and ShouldNot methods.
|
||||
//
|
||||
// Both Should and ShouldNot take a variadic optionalDescription argument.
|
||||
// This argument allows you to make your failure messages more descriptive.
|
||||
// If a single argument of type `func() string` is passed, this function will be lazily evaluated if a failure occurs
|
||||
// and the returned string is used to annotate the failure message.
|
||||
// Otherwise, this argument is passed on to fmt.Sprintf() and then used to annotate the failure message.
|
||||
// Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
|
||||
// fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
|
||||
// descriptive.
|
||||
//
|
||||
// Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
|
||||
// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
|
||||
// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
|
||||
type AsyncAssertion interface {
|
||||
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
|
@ -319,11 +317,8 @@ type GomegaAsyncAssertion = AsyncAssertion
|
|||
// Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
|
||||
// though this is not enforced.
|
||||
//
|
||||
// All methods take a variadic optionalDescription argument.
|
||||
// This argument allows you to make your failure messages more descriptive.
|
||||
// If a single argument of type `func() string` is passed, this function will be lazily evaluated if a failure occurs
|
||||
// and the returned string is used to annotate the failure message.
|
||||
// Otherwise, this argument is passed on to fmt.Sprintf() and then used to annotate the failure message.
|
||||
// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
|
||||
// and is used to annotate failure messages.
|
||||
//
|
||||
// All methods return a bool that is true if the assertion passed and false if it failed.
|
||||
//
|
||||
|
|
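Editorial note: with the gomega version vendored here, the optional description arguments on Should/ShouldNot are passed straight to fmt.Sprintf and only annotate failure messages (the lazily evaluated func() string form belongs to the newer docs removed above). A minimal hedged sketch of that usage; the test and channel names are hypothetical:

```go
package foo_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestQueueDrains(t *testing.T) {
	RegisterTestingT(t)

	ch := make(chan int, 1)
	ch <- 42

	// The optional description arguments are formatted with fmt.Sprintf and
	// appended to the failure message if the assertion fails.
	Eventually(ch).Should(Receive(), "expected a value on the queue after %d send(s)", 1)
	Consistently(ch).ShouldNot(Receive(), "the queue should stay empty once drained")
}
```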
vendor/github.com/onsi/gomega/internal/assertion/assertion.go (10 lines changed, generated, vendored)
@@ -52,19 +52,16 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{})
switch len(optionalDescription) {
|
||||
case 0:
|
||||
return ""
|
||||
case 1:
|
||||
if describe, ok := optionalDescription[0].(func() string); ok {
|
||||
return describe() + "\n"
|
||||
}
|
||||
default:
|
||||
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
|
||||
}
|
||||
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
|
||||
}
|
||||
|
||||
func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
|
||||
matches, err := matcher.Match(assertion.actualInput)
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
assertion.failWrapper.TWithHelper.Helper()
|
||||
if err != nil {
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset)
|
||||
return false
|
||||
}
|
||||
|
@ -75,7 +72,6 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
|
|||
} else {
|
||||
message = matcher.NegatedFailureMessage(assertion.actualInput)
|
||||
}
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
assertion.failWrapper.Fail(description+message, 2+assertion.offset)
|
||||
return false
|
||||
}
|
||||
|
|
vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go (10 lines changed, generated, vendored)
@@ -60,12 +60,9 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa
switch len(optionalDescription) {
|
||||
case 0:
|
||||
return ""
|
||||
case 1:
|
||||
if describe, ok := optionalDescription[0].(func() string); ok {
|
||||
return describe() + "\n"
|
||||
}
|
||||
default:
|
||||
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
|
||||
}
|
||||
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
|
||||
|
@ -106,6 +103,8 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
|
|||
timer := time.Now()
|
||||
timeout := time.After(assertion.timeoutInterval)
|
||||
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
|
||||
var matches bool
|
||||
var err error
|
||||
mayChange := true
|
||||
|
@ -130,7 +129,6 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
|
|||
}
|
||||
}
|
||||
assertion.failWrapper.TWithHelper.Helper()
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
|
||||
}
|
||||
|
||||
|
|
vendor/github.com/onsi/gomega/matchers/match_error_matcher.go (18 lines changed, generated, vendored)
@@ -5,7 +5,6 @@ import (
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type MatchErrorMatcher struct {
|
||||
|
@ -22,28 +21,25 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e
|
|||
}
|
||||
|
||||
actualErr := actual.(error)
|
||||
expected := matcher.Expected
|
||||
|
||||
if isError(expected) {
|
||||
return reflect.DeepEqual(actualErr, expected) || xerrors.Is(actualErr, expected.(error)), nil
|
||||
if isError(matcher.Expected) {
|
||||
return reflect.DeepEqual(actualErr, matcher.Expected), nil
|
||||
}
|
||||
|
||||
if isString(expected) {
|
||||
return actualErr.Error() == expected, nil
|
||||
if isString(matcher.Expected) {
|
||||
return actualErr.Error() == matcher.Expected, nil
|
||||
}
|
||||
|
||||
var subMatcher omegaMatcher
|
||||
var hasSubMatcher bool
|
||||
if expected != nil {
|
||||
subMatcher, hasSubMatcher = (expected).(omegaMatcher)
|
||||
if matcher.Expected != nil {
|
||||
subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher)
|
||||
if hasSubMatcher {
|
||||
return subMatcher.Match(actualErr.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return false, fmt.Errorf(
|
||||
"MatchError must be passed an error, a string, or a Matcher that can match on strings. Got:\n%s",
|
||||
format.Object(expected, 1))
|
||||
return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. Got:\n%s", format.Object(matcher.Expected, 1))
|
||||
}
|
||||
|
||||
func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
|
|
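Editorial sketch of the matcher behaviour this hunk reverts to: MatchError compares errors with reflect.DeepEqual, matches an exact message string, or delegates to a string matcher; it does not unwrap wrapped errors (no errors.Is/xerrors.Is). The error values and test names below are hypothetical:

```go
package foo_test

import (
	"errors"
	"fmt"
	"testing"

	. "github.com/onsi/gomega"
)

var errNotFound = errors.New("not found")

func lookup() error {
	return fmt.Errorf("lookup failed: %v", errNotFound)
}

func TestLookupError(t *testing.T) {
	RegisterTestingT(t)

	err := lookup()

	// Exact message match and string sub-matchers both work with this version.
	Expect(err).To(MatchError("lookup failed: not found"))
	Expect(err).To(MatchError(ContainSubstring("not found")))

	// Wrapped-error unwrapping is not supported here, so match the sentinel
	// only where it is returned directly.
	Expect(errNotFound).To(MatchError(errNotFound))
}
```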
vendor/github.com/parnurzeal/gorequest/.travis.yml (deleted, 15 lines, generated, vendored)
@@ -1,15 +0,0 @@
language: go
go:
  - 1.7
  - 1.6
  - 1.5
  - 1.4
  - 1.3
  - 1.2
install:
  - go get -t -v ./...
notifications:
  email:
    recipients: parnurzeal@gmail.com
    on_success: change
    on_failure: always