Update dependencies to K8s 1.8
parent ba6c89672d · commit 6a59f4c9a2
1114 changed files with 160955 additions and 262845 deletions
1041  Godeps/Godeps.json  (generated)
File diff suppressed because it is too large.
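Note: most of the vendored churn below comes from docker/distribution moving its digest handling to github.com/opencontainers/go-digest and splitting the short-digest lookup code into a new digestset package. A rough sketch of what that migration means for calling code (the import paths are taken from the diff below; everything else is illustrative):

```go
package main

import (
	"fmt"

	// The old vendored API was "github.com/docker/distribution/digest"
	// with digest.ParseDigest; the re-vendored code uses go-digest instead.
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// digest.Parse replaces the old ParseDigest helper.
	dgst, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst.Algorithm(), dgst.Hex())
}
```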
54  vendor/github.com/docker/distribution/AUTHORS  (generated, vendored)
|
@ -1,32 +1,47 @@
|
|||
a-palchikov <deemok@gmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
Aaron Schlesinger <aschlesinger@deis.com>
|
||||
Aaron Vinson <avinson.public@gmail.com>
|
||||
Adam Duke <adam.v.duke@gmail.com>
|
||||
Adam Enger <adamenger@gmail.com>
|
||||
Adrian Mouat <adrian.mouat@gmail.com>
|
||||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
||||
Alex Chan <alex.chan@metaswitch.com>
|
||||
Alex Elman <aelman@indeed.com>
|
||||
Alexey Gladkov <gladkov.alexey@gmail.com>
|
||||
allencloud <allen.sun@daocloud.io>
|
||||
amitshukla <ashukla73@hotmail.com>
|
||||
Amy Lindburg <amy.lindburg@docker.com>
|
||||
Andrew Hsu <andrewhsu@acm.org>
|
||||
Andrew Meredith <andymeredith@gmail.com>
|
||||
Andrew T Nguyen <andrew.nguyen@docker.com>
|
||||
Andrey Kostov <kostov.andrey@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
Anis Elleuch <vadmeste@gmail.com>
|
||||
Anton Tiurin <noxiouz@yandex.ru>
|
||||
Antonio Mercado <amercado@thinknode.com>
|
||||
Antonio Murdaca <runcom@redhat.com>
|
||||
Anusha Ragunathan <anusha@docker.com>
|
||||
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arthur Baars <arthur@semmle.com>
|
||||
Asuka Suzuki <hello@tanksuzuki.com>
|
||||
Avi Miller <avi.miller@oracle.com>
|
||||
Ayose Cazorla <ayosec@gmail.com>
|
||||
BadZen <dave.trombley@gmail.com>
|
||||
Ben Bodenmiller <bbodenmiller@hotmail.com>
|
||||
Ben Firshman <ben@firshman.co.uk>
|
||||
bin liu <liubin0329@gmail.com>
|
||||
Brian Bland <brian.bland@docker.com>
|
||||
burnettk <burnettk@gmail.com>
|
||||
Carson A <ca@carsonoid.net>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
Charles Smith <charles.smith@docker.com>
|
||||
Chris Dillon <squarism@gmail.com>
|
||||
cuiwei13 <cuiwei13@pku.edu.cn>
|
||||
cyli <cyli@twistedmatrix.com>
|
||||
Daisuke Fujita <dtanshi45@gmail.com>
|
||||
Daniel Huhn <daniel@danielhuhn.de>
|
||||
Darren Shepherd <darren@rancher.com>
|
||||
Dave Trombley <dave.trombley@gmail.com>
|
||||
Dave Tucker <dt@docker.com>
|
||||
|
@ -40,12 +55,17 @@ Diogo Mónica <diogo.monica@gmail.com>
|
|||
DJ Enriquez <dj.enriquez@infospace.com>
|
||||
Donald Huang <don.hcd@gmail.com>
|
||||
Doug Davis <dug@us.ibm.com>
|
||||
Edgar Lee <edgar.lee@docker.com>
|
||||
Eric Yang <windfarer@gmail.com>
|
||||
Fabio Berchtold <jamesclonk@jamesclonk.ch>
|
||||
Fabio Huser <fabio@fh1.ch>
|
||||
farmerworking <farmerworking@gmail.com>
|
||||
Felix Yan <felixonmars@archlinux.org>
|
||||
Florentin Raud <florentin.raud@gmail.com>
|
||||
Frank Chen <frankchn@gmail.com>
|
||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
||||
gabriell nascimento <gabriell@bluesoft.com.br>
|
||||
Gleb Schukin <gschukin@ptsecurity.com>
|
||||
harche <p.harshal@gmail.com>
|
||||
Henri Gomez <henri.gomez@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
|
@ -54,16 +74,26 @@ HuKeping <hukeping@huawei.com>
|
|||
Ian Babrou <ibobrik@gmail.com>
|
||||
igayoso <igayoso@gmail.com>
|
||||
Jack Griffin <jackpg14@gmail.com>
|
||||
James Findley <jfindley@fastmail.com>
|
||||
Jason Freidman <jason.freidman@gmail.com>
|
||||
Jason Heiss <jheiss@aput.net>
|
||||
Jeff Nickoloff <jeff@allingeek.com>
|
||||
Jess Frazelle <acidburn@google.com>
|
||||
Jessie Frazelle <jessie@docker.com>
|
||||
jhaohai <jhaohai@foxmail.com>
|
||||
Jianqing Wang <tsing@jianqing.org>
|
||||
Jihoon Chung <jihoon@gmail.com>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
John Starks <jostarks@microsoft.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Poler <jonathan.poler@apcera.com>
|
||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||
Jordan Liggitt <jliggitt@redhat.com>
|
||||
Josh Chorlton <josh.chorlton@docker.com>
|
||||
Josh Hawn <josh.hawn@docker.com>
|
||||
Julien Fernandez <julien.fernandez@gmail.com>
|
||||
Ke Xu <leonhartx.k@gmail.com>
|
||||
Keerthan Mala <kmala@engineyard.com>
|
||||
Kelsey Hightower <kelsey.hightower@gmail.com>
|
||||
Kenneth Lim <kennethlimcp@gmail.com>
|
||||
|
@ -71,38 +101,56 @@ Kenny Leung <kleung@google.com>
|
|||
Li Yi <denverdino@gmail.com>
|
||||
Liu Hua <sdu.liu@huawei.com>
|
||||
liuchang0812 <liuchang0812@gmail.com>
|
||||
Lloyd Ramey <lnr0626@gmail.com>
|
||||
Louis Kottmann <louis.kottmann@gmail.com>
|
||||
Luke Carpenter <x@rubynerd.net>
|
||||
Marcus Martins <marcus@docker.com>
|
||||
Mary Anthony <mary@docker.com>
|
||||
Matt Bentley <mbentley@mbentley.net>
|
||||
Matt Duch <matt@learnmetrics.com>
|
||||
Matt Moore <mattmoor@google.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
Matthew Green <greenmr@live.co.uk>
|
||||
Michael Prokop <mika@grml.org>
|
||||
Michal Minar <miminar@redhat.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Miquel Sabaté <msabate@suse.com>
|
||||
Misty Stanley-Jones <misty@apache.org>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
moxiegirl <mary@docker.com>
|
||||
Nathan Sullivan <nathan@nightsys.net>
|
||||
nevermosby <robolwq@qq.com>
|
||||
Nghia Tran <tcnghia@gmail.com>
|
||||
Nikita Tarasov <nikita@mygento.ru>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
||||
Oilbeater <liumengxinfly@gmail.com>
|
||||
Olivier Gambier <olivier@docker.com>
|
||||
Olivier Jacques <olivier.jacques@hp.com>
|
||||
Omer Cohen <git@omer.io>
|
||||
Patrick Devine <patrick.devine@docker.com>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||
Philip Misiowiec <philip@atlashealth.com>
|
||||
Pierre-Yves Ritschard <pyr@spootnik.org>
|
||||
Qiao Anran <qiaoanran@gmail.com>
|
||||
Randy Barlow <randy@electronsweatshop.com>
|
||||
Richard Scothern <richard.scothern@docker.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rusty Conover <rusty@luckydinosaur.com>
|
||||
Sean Boran <Boran@users.noreply.github.com>
|
||||
Sebastiaan van Stijn <github@gone.nl>
|
||||
Sebastien Coavoux <s.coavoux@free.fr>
|
||||
Serge Dubrouski <sergeyfd@gmail.com>
|
||||
Sharif Nassar <sharif@mrwacky.com>
|
||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
||||
Shreyas Karnik <karnik.shreyas@gmail.com>
|
||||
Simon Thulbourn <simon+github@thulbourn.com>
|
||||
spacexnice <yaoyao.xyy@alibaba-inc.com>
|
||||
Spencer Rinehart <anubis@overthemonkey.com>
|
||||
Stan Hu <stanhu@gmail.com>
|
||||
Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephen J Day <stephen.day@docker.com>
|
||||
Sungho Moon <sungho.moon@navercorp.com>
|
||||
|
@ -114,8 +162,11 @@ Thomas Sjögren <konstruktoid@users.noreply.github.com>
|
|||
Tianon Gravi <admwiggin@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tonis Tiigi <tonistiigi@gmail.com>
|
||||
Tony Holdstock-Brown <tony@docker.com>
|
||||
Trevor Pounds <trevor.pounds@gmail.com>
|
||||
Troels Thomsen <troels@thomsen.io>
|
||||
Victor Vieux <vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
Vincent Giersch <vincent.giersch@ovh.net>
|
||||
|
@ -124,5 +175,8 @@ weiyuan.yl <weiyuan.yl@alibaba-inc.com>
|
|||
xg.song <xg.song@venusource.com>
|
||||
xiekeyang <xiekeyang@huawei.com>
|
||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
||||
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
|
||||
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||
yuzou <zouyu7@huawei.com>
|
||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
||||
|
@@ -1,10 +1,12 @@
-package digest
+package digestset

 import (
 	"errors"
 	"sort"
 	"strings"
 	"sync"
+
+	digest "github.com/opencontainers/go-digest"
 )

 var (
@@ -44,7 +46,7 @@ func NewSet() *Set {
 // values or short values. This function does not test equality,
 // rather whether the second value could match against the first
 // value.
-func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
 	if len(hex) == len(shortHex) {
 		if hex != shortHex {
 			return false
@@ -64,7 +66,7 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
 // If no digests could be found ErrDigestNotFound will be returned
 // with an empty digest value. If multiple matches are found
 // ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (Digest, error) {
+func (dst *Set) Lookup(d string) (digest.Digest, error) {
 	dst.mutex.RLock()
 	defer dst.mutex.RUnlock()
 	if len(dst.entries) == 0 {
@@ -72,11 +74,11 @@ func (dst *Set) Lookup(d string) (Digest, error) {
 	}
 	var (
 		searchFunc func(int) bool
-		alg        Algorithm
+		alg        digest.Algorithm
 		hex        string
 	)
-	dgst, err := ParseDigest(d)
-	if err == ErrDigestInvalidFormat {
+	dgst, err := digest.Parse(d)
+	if err == digest.ErrDigestInvalidFormat {
 		hex = d
 		searchFunc = func(i int) bool {
 			return dst.entries[i].val >= d
@@ -108,7 +110,7 @@ func (dst *Set) Lookup(d string) (Digest, error) {
 // Add adds the given digest to the set. An error will be returned
 // if the given digest is invalid. If the digest already exists in the
 // set, this operation will be a no-op.
-func (dst *Set) Add(d Digest) error {
+func (dst *Set) Add(d digest.Digest) error {
 	if err := d.Validate(); err != nil {
 		return err
 	}
@@ -139,7 +141,7 @@ func (dst *Set) Add(d Digest) error {
 // Remove removes the given digest from the set. An err will be
 // returned if the given digest is invalid. If the digest does
 // not exist in the set, this operation will be a no-op.
-func (dst *Set) Remove(d Digest) error {
+func (dst *Set) Remove(d digest.Digest) error {
 	if err := d.Validate(); err != nil {
 		return err
 	}
@@ -167,10 +169,10 @@ func (dst *Set) Remove(d Digest) error {
 }

 // All returns all the digests in the set
-func (dst *Set) All() []Digest {
+func (dst *Set) All() []digest.Digest {
 	dst.mutex.RLock()
 	defer dst.mutex.RUnlock()
-	retValues := make([]Digest, len(dst.entries))
+	retValues := make([]digest.Digest, len(dst.entries))
 	for i := range dst.entries {
 		retValues[i] = dst.entries[i].digest
 	}
@@ -183,10 +185,10 @@ func (dst *Set) All() []Digest {
 // entire value of digest if uniqueness cannot be achieved without the
 // full value. This function will attempt to make short codes as short
 // as possible to be unique.
-func ShortCodeTable(dst *Set, length int) map[Digest]string {
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
 	dst.mutex.RLock()
 	defer dst.mutex.RUnlock()
-	m := make(map[Digest]string, len(dst.entries))
+	m := make(map[digest.Digest]string, len(dst.entries))
 	l := length
 	resetIdx := 0
 	for i := 0; i < len(dst.entries); i++ {
@@ -222,9 +224,9 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string {
 }

 type digestEntry struct {
-	alg    Algorithm
+	alg    digest.Algorithm
 	val    string
-	digest Digest
+	digest digest.Digest
 }

 type digestEntries []*digestEntry
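The hunks above rename the package to digestset and switch its types to go-digest. A minimal usage sketch of the resulting Set API (the Add/Lookup signatures come from this diff; the sample digest handling assumes go-digest as vendored here):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()

	// Index a full digest, then resolve it again from a short hex prefix.
	full := digest.FromString("hello world")
	if err := set.Add(full); err != nil {
		panic(err)
	}

	resolved, err := set.Lookup(full.Hex()[:12])
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved == full) // true
}
```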
42  vendor/github.com/docker/distribution/reference/helpers.go  (generated, vendored, new file)
@@ -0,0 +1,42 @@
package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
	if _, ok := ref.(NamedTagged); ok {
		return false
	}
	if _, ok := ref.(Canonical); ok {
		return false
	}
	return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().Name()
	}
	return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().String()
	}
	return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}
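These helpers are the new public entry points for converting between fully qualified references and the short "familiar" form shown in the Docker UI. A small sketch using only functions added in this commit (output comments reflect what the vendored code implies):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("redis:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/redis:latest
	fmt.Println(reference.FamiliarString(named)) // redis:latest
	fmt.Println(reference.FamiliarName(named))   // redis

	// FamiliarMatch applies path.Match patterns against the familiar form.
	ok, _ := reference.FamiliarMatch("redis*", named)
	fmt.Println(ok) // true
}
```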
170  vendor/github.com/docker/distribution/reference/normalize.go  (generated, vendored, new file)
@@ -0,0 +1,170 @@
package reference

import (
	"errors"
	"fmt"
	"strings"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

var (
	legacyDefaultDomain = "index.docker.io"
	defaultDomain       = "docker.io"
	officialRepoName    = "library"
	defaultTag          = "latest"
)

// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
	Named
	Familiar() Named
}

// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
	}
	domain, remainder := splitDockerDomain(s)
	var remoteName string
	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
		remoteName = remainder[:tagSep]
	} else {
		remoteName = remainder
	}
	if strings.ToLower(remoteName) != remoteName {
		return nil, errors.New("invalid reference format: repository name must be lowercase")
	}

	ref, err := Parse(domain + "/" + remainder)
	if err != nil {
		return nil, err
	}
	named, isNamed := ref.(Named)
	if !isNamed {
		return nil, fmt.Errorf("reference %s has no name", ref.String())
	}
	return named, nil
}

// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
		domain, remainder = defaultDomain, name
	} else {
		domain, remainder = name[:i], name[i+1:]
	}
	if domain == legacyDefaultDomain {
		domain = defaultDomain
	}
	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
		remainder = officialRepoName + "/" + remainder
	}
	return
}

// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
	repo := repository{
		domain: named.Domain(),
		path:   named.Path(),
	}

	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern "library/<official repo name>"
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		}
	}
	return repo
}

func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

func (r repository) Familiar() Named {
	return familiarizeName(r)
}

func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
	if IsNameOnly(ref) {
		namedTagged, err := WithTag(ref, defaultTag)
		if err != nil {
			// Default tag must be valid, to create a NamedTagged
			// type with non-validated input the WithTag function
			// should be used instead
			panic(err)
		}
		return namedTagged
	}
	return ref
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
		return digestReference("sha256:" + ref), nil
	}
	if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}

	return ParseNormalizedNamed(ref)
}

// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
		dgst, err := ds.Lookup(ref)
		if err == nil {
			return digestReference(dgst), nil
		}
	} else {
		if dgst, err := digest.Parse(ref); err == nil {
			return digestReference(dgst), nil
		}
	}

	return ParseNormalizedNamed(ref)
}
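normalize.go is where the familiar-to-canonical expansion lives: splitDockerDomain supplies the docker.io domain and library/ prefix, and TagNameOnly adds the default latest tag. A hedged example of the behaviour this implies:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A bare name is expanded to the canonical docker.io/library form.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	named = reference.TagNameOnly(named) // adds ":latest" when no tag is present

	fmt.Println(named.String())          // docker.io/library/ubuntu:latest
	fmt.Println(reference.Domain(named)) // docker.io
	fmt.Println(reference.Path(named))   // library/ubuntu
}
```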
213  vendor/github.com/docker/distribution/reference/reference.go  (generated, vendored)
|
@ -4,28 +4,32 @@
|
|||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [hostname '/'] component ['/' component]*
|
||||
// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
|
||||
// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// component := alpha-numeric [separator alpha-numeric]*
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
//
|
||||
// identifier := /[a-f0-9]{64}/
|
||||
// short-identifier := /[a-f0-9]{6,64}/
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -43,11 +47,17 @@ var (
|
|||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
|
||||
// ErrNameNotCanonical is returned when a name is not canonical.
|
||||
ErrNameNotCanonical = errors.New("repository name must be canonical")
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
|
@ -121,23 +131,56 @@ type Digested interface {
|
|||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with hostname and digest
|
||||
// name including a name with domain and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// namedRepository is a reference to a repository with a name.
|
||||
// A namedRepository has both domain and path components.
|
||||
type namedRepository interface {
|
||||
Named
|
||||
Domain() string
|
||||
Path() string
|
||||
}
|
||||
|
||||
// Domain returns the domain part of the Named reference
|
||||
func Domain(named Named) string {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain()
|
||||
}
|
||||
domain, _ := splitDomain(named.Name())
|
||||
return domain
|
||||
}
|
||||
|
||||
// Path returns the name without the domain part of the Named reference
|
||||
func Path(named Named) (name string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Path()
|
||||
}
|
||||
_, path := splitDomain(named.Name())
|
||||
return path
|
||||
}
|
||||
|
||||
func splitDomain(name string) (string, string) {
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
// DEPRECATED: Use Domain or Path
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
name := named.Name()
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if match == nil || len(match) != 3 {
|
||||
return "", name
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain(), r.Path()
|
||||
}
|
||||
return match[1], match[2]
|
||||
return splitDomain(named.Name())
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
|
||||
|
@ -149,7 +192,9 @@ func Parse(s string) (Reference, error) {
|
|||
if s == "" {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
// TODO(dmcgowan): Provide more specific and helpful error
|
||||
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
|
||||
return nil, ErrNameContainsUppercase
|
||||
}
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
|
||||
|
@ -157,13 +202,24 @@ func Parse(s string) (Reference, error) {
|
|||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
var repo repository
|
||||
|
||||
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
|
||||
if nameMatch != nil && len(nameMatch) == 3 {
|
||||
repo.domain = nameMatch[1]
|
||||
repo.path = nameMatch[2]
|
||||
} else {
|
||||
repo.domain = ""
|
||||
repo.path = matches[1]
|
||||
}
|
||||
|
||||
ref := reference{
|
||||
name: matches[1],
|
||||
tag: matches[2],
|
||||
namedRepository: repo,
|
||||
tag: matches[2],
|
||||
}
|
||||
if matches[3] != "" {
|
||||
var err error
|
||||
ref.digest, err = digest.ParseDigest(matches[3])
|
||||
ref.digest, err = digest.Parse(matches[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -178,18 +234,17 @@ func Parse(s string) (Reference, error) {
|
|||
}
|
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||
// the Named interface. The reference must have a name, otherwise an error is
|
||||
// returned.
|
||||
// the Named interface. The reference must have a name and be in the canonical
|
||||
// form, otherwise an error is returned.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: ParseNamed will not handle short digests.
|
||||
func ParseNamed(s string) (Named, error) {
|
||||
ref, err := Parse(s)
|
||||
named, err := ParseNormalizedNamed(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
named, isNamed := ref.(Named)
|
||||
if !isNamed {
|
||||
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||
if named.String() != s {
|
||||
return nil, ErrNameNotCanonical
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
@ -200,10 +255,15 @@ func WithName(name string) (Named, error) {
|
|||
if len(name) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
if !anchoredNameRegexp.MatchString(name) {
|
||||
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if match == nil || len(match) != 3 {
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
return repository(name), nil
|
||||
return repository{
|
||||
domain: match[1],
|
||||
path: match[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||
|
@ -212,9 +272,23 @@ func WithTag(name Named, tag string) (NamedTagged, error) {
|
|||
if !anchoredTagRegexp.MatchString(tag) {
|
||||
return nil, ErrTagInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if canonical, ok := name.(Canonical); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
digest: canonical.Digest(),
|
||||
}, nil
|
||||
}
|
||||
return taggedReference{
|
||||
name: name.Name(),
|
||||
tag: tag,
|
||||
namedRepository: repo,
|
||||
tag: tag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -224,14 +298,37 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
|||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||
return nil, ErrDigestInvalidFormat
|
||||
}
|
||||
var repo repository
|
||||
if r, ok := name.(namedRepository); ok {
|
||||
repo.domain = r.Domain()
|
||||
repo.path = r.Path()
|
||||
} else {
|
||||
repo.path = name.Name()
|
||||
}
|
||||
if tagged, ok := name.(Tagged); ok {
|
||||
return reference{
|
||||
namedRepository: repo,
|
||||
tag: tagged.Tag(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
return canonicalReference{
|
||||
name: name.Name(),
|
||||
digest: digest,
|
||||
namedRepository: repo,
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TrimNamed removes any tag or digest from the named reference.
|
||||
func TrimNamed(ref Named) Named {
|
||||
domain, path := SplitHostname(ref)
|
||||
return repository{
|
||||
domain: domain,
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
if ref.name == "" {
|
||||
if ref.Name() == "" {
|
||||
// Allow digest only references
|
||||
if ref.digest != "" {
|
||||
return digestReference(ref.digest)
|
||||
|
@ -241,16 +338,16 @@ func getBestReferenceType(ref reference) Reference {
|
|||
if ref.tag == "" {
|
||||
if ref.digest != "" {
|
||||
return canonicalReference{
|
||||
name: ref.name,
|
||||
digest: ref.digest,
|
||||
namedRepository: ref.namedRepository,
|
||||
digest: ref.digest,
|
||||
}
|
||||
}
|
||||
return repository(ref.name)
|
||||
return ref.namedRepository
|
||||
}
|
||||
if ref.digest == "" {
|
||||
return taggedReference{
|
||||
name: ref.name,
|
||||
tag: ref.tag,
|
||||
namedRepository: ref.namedRepository,
|
||||
tag: ref.tag,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -258,17 +355,13 @@ func getBestReferenceType(ref reference) Reference {
|
|||
}
|
||||
|
||||
type reference struct {
|
||||
name string
|
||||
namedRepository
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (r reference) String() string {
|
||||
return r.name + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Name() string {
|
||||
return r.name
|
||||
return r.Name() + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Tag() string {
|
||||
|
@ -279,20 +372,34 @@ func (r reference) Digest() digest.Digest {
|
|||
return r.digest
|
||||
}
|
||||
|
||||
type repository string
|
||||
type repository struct {
|
||||
domain string
|
||||
path string
|
||||
}
|
||||
|
||||
func (r repository) String() string {
|
||||
return string(r)
|
||||
return r.Name()
|
||||
}
|
||||
|
||||
func (r repository) Name() string {
|
||||
return string(r)
|
||||
if r.domain == "" {
|
||||
return r.path
|
||||
}
|
||||
return r.domain + "/" + r.path
|
||||
}
|
||||
|
||||
func (r repository) Domain() string {
|
||||
return r.domain
|
||||
}
|
||||
|
||||
func (r repository) Path() string {
|
||||
return r.path
|
||||
}
|
||||
|
||||
type digestReference digest.Digest
|
||||
|
||||
func (d digestReference) String() string {
|
||||
return d.String()
|
||||
return digest.Digest(d).String()
|
||||
}
|
||||
|
||||
func (d digestReference) Digest() digest.Digest {
|
||||
|
@ -300,16 +407,12 @@ func (d digestReference) Digest() digest.Digest {
|
|||
}
|
||||
|
||||
type taggedReference struct {
|
||||
name string
|
||||
tag string
|
||||
namedRepository
|
||||
tag string
|
||||
}
|
||||
|
||||
func (t taggedReference) String() string {
|
||||
return t.name + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Name() string {
|
||||
return t.name
|
||||
return t.Name() + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Tag() string {
|
||||
|
@ -317,16 +420,12 @@ func (t taggedReference) Tag() string {
|
|||
}
|
||||
|
||||
type canonicalReference struct {
|
||||
name string
|
||||
namedRepository
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (c canonicalReference) String() string {
|
||||
return c.name + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Name() string {
|
||||
return c.name
|
||||
return c.Name() + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Digest() digest.Digest {
|
||||
|
|
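The reference.go changes above replace the single name string with separate domain and path fields and make ParseNamed strict: it now goes through ParseNormalizedNamed and rejects input that is not already in canonical form. A sketch of the resulting behaviour, using only identifiers visible in this diff:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Short forms are rejected by the stricter ParseNamed.
	if _, err := reference.ParseNamed("ubuntu"); err != nil {
		fmt.Println("rejected:", err) // repository name must be canonical
	}

	named, err := reference.ParseNamed("docker.io/library/ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.Domain(named), reference.Path(named)) // docker.io library/ubuntu

	tagged, err := reference.WithTag(named, "16.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // docker.io/library/ubuntu:16.04
}
```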
41  vendor/github.com/docker/distribution/reference/regexp.go  (generated, vendored)
@@ -19,18 +19,18 @@ var (
 		alphaNumericRegexp,
 		optional(repeated(separatorRegexp, alphaNumericRegexp)))

-	// hostnameComponentRegexp restricts the registry hostname component of a
-	// repository name to start with a component as defined by hostnameRegexp
+	// domainComponentRegexp restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp
 	// and followed by an optional port.
-	hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

-	// hostnameRegexp defines the structure of potential hostname components
+	// DomainRegexp defines the structure of potential domain components
 	// that may be part of image names. This is purposely a subset of what is
 	// allowed by DNS to ensure backwards compatibility with Docker image
 	// names.
-	hostnameRegexp = expression(
-		hostnameComponentRegexp,
-		optional(repeated(literal(`.`), hostnameComponentRegexp)),
+	DomainRegexp = expression(
+		domainComponentRegexp,
+		optional(repeated(literal(`.`), domainComponentRegexp)),
 		optional(literal(`:`), match(`[0-9]+`)))

 	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
@@ -48,17 +48,17 @@ var (
 	anchoredDigestRegexp = anchored(DigestRegexp)

 	// NameRegexp is the format for the name component of references. The
-	// regexp has capturing groups for the hostname and name part omitting
+	// regexp has capturing groups for the domain and name part omitting
 	// the separating forward slash from either.
 	NameRegexp = expression(
-		optional(hostnameRegexp, literal(`/`)),
+		optional(DomainRegexp, literal(`/`)),
 		nameComponentRegexp,
 		optional(repeated(literal(`/`), nameComponentRegexp)))

 	// anchoredNameRegexp is used to parse a name value, capturing the
-	// hostname and trailing components.
+	// domain and trailing components.
 	anchoredNameRegexp = anchored(
-		optional(capture(hostnameRegexp), literal(`/`)),
+		optional(capture(DomainRegexp), literal(`/`)),
 		capture(nameComponentRegexp,
 			optional(repeated(literal(`/`), nameComponentRegexp))))

@@ -68,6 +68,25 @@ var (
 	ReferenceRegexp = anchored(capture(NameRegexp),
 		optional(literal(":"), capture(TagRegexp)),
 		optional(literal("@"), capture(DigestRegexp)))
+
+	// IdentifierRegexp is the format for string identifier used as a
+	// content addressable identifier using sha256. These identifiers
+	// are like digests without the algorithm, since sha256 is used.
+	IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+	// ShortIdentifierRegexp is the format used to represent a prefix
+	// of an identifier. A prefix may be used to match a sha256 identifier
+	// within a list of trusted identifiers.
+	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+	// anchoredIdentifierRegexp is used to check or match an
+	// identifier value, anchored at start and end of string.
+	anchoredIdentifierRegexp = anchored(IdentifierRegexp)
+
+	// anchoredShortIdentifierRegexp is used to check if a value
+	// is a possible identifier prefix, anchored at start and end
+	// of string.
+	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
 )

 // match compiles the string to a regular expression.
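The regexp changes rename the hostname regexps to their domain counterparts and add identifier patterns for bare sha256 hex strings. If the capture-group layout is as the construction above suggests (name, tag, digest, in that order — an assumption worth verifying), ReferenceRegexp can be used directly like this:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref := "registry.example.com:5000/team/app:v1.2@sha256:" +
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	m := reference.ReferenceRegexp.FindStringSubmatch(ref)
	if m == nil {
		panic("no match")
	}
	fmt.Println(m[1]) // registry.example.com:5000/team/app
	fmt.Println(m[2]) // v1.2
	fmt.Println(m[3]) // sha256:e3b0c442...
}
```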
24  vendor/github.com/exponent-io/jsonpath/.gitignore  (generated, vendored)
|
@ -1,24 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
5  vendor/github.com/exponent-io/jsonpath/.travis.yml  (generated, vendored)
@@ -1,5 +0,0 @@
language: go

go:
  - 1.5
  - tip
66  vendor/github.com/exponent-io/jsonpath/README.md  (generated, vendored)
|
@ -1,66 +0,0 @@
|
|||
[](https://godoc.org/github.com/exponent-io/jsonpath)
|
||||
[](https://travis-ci.org/exponent-io/jsonpath)
|
||||
|
||||
# jsonpath
|
||||
|
||||
This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used.
|
||||
|
||||
This Decoder has the following enhancements...
|
||||
* The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
|
||||
* The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
|
||||
* The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
|
||||
* The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
|
||||
|
||||
## Installation
|
||||
|
||||
go get -u github.com/exponent-io/jsonpath
|
||||
|
||||
## Example Usage
|
||||
|
||||
#### SeekTo
|
||||
|
||||
```go
|
||||
import "github.com/exponent-io/jsonpath"
|
||||
|
||||
var j = []byte(`[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
|
||||
]`)
|
||||
|
||||
w := json.NewDecoder(bytes.NewReader(j))
|
||||
var v interface{}
|
||||
|
||||
w.SeekTo(1, "Point", "G")
|
||||
w.Decode(&v) // v is 218
|
||||
```
|
||||
|
||||
#### Scan with PathActions
|
||||
|
||||
```go
|
||||
var j = []byte(`{"colors":[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
|
||||
]}`)
|
||||
|
||||
var actions PathActions
|
||||
|
||||
// Extract the value at Point.A
|
||||
actions.Add(func(d *Decoder) error {
|
||||
var alpha int
|
||||
err := d.Decode(&alpha)
|
||||
fmt.Printf("Alpha: %v\n", alpha)
|
||||
return err
|
||||
}, "Point", "A")
|
||||
|
||||
w := NewDecoder(bytes.NewReader(j))
|
||||
w.SeekTo("colors", 0)
|
||||
|
||||
var ok = true
|
||||
var err error
|
||||
for ok {
|
||||
ok, err = w.Scan(&actions)
|
||||
if err != nil && err != io.EOF {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
210  vendor/github.com/exponent-io/jsonpath/decoder.go  (generated, vendored)
|
@ -1,210 +0,0 @@
|
|||
package jsonpath
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
|
||||
type KeyString string
|
||||
|
||||
// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
|
||||
type Decoder struct {
|
||||
json.Decoder
|
||||
|
||||
path JsonPath
|
||||
context jsonContext
|
||||
}
|
||||
|
||||
// NewDecoder creates a new instance of the extended JSON Decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{Decoder: *json.NewDecoder(r)}
|
||||
}
|
||||
|
||||
// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
|
||||
//
|
||||
// The path argument must consist of strings or integers. Each string specifies an JSON object key, and
|
||||
// each integer specifies an index into a JSON array.
|
||||
//
|
||||
// Consider the JSON structure
|
||||
//
|
||||
// { "a": [0,"s",12e4,{"b":0,"v":35} ] }
|
||||
//
|
||||
// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
|
||||
// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
|
||||
// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
|
||||
//
|
||||
// SeekTo returns a boolean value indicating whether a match was found.
|
||||
//
|
||||
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
|
||||
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
|
||||
|
||||
if len(path) == 0 {
|
||||
return len(d.path) == 0, nil
|
||||
}
|
||||
last := len(path) - 1
|
||||
if i, ok := path[last].(int); ok {
|
||||
path[last] = i - 1
|
||||
}
|
||||
|
||||
for {
|
||||
if d.path.Equal(path) {
|
||||
return true, nil
|
||||
}
|
||||
_, err := d.Token()
|
||||
if err == io.EOF {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
|
||||
// equivalent to encoding/json.Decode().
|
||||
func (d *Decoder) Decode(v interface{}) error {
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
return d.Decoder.Decode(v)
|
||||
}
|
||||
|
||||
// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
|
||||
// position of the most-recently parsed token.
|
||||
func (d *Decoder) Path() JsonPath {
|
||||
p := make(JsonPath, len(d.path))
|
||||
copy(p, d.path)
|
||||
return p
|
||||
}
|
||||
|
||||
// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
|
||||
// between strings that are keys and and strings that are values. String tokens that are object keys are returned as a
|
||||
// KeyString rather than as a native string.
|
||||
func (d *Decoder) Token() (json.Token, error) {
|
||||
t, err := d.Decoder.Token()
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
return t, err
|
||||
}
|
||||
|
||||
switch t := t.(type) {
|
||||
case json.Delim:
|
||||
switch t {
|
||||
case json.Delim('{'):
|
||||
if d.context == arrValue {
|
||||
d.path.incTop()
|
||||
}
|
||||
d.path.push("")
|
||||
d.context = objKey
|
||||
break
|
||||
case json.Delim('}'):
|
||||
d.path.pop()
|
||||
d.context = d.path.inferContext()
|
||||
break
|
||||
case json.Delim('['):
|
||||
if d.context == arrValue {
|
||||
d.path.incTop()
|
||||
}
|
||||
d.path.push(-1)
|
||||
d.context = arrValue
|
||||
break
|
||||
case json.Delim(']'):
|
||||
d.path.pop()
|
||||
d.context = d.path.inferContext()
|
||||
break
|
||||
}
|
||||
case float64, json.Number, bool:
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
break
|
||||
case string:
|
||||
switch d.context {
|
||||
case objKey:
|
||||
d.path.nameTop(t)
|
||||
d.context = objValue
|
||||
return KeyString(t), err
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return t, err
|
||||
}
|
||||
|
||||
// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
|
||||
// invoking each matching PathAction along the way.
|
||||
//
|
||||
// Scan returns true if there are more contiguous values to scan (for example in an array).
|
||||
func (d *Decoder) Scan(ext *PathActions) (bool, error) {
|
||||
|
||||
rootPath := d.Path()
|
||||
|
||||
// If this is an array path, increment the root path in our local copy.
|
||||
if rootPath.inferContext() == arrValue {
|
||||
rootPath.incTop()
|
||||
}
|
||||
|
||||
for {
|
||||
// advance the token position
|
||||
_, err := d.Token()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
match:
|
||||
var relPath JsonPath
|
||||
|
||||
// capture the new JSON path
|
||||
path := d.Path()
|
||||
|
||||
if len(path) > len(rootPath) {
|
||||
// capture the path relative to where the scan started
|
||||
relPath = path[len(rootPath):]
|
||||
} else {
|
||||
// if the path is not longer than the root, then we are done with this scan
|
||||
// return boolean flag indicating if there are more items to scan at the same level
|
||||
return d.Decoder.More(), nil
|
||||
}
|
||||
|
||||
// match the relative path against the path actions
|
||||
if node := ext.node.match(relPath); node != nil {
|
||||
if node.action != nil {
|
||||
// we have a match so execute the action
|
||||
err = node.action(d)
|
||||
if err != nil {
|
||||
return d.Decoder.More(), err
|
||||
}
|
||||
// The action may have advanced the decoder. If we are in an array, advancing it further would
|
||||
// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
|
||||
if d.path.inferContext() == arrValue && d.Decoder.More() {
|
||||
goto match
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
67  vendor/github.com/exponent-io/jsonpath/path.go  (generated, vendored)
|
@ -1,67 +0,0 @@
|
|||
// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens.
|
||||
package jsonpath
|
||||
|
||||
import "fmt"
|
||||
|
||||
type jsonContext int
|
||||
|
||||
const (
|
||||
none jsonContext = iota
|
||||
objKey
|
||||
objValue
|
||||
arrValue
|
||||
)
|
||||
|
||||
// AnyIndex can be used in a pattern to match any array index.
|
||||
const AnyIndex = -2
|
||||
|
||||
// JsonPath is a slice of strings and/or integers. Each string specifies an JSON object key, and
|
||||
// each integer specifies an index into a JSON array.
|
||||
type JsonPath []interface{}
|
||||
|
||||
func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
|
||||
func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] }
|
||||
|
||||
// increment the index at the top of the stack (must be an array index)
|
||||
func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
|
||||
|
||||
// name the key at the top of the stack (must be an object key)
|
||||
func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
|
||||
|
||||
// infer the context from the item at the top of the stack
|
||||
func (p *JsonPath) inferContext() jsonContext {
|
||||
if len(*p) == 0 {
|
||||
return none
|
||||
}
|
||||
t := (*p)[len(*p)-1]
|
||||
switch t.(type) {
|
||||
case string:
|
||||
return objKey
|
||||
case int:
|
||||
return arrValue
|
||||
default:
|
||||
panic(fmt.Sprintf("Invalid stack type %T", t))
|
||||
}
|
||||
}
|
||||
|
||||
// Equal tests for equality between two JsonPath types.
|
||||
func (p *JsonPath) Equal(o JsonPath) bool {
|
||||
if len(*p) != len(o) {
|
||||
return false
|
||||
}
|
||||
for i, v := range *p {
|
||||
if v != o[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *JsonPath) HasPrefix(o JsonPath) bool {
|
||||
for i, v := range o {
|
||||
if v != (*p)[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
61  vendor/github.com/exponent-io/jsonpath/pathaction.go  (generated, vendored)
|
@ -1,61 +0,0 @@
|
|||
package jsonpath
|
||||
|
||||
// pathNode is used to construct a trie of paths to be matched
|
||||
type pathNode struct {
|
||||
matchOn interface{} // string, or integer
|
||||
childNodes []pathNode
|
||||
action DecodeAction
|
||||
}
|
||||
|
||||
// match climbs the trie to find a node that matches the given JSON path.
|
||||
func (n *pathNode) match(path JsonPath) *pathNode {
|
||||
var node *pathNode = n
|
||||
for _, ps := range path {
|
||||
found := false
|
||||
for i, n := range node.childNodes {
|
||||
if n.matchOn == ps {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
|
||||
// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
|
||||
type PathActions struct {
|
||||
node pathNode
|
||||
}
|
||||
|
||||
// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
|
||||
type DecodeAction func(d *Decoder) error
|
||||
|
||||
// Add specifies an action to call on the Decoder when the specified path is encountered.
|
||||
func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
|
||||
|
||||
var node *pathNode = &je.node
|
||||
for _, ps := range path {
|
||||
found := false
|
||||
for i, n := range node.childNodes {
|
||||
if n.matchOn == ps {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
|
||||
node = &node.childNodes[len(node.childNodes)-1]
|
||||
}
|
||||
}
|
||||
node.action = action
|
||||
}
|
1  vendor/github.com/google/btree/.travis.yml  (generated, vendored, new file)
@@ -0,0 +1 @@
language: go
202  vendor/github.com/google/btree/LICENSE  (generated, vendored, new file)
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
12 vendor/github.com/google/btree/README.md generated vendored Normal file
@@ -0,0 +1,12 @@
# BTree implementation for Go

![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)

This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.

The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.

See http://godoc.org/github.com/google/btree for documentation.
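A minimal usage sketch (not part of the vendored files; it relies only on the API visible in the btree.go listing that follows: New, ReplaceOrInsert, Len, Has, Ascend, and the Int item type):

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// Degree 32 is an arbitrary choice; New accepts any degree > 1.
	tr := btree.New(32)
	for i := 0; i < 10; i++ {
		// btree.Int satisfies the Item interface through its Less method.
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println("len:", tr.Len())               // 10
	fmt.Println("has 3:", tr.Has(btree.Int(3))) // true
	// Ascend visits items in order until the iterator returns false.
	tr.Ascend(func(i btree.Item) bool {
		fmt.Print(int(i.(btree.Int)), " ")
		return true
	})
	fmt.Println()
}
```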
649 vendor/github.com/google/btree/btree.go generated vendored Normal file
@@ -0,0 +1,649 @@
// Copyright 2014 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package btree implements in-memory B-Trees of arbitrary degree.
|
||||
//
|
||||
// btree implements an in-memory B-Tree for use as an ordered data structure.
|
||||
// It is not meant for persistent storage solutions.
|
||||
//
|
||||
// It has a flatter structure than an equivalent red-black or other binary tree,
|
||||
// which in some cases yields better memory usage and/or performance.
|
||||
// See some discussion on the matter here:
|
||||
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
|
||||
// Note, though, that this project is in no way related to the C++ B-Tree
|
||||
// implmentation written about there.
|
||||
//
|
||||
// Within this tree, each node contains a slice of items and a (possibly nil)
|
||||
// slice of children. For basic numeric values or raw structs, this can cause
|
||||
// efficiency differences when compared to equivalent C++ template code that
|
||||
// stores values in arrays within the node:
|
||||
// * Due to the overhead of storing values as interfaces (each
|
||||
// value needs to be stored as the value itself, then 2 words for the
|
||||
// interface pointing to that value and its type), resulting in higher
|
||||
// memory use.
|
||||
// * Since interfaces can point to values anywhere in memory, values are
|
||||
// most likely not stored in contiguous blocks, resulting in a higher
|
||||
// number of cache misses.
|
||||
// These issues don't tend to matter, though, when working with strings or other
|
||||
// heap-allocated structures, since C++-equivalent structures also must store
|
||||
// pointers and also distribute their values across the heap.
|
||||
//
|
||||
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
|
||||
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
|
||||
// widely used ordered tree implementation in the Go ecosystem currently.
|
||||
// Its functions, therefore, exactly mirror those of
|
||||
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
|
||||
// support storing multiple equivalent values or backwards iteration.
|
||||
package btree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Item represents a single object in the tree.
|
||||
type Item interface {
|
||||
// Less tests whether the current item is less than the given argument.
|
||||
//
|
||||
// This must provide a strict weak ordering.
|
||||
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
|
||||
// hold one of either a or b in the tree).
|
||||
Less(than Item) bool
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultFreeListSize = 32
|
||||
)
|
||||
|
||||
// FreeList represents a free list of btree nodes. By default each
|
||||
// BTree has its own FreeList, but multiple BTrees can share the same
|
||||
// FreeList.
|
||||
// Two Btrees using the same freelist are not safe for concurrent write access.
|
||||
type FreeList struct {
|
||||
freelist []*node
|
||||
}
|
||||
|
||||
// NewFreeList creates a new free list.
|
||||
// size is the maximum size of the returned free list.
|
||||
func NewFreeList(size int) *FreeList {
|
||||
return &FreeList{freelist: make([]*node, 0, size)}
|
||||
}
|
||||
|
||||
func (f *FreeList) newNode() (n *node) {
|
||||
index := len(f.freelist) - 1
|
||||
if index < 0 {
|
||||
return new(node)
|
||||
}
|
||||
f.freelist, n = f.freelist[:index], f.freelist[index]
|
||||
return
|
||||
}
|
||||
|
||||
func (f *FreeList) freeNode(n *node) {
|
||||
if len(f.freelist) < cap(f.freelist) {
|
||||
f.freelist = append(f.freelist, n)
|
||||
}
|
||||
}
|
||||
|
||||
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
|
||||
// the tree. When this function returns false, iteration will stop and the
|
||||
// associated Ascend* function will immediately return.
|
||||
type ItemIterator func(i Item) bool
|
||||
|
||||
// New creates a new B-Tree with the given degree.
|
||||
//
|
||||
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
|
||||
// and 2-4 children).
|
||||
func New(degree int) *BTree {
|
||||
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
|
||||
}
|
||||
|
||||
// NewWithFreeList creates a new B-Tree that uses the given node free list.
|
||||
func NewWithFreeList(degree int, f *FreeList) *BTree {
|
||||
if degree <= 1 {
|
||||
panic("bad degree")
|
||||
}
|
||||
return &BTree{
|
||||
degree: degree,
|
||||
freelist: f,
|
||||
}
|
||||
}
|
||||
|
||||
// items stores items in a node.
|
||||
type items []Item
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *items) insertAt(index int, item Item) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = item
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *items) removeAt(index int) Item {
|
||||
item := (*s)[index]
|
||||
(*s)[index] = nil
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *items) pop() (out Item) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// find returns the index where the given item should be inserted into this
|
||||
// list. 'found' is true if the item already exists in the list at the given
|
||||
// index.
|
||||
func (s items) find(item Item) (index int, found bool) {
|
||||
i := sort.Search(len(s), func(i int) bool {
|
||||
return item.Less(s[i])
|
||||
})
|
||||
if i > 0 && !s[i-1].Less(item) {
|
||||
return i - 1, true
|
||||
}
|
||||
return i, false
|
||||
}
|
||||
|
||||
// children stores child nodes in a node.
|
||||
type children []*node
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *children) insertAt(index int, n *node) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = n
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *children) removeAt(index int) *node {
|
||||
n := (*s)[index]
|
||||
(*s)[index] = nil
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return n
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *children) pop() (out *node) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// node is an internal node in a tree.
|
||||
//
|
||||
// It must at all times maintain the invariant that either
|
||||
// * len(children) == 0, len(items) unconstrained
|
||||
// * len(children) == len(items) + 1
|
||||
type node struct {
|
||||
items items
|
||||
children children
|
||||
t *BTree
|
||||
}
|
||||
|
||||
// split splits the given node at the given index. The current node shrinks,
|
||||
// and this function returns the item that existed at that index and a new node
|
||||
// containing all items/children after it.
|
||||
func (n *node) split(i int) (Item, *node) {
|
||||
item := n.items[i]
|
||||
next := n.t.newNode()
|
||||
next.items = append(next.items, n.items[i+1:]...)
|
||||
n.items = n.items[:i]
|
||||
if len(n.children) > 0 {
|
||||
next.children = append(next.children, n.children[i+1:]...)
|
||||
n.children = n.children[:i+1]
|
||||
}
|
||||
return item, next
|
||||
}
|
||||
|
||||
// maybeSplitChild checks if a child should be split, and if so splits it.
|
||||
// Returns whether or not a split occurred.
|
||||
func (n *node) maybeSplitChild(i, maxItems int) bool {
|
||||
if len(n.children[i].items) < maxItems {
|
||||
return false
|
||||
}
|
||||
first := n.children[i]
|
||||
item, second := first.split(maxItems / 2)
|
||||
n.items.insertAt(i, item)
|
||||
n.children.insertAt(i+1, second)
|
||||
return true
|
||||
}
|
||||
|
||||
// insert inserts an item into the subtree rooted at this node, making sure
|
||||
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
|
||||
// be found/replaced by insert, it will be returned.
|
||||
func (n *node) insert(item Item, maxItems int) Item {
|
||||
i, found := n.items.find(item)
|
||||
if found {
|
||||
out := n.items[i]
|
||||
n.items[i] = item
|
||||
return out
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.items.insertAt(i, item)
|
||||
return nil
|
||||
}
|
||||
if n.maybeSplitChild(i, maxItems) {
|
||||
inTree := n.items[i]
|
||||
switch {
|
||||
case item.Less(inTree):
|
||||
// no change, we want first split node
|
||||
case inTree.Less(item):
|
||||
i++ // we want second split node
|
||||
default:
|
||||
out := n.items[i]
|
||||
n.items[i] = item
|
||||
return out
|
||||
}
|
||||
}
|
||||
return n.children[i].insert(item, maxItems)
|
||||
}
|
||||
|
||||
// get finds the given key in the subtree and returns it.
|
||||
func (n *node) get(key Item) Item {
|
||||
i, found := n.items.find(key)
|
||||
if found {
|
||||
return n.items[i]
|
||||
} else if len(n.children) > 0 {
|
||||
return n.children[i].get(key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// min returns the first item in the subtree.
|
||||
func min(n *node) Item {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
for len(n.children) > 0 {
|
||||
n = n.children[0]
|
||||
}
|
||||
if len(n.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.items[0]
|
||||
}
|
||||
|
||||
// max returns the last item in the subtree.
|
||||
func max(n *node) Item {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
for len(n.children) > 0 {
|
||||
n = n.children[len(n.children)-1]
|
||||
}
|
||||
if len(n.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.items[len(n.items)-1]
|
||||
}
|
||||
|
||||
// toRemove details what item to remove in a node.remove call.
|
||||
type toRemove int
|
||||
|
||||
const (
|
||||
removeItem toRemove = iota // removes the given item
|
||||
removeMin // removes smallest item in the subtree
|
||||
removeMax // removes largest item in the subtree
|
||||
)
|
||||
|
||||
// remove removes an item from the subtree rooted at this node.
|
||||
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
|
||||
var i int
|
||||
var found bool
|
||||
switch typ {
|
||||
case removeMax:
|
||||
if len(n.children) == 0 {
|
||||
return n.items.pop()
|
||||
}
|
||||
i = len(n.items)
|
||||
case removeMin:
|
||||
if len(n.children) == 0 {
|
||||
return n.items.removeAt(0)
|
||||
}
|
||||
i = 0
|
||||
case removeItem:
|
||||
i, found = n.items.find(item)
|
||||
if len(n.children) == 0 {
|
||||
if found {
|
||||
return n.items.removeAt(i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
panic("invalid type")
|
||||
}
|
||||
// If we get to here, we have children.
|
||||
child := n.children[i]
|
||||
if len(child.items) <= minItems {
|
||||
return n.growChildAndRemove(i, item, minItems, typ)
|
||||
}
|
||||
// Either we had enough items to begin with, or we've done some
|
||||
// merging/stealing, because we've got enough now and we're ready to return
|
||||
// stuff.
|
||||
if found {
|
||||
// The item exists at index 'i', and the child we've selected can give us a
|
||||
// predecessor, since if we've gotten here it's got > minItems items in it.
|
||||
out := n.items[i]
|
||||
// We use our special-case 'remove' call with typ=maxItem to pull the
|
||||
// predecessor of item i (the rightmost leaf of our immediate left child)
|
||||
// and set it into where we pulled the item from.
|
||||
n.items[i] = child.remove(nil, minItems, removeMax)
|
||||
return out
|
||||
}
|
||||
// Final recursive call. Once we're here, we know that the item isn't in this
|
||||
// node and that the child is big enough to remove from.
|
||||
return child.remove(item, minItems, typ)
|
||||
}
|
||||
|
||||
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
|
||||
// item from it while keeping it at minItems, then calls remove to actually
|
||||
// remove it.
|
||||
//
|
||||
// Most documentation says we have to do two sets of special casing:
|
||||
// 1) item is in this node
|
||||
// 2) item is in child
|
||||
// In both cases, we need to handle the two subcases:
|
||||
// A) node has enough values that it can spare one
|
||||
// B) node doesn't have enough values
|
||||
// For the latter, we have to check:
|
||||
// a) left sibling has node to spare
|
||||
// b) right sibling has node to spare
|
||||
// c) we must merge
|
||||
// To simplify our code here, we handle cases #1 and #2 the same:
|
||||
// If a node doesn't have enough items, we make sure it does (using a,b,c).
|
||||
// We then simply redo our remove call, and the second time (regardless of
|
||||
// whether we're in case 1 or 2), we'll have enough items and can guarantee
|
||||
// that we hit case A.
|
||||
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
|
||||
child := n.children[i]
|
||||
if i > 0 && len(n.children[i-1].items) > minItems {
|
||||
// Steal from left child
|
||||
stealFrom := n.children[i-1]
|
||||
stolenItem := stealFrom.items.pop()
|
||||
child.items.insertAt(0, n.items[i-1])
|
||||
n.items[i-1] = stolenItem
|
||||
if len(stealFrom.children) > 0 {
|
||||
child.children.insertAt(0, stealFrom.children.pop())
|
||||
}
|
||||
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
|
||||
// steal from right child
|
||||
stealFrom := n.children[i+1]
|
||||
stolenItem := stealFrom.items.removeAt(0)
|
||||
child.items = append(child.items, n.items[i])
|
||||
n.items[i] = stolenItem
|
||||
if len(stealFrom.children) > 0 {
|
||||
child.children = append(child.children, stealFrom.children.removeAt(0))
|
||||
}
|
||||
} else {
|
||||
if i >= len(n.items) {
|
||||
i--
|
||||
child = n.children[i]
|
||||
}
|
||||
// merge with right child
|
||||
mergeItem := n.items.removeAt(i)
|
||||
mergeChild := n.children.removeAt(i + 1)
|
||||
child.items = append(child.items, mergeItem)
|
||||
child.items = append(child.items, mergeChild.items...)
|
||||
child.children = append(child.children, mergeChild.children...)
|
||||
n.t.freeNode(mergeChild)
|
||||
}
|
||||
return n.remove(item, minItems, typ)
|
||||
}
|
||||
|
||||
// iterate provides a simple method for iterating over elements in the tree.
|
||||
// It could probably use some work to be extra-efficient (it calls from() a
|
||||
// little more than it should), but it works pretty well for now.
|
||||
//
|
||||
// It requires that 'from' and 'to' both return true for values we should hit
|
||||
// with the iterator. It should also be the case that 'from' returns true for
|
||||
// values less than or equal to values 'to' returns true for, and 'to'
|
||||
// returns true for values greater than or equal to those that 'from'
|
||||
// does.
|
||||
func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool {
|
||||
for i, item := range n.items {
|
||||
if !from(item) {
|
||||
continue
|
||||
}
|
||||
if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) {
|
||||
return false
|
||||
}
|
||||
if !to(item) {
|
||||
return false
|
||||
}
|
||||
if !iter(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if len(n.children) > 0 {
|
||||
return n.children[len(n.children)-1].iterate(from, to, iter)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Used for testing/debugging purposes.
|
||||
func (n *node) print(w io.Writer, level int) {
|
||||
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
|
||||
for _, c := range n.children {
|
||||
c.print(w, level+1)
|
||||
}
|
||||
}
|
||||
|
||||
// BTree is an implementation of a B-Tree.
|
||||
//
|
||||
// BTree stores Item instances in an ordered structure, allowing easy insertion,
|
||||
// removal, and iteration.
|
||||
//
|
||||
// Write operations are not safe for concurrent mutation by multiple
|
||||
// goroutines, but Read operations are.
|
||||
type BTree struct {
|
||||
degree int
|
||||
length int
|
||||
root *node
|
||||
freelist *FreeList
|
||||
}
|
||||
|
||||
// maxItems returns the max number of items to allow per node.
|
||||
func (t *BTree) maxItems() int {
|
||||
return t.degree*2 - 1
|
||||
}
|
||||
|
||||
// minItems returns the min number of items to allow per node (ignored for the
|
||||
// root node).
|
||||
func (t *BTree) minItems() int {
|
||||
return t.degree - 1
|
||||
}
|
||||
|
||||
func (t *BTree) newNode() (n *node) {
|
||||
n = t.freelist.newNode()
|
||||
n.t = t
|
||||
return
|
||||
}
|
||||
|
||||
func (t *BTree) freeNode(n *node) {
|
||||
for i := range n.items {
|
||||
n.items[i] = nil // clear to allow GC
|
||||
}
|
||||
n.items = n.items[:0]
|
||||
for i := range n.children {
|
||||
n.children[i] = nil // clear to allow GC
|
||||
}
|
||||
n.children = n.children[:0]
|
||||
n.t = nil // clear to allow GC
|
||||
t.freelist.freeNode(n)
|
||||
}
|
||||
|
||||
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
|
||||
// already equals the given one, it is removed from the tree and returned.
|
||||
// Otherwise, nil is returned.
|
||||
//
|
||||
// nil cannot be added to the tree (will panic).
|
||||
func (t *BTree) ReplaceOrInsert(item Item) Item {
|
||||
if item == nil {
|
||||
panic("nil item being added to BTree")
|
||||
}
|
||||
if t.root == nil {
|
||||
t.root = t.newNode()
|
||||
t.root.items = append(t.root.items, item)
|
||||
t.length++
|
||||
return nil
|
||||
} else if len(t.root.items) >= t.maxItems() {
|
||||
item2, second := t.root.split(t.maxItems() / 2)
|
||||
oldroot := t.root
|
||||
t.root = t.newNode()
|
||||
t.root.items = append(t.root.items, item2)
|
||||
t.root.children = append(t.root.children, oldroot, second)
|
||||
}
|
||||
out := t.root.insert(item, t.maxItems())
|
||||
if out == nil {
|
||||
t.length++
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Delete removes an item equal to the passed in item from the tree, returning
|
||||
// it. If no such item exists, returns nil.
|
||||
func (t *BTree) Delete(item Item) Item {
|
||||
return t.deleteItem(item, removeItem)
|
||||
}
|
||||
|
||||
// DeleteMin removes the smallest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMin() Item {
|
||||
return t.deleteItem(nil, removeMin)
|
||||
}
|
||||
|
||||
// DeleteMax removes the largest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMax() Item {
|
||||
return t.deleteItem(nil, removeMax)
|
||||
}
|
||||
|
||||
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
|
||||
if t.root == nil || len(t.root.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := t.root.remove(item, t.minItems(), typ)
|
||||
if len(t.root.items) == 0 && len(t.root.children) > 0 {
|
||||
oldroot := t.root
|
||||
t.root = t.root.children[0]
|
||||
t.freeNode(oldroot)
|
||||
}
|
||||
if out != nil {
|
||||
t.length--
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AscendRange calls the iterator for every value in the tree within the range
|
||||
// [greaterOrEqual, lessThan), until iterator returns false.
|
||||
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return !a.Less(greaterOrEqual) },
|
||||
func(a Item) bool { return a.Less(lessThan) },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// AscendLessThan calls the iterator for every value in the tree within the range
|
||||
// [first, pivot), until iterator returns false.
|
||||
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return true },
|
||||
func(a Item) bool { return a.Less(pivot) },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// AscendGreaterOrEqual calls the iterator for every value in the tree within
|
||||
// the range [pivot, last], until iterator returns false.
|
||||
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return !a.Less(pivot) },
|
||||
func(a Item) bool { return true },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// Ascend calls the iterator for every value in the tree within the range
|
||||
// [first, last], until iterator returns false.
|
||||
func (t *BTree) Ascend(iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return true },
|
||||
func(a Item) bool { return true },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// Get looks for the key item in the tree, returning it. It returns nil if
|
||||
// unable to find that item.
|
||||
func (t *BTree) Get(key Item) Item {
|
||||
if t.root == nil {
|
||||
return nil
|
||||
}
|
||||
return t.root.get(key)
|
||||
}
|
||||
|
||||
// Min returns the smallest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Min() Item {
|
||||
return min(t.root)
|
||||
}
|
||||
|
||||
// Max returns the largest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Max() Item {
|
||||
return max(t.root)
|
||||
}
|
||||
|
||||
// Has returns true if the given key is in the tree.
|
||||
func (t *BTree) Has(key Item) bool {
|
||||
return t.Get(key) != nil
|
||||
}
|
||||
|
||||
// Len returns the number of items currently in the tree.
|
||||
func (t *BTree) Len() int {
|
||||
return t.length
|
||||
}
|
||||
|
||||
// Int implements the Item interface for integers.
|
||||
type Int int
|
||||
|
||||
// Less returns true if int(a) < int(b).
|
||||
func (a Int) Less(b Item) bool {
|
||||
return a < b.(Int)
|
||||
}
|
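Because the Item interface above only requires a Less method, callers can store their own ordered types in the tree. A short sketch (not part of the vendored code; stringItem is a hypothetical type) keyed by strings:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

// stringItem is a hypothetical Item implementation ordered lexicographically.
type stringItem string

// Less satisfies btree.Item.
func (s stringItem) Less(than btree.Item) bool {
	return s < than.(stringItem)
}

func main() {
	tr := btree.New(2) // per the New documentation, degree 2 yields a 2-3-4 tree
	for _, w := range []string{"pear", "apple", "mango"} {
		tr.ReplaceOrInsert(stringItem(w))
	}
	fmt.Println(tr.Min(), tr.Max()) // apple mango
}
```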
76 vendor/github.com/google/btree/btree_mem.go generated vendored Normal file
@@ -0,0 +1,76 @@
// Copyright 2014 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// This binary compares memory usage between btree and gollrb.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/google/btree"
|
||||
"github.com/petar/GoLLRB/llrb"
|
||||
)
|
||||
|
||||
var (
|
||||
size = flag.Int("size", 1000000, "size of the tree to build")
|
||||
degree = flag.Int("degree", 8, "degree of btree")
|
||||
gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
vals := rand.Perm(*size)
|
||||
var t, v interface{}
|
||||
v = vals
|
||||
var stats runtime.MemStats
|
||||
for i := 0; i < 10; i++ {
|
||||
runtime.GC()
|
||||
}
|
||||
fmt.Println("-------- BEFORE ----------")
|
||||
runtime.ReadMemStats(&stats)
|
||||
fmt.Printf("%+v\n", stats)
|
||||
start := time.Now()
|
||||
if *gollrb {
|
||||
tr := llrb.New()
|
||||
for _, v := range vals {
|
||||
tr.ReplaceOrInsert(llrb.Int(v))
|
||||
}
|
||||
t = tr // keep it around
|
||||
} else {
|
||||
tr := btree.New(*degree)
|
||||
for _, v := range vals {
|
||||
tr.ReplaceOrInsert(btree.Int(v))
|
||||
}
|
||||
t = tr // keep it around
|
||||
}
|
||||
fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
|
||||
fmt.Println("-------- AFTER ----------")
|
||||
runtime.ReadMemStats(&stats)
|
||||
fmt.Printf("%+v\n", stats)
|
||||
for i := 0; i < 10; i++ {
|
||||
runtime.GC()
|
||||
}
|
||||
fmt.Println("-------- AFTER GC ----------")
|
||||
runtime.ReadMemStats(&stats)
|
||||
fmt.Printf("%+v\n", stats)
|
||||
if t == v {
|
||||
fmt.Println("to make sure vals and tree aren't GC'd")
|
||||
}
|
||||
}
|
2112 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go generated vendored
File diff suppressed because it is too large
10 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go generated vendored
@@ -1376,7 +1376,7 @@ func (m *ItemsItem) GetSchema() []*Schema {
}
|
||||
|
||||
type JsonReference struct {
|
||||
XRef string `protobuf:"bytes,1,opt,name=_ref,json=ref" json:"_ref,omitempty"`
|
||||
XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
|
||||
Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -2513,7 +2513,7 @@ func _ParametersItem_OneofSizer(msg proto.Message) (n int) {
|
|||
}
|
||||
|
||||
type PathItem struct {
|
||||
XRef string `protobuf:"bytes,1,opt,name=_ref,json=ref" json:"_ref,omitempty"`
|
||||
XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
|
||||
Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"`
|
||||
Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"`
|
||||
Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"`
|
||||
|
@ -3392,7 +3392,7 @@ func (m *Responses) GetVendorExtension() []*NamedAny {
|
|||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
type Schema struct {
|
||||
XRef string `protobuf:"bytes,1,opt,name=_ref,json=ref" json:"_ref,omitempty"`
|
||||
XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
|
||||
Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
|
||||
Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"`
|
||||
Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
|
||||
|
@ -4351,7 +4351,7 @@ var fileDescriptor0 = []byte{
|
|||
0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c,
|
||||
0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02,
|
||||
0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3,
|
||||
0xb2, 0x8f, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3,
|
||||
0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3,
|
||||
0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e,
|
||||
0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7,
|
||||
0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94,
|
||||
|
@ -4452,5 +4452,5 @@ var fileDescriptor0 = []byte{
|
|||
0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47,
|
||||
0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf,
|
||||
0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff,
|
||||
0xff, 0x3c, 0x01, 0x3f, 0x38, 0xe4, 0x3a, 0x00, 0x00,
|
||||
0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00,
|
||||
}
|
||||
|
|
10 vendor/github.com/googleapis/gnostic/compiler/context.go generated vendored
@@ -14,28 +14,30 @@
|
||||
package compiler
|
||||
|
||||
// Context contains state of the compiler as it traverses a document.
|
||||
type Context struct {
|
||||
Parent *Context
|
||||
Name string
|
||||
ExtensionHandlers *[]ExtensionHandler
|
||||
}
|
||||
|
||||
// NewContextWithExtensions returns a new object representing the compiler state
|
||||
func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
|
||||
return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
|
||||
}
|
||||
|
||||
// NewContext returns a new object representing the compiler state
|
||||
func NewContext(name string, parent *Context) *Context {
|
||||
if parent != nil {
|
||||
return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
|
||||
} else {
|
||||
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
|
||||
}
|
||||
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
|
||||
}
|
||||
|
||||
// Description returns a text description of the compiler state
|
||||
func (context *Context) Description() string {
|
||||
if context.Parent != nil {
|
||||
return context.Parent.Description() + "." + context.Name
|
||||
} else {
|
||||
return context.Name
|
||||
}
|
||||
return context.Name
|
||||
}
|
||||
|
|
12 vendor/github.com/googleapis/gnostic/compiler/error.go generated vendored
@@ -14,29 +14,31 @@
|
||||
package compiler
|
||||
|
||||
// basic error type
|
||||
// Error represents compiler errors and their location in the document.
|
||||
type Error struct {
|
||||
Context *Context
|
||||
Message string
|
||||
}
|
||||
|
||||
// NewError creates an Error.
|
||||
func NewError(context *Context, message string) *Error {
|
||||
return &Error{Context: context, Message: message}
|
||||
}
|
||||
|
||||
// Error returns the string value of an Error.
|
||||
func (err *Error) Error() string {
|
||||
if err.Context != nil {
|
||||
return "ERROR " + err.Context.Description() + " " + err.Message
|
||||
} else {
|
||||
if err.Context == nil {
|
||||
return "ERROR " + err.Message
|
||||
}
|
||||
return "ERROR " + err.Context.Description() + " " + err.Message
|
||||
}
|
||||
|
||||
// container for groups of errors
|
||||
// ErrorGroup is a container for groups of Error values.
|
||||
type ErrorGroup struct {
|
||||
Errors []error
|
||||
}
|
||||
|
||||
// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty.
|
||||
func NewErrorGroupOrNil(errors []error) error {
|
||||
if len(errors) == 0 {
|
||||
return nil
|
||||
|
|
4 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go generated vendored
@@ -29,16 +29,18 @@ import (
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
|
||||
type ExtensionHandler struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
// HandleExtension calls a binary extension handler.
|
||||
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
|
||||
handled := false
|
||||
var errFromPlugin error
|
||||
var outFromPlugin *any.Any
|
||||
|
||||
if context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
|
||||
if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
|
||||
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
|
||||
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
|
||||
if outFromPlugin == nil {
|
||||
|
|
70 vendor/github.com/googleapis/gnostic/compiler/helpers.go generated vendored
@@ -19,27 +19,27 @@ import (
"gopkg.in/yaml.v2"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// compiler helper functions, usually called from generated code
|
||||
|
||||
// UnpackMap gets a yaml.MapSlice if possible.
|
||||
func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
|
||||
m, ok := in.(yaml.MapSlice)
|
||||
if ok {
|
||||
return m, ok
|
||||
} else {
|
||||
// do we have an empty array?
|
||||
a, ok := in.([]interface{})
|
||||
if ok && len(a) == 0 {
|
||||
// if so, return an empty map
|
||||
return yaml.MapSlice{}, ok
|
||||
} else {
|
||||
return nil, ok
|
||||
}
|
||||
return m, true
|
||||
}
|
||||
// do we have an empty array?
|
||||
a, ok := in.([]interface{})
|
||||
if ok && len(a) == 0 {
|
||||
// if so, return an empty map
|
||||
return yaml.MapSlice{}, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
|
||||
func SortedKeysForMap(m yaml.MapSlice) []string {
|
||||
keys := make([]string, 0)
|
||||
for _, item := range m {
|
||||
|
@ -49,6 +49,7 @@ func SortedKeysForMap(m yaml.MapSlice) []string {
|
|||
return keys
|
||||
}
|
||||
|
||||
// MapHasKey returns true if a yaml.MapSlice contains a specified key.
|
||||
func MapHasKey(m yaml.MapSlice, key string) bool {
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
|
@ -59,6 +60,7 @@ func MapHasKey(m yaml.MapSlice, key string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// MapValueForKey gets the value of a map value for a specified key.
|
||||
func MapValueForKey(m yaml.MapSlice, key string) interface{} {
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
|
@ -69,6 +71,7 @@ func MapValueForKey(m yaml.MapSlice, key string) interface{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible.
|
||||
func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
|
||||
stringArray := make([]string, 0)
|
||||
for _, item := range interfaceArray {
|
||||
|
@ -80,22 +83,7 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
|
|||
return stringArray
|
||||
}
|
||||
|
||||
func PatternMatches(pattern string, value string) bool {
|
||||
// if pattern contains a subpattern like "{path}", replace it with ".*"
|
||||
if pattern[0] != '^' {
|
||||
subpatternPattern := regexp.MustCompile("^.*(\\{.*\\}).*$")
|
||||
if matches := subpatternPattern.FindSubmatch([]byte(pattern)); matches != nil {
|
||||
match := string(matches[1])
|
||||
pattern = strings.Replace(pattern, match, ".*", -1)
|
||||
}
|
||||
}
|
||||
matched, err := regexp.Match(pattern, []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return matched
|
||||
}
|
||||
|
||||
// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
|
||||
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
|
||||
missingKeys := make([]string, 0)
|
||||
for _, k := range requiredKeys {
|
||||
|
@ -106,7 +94,8 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
|
|||
return missingKeys
|
||||
}
|
||||
|
||||
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []string) []string {
|
||||
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
|
||||
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
|
||||
invalidKeys := make([]string, 0)
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
|
@ -123,7 +112,7 @@ func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []s
|
|||
if !found {
|
||||
// does the key match an allowed pattern?
|
||||
for _, allowedPattern := range allowedPatterns {
|
||||
if PatternMatches(allowedPattern, key) {
|
||||
if allowedPattern.MatchString(key) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
|
@ -137,13 +126,13 @@ func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []s
|
|||
return invalidKeys
|
||||
}
|
||||
|
||||
// describe a map (for debugging purposes)
|
||||
// DescribeMap describes a map (for debugging purposes).
|
||||
func DescribeMap(in interface{}, indent string) string {
|
||||
description := ""
|
||||
m, ok := in.(map[string]interface{})
|
||||
if ok {
|
||||
keys := make([]string, 0)
|
||||
for k, _ := range m {
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
@ -166,14 +155,15 @@ func DescribeMap(in interface{}, indent string) string {
|
|||
return description
|
||||
}
|
||||
|
||||
// PluralProperties returns the string "properties" pluralized.
|
||||
func PluralProperties(count int) string {
|
||||
if count == 1 {
|
||||
return "property"
|
||||
} else {
|
||||
return "properties"
|
||||
}
|
||||
return "properties"
|
||||
}
|
||||
|
||||
// StringArrayContainsValue returns true if a string array contains a specified value.
|
||||
func StringArrayContainsValue(array []string, value string) bool {
|
||||
for _, item := range array {
|
||||
if item == value {
|
||||
|
@ -183,6 +173,7 @@ func StringArrayContainsValue(array []string, value string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// StringArrayContainsValues returns true if a string array contains all of a list of specified values.
|
||||
func StringArrayContainsValues(array []string, values []string) bool {
|
||||
for _, value := range values {
|
||||
if !StringArrayContainsValue(array, value) {
|
||||
|
@ -191,3 +182,16 @@ func StringArrayContainsValues(array []string, values []string) bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// StringValue returns the string value of an item.
|
||||
func StringValue(item interface{}) (value string, ok bool) {
|
||||
value, ok = item.(string)
|
||||
if ok {
|
||||
return value, ok
|
||||
}
|
||||
intValue, ok := item.(int)
|
||||
if ok {
|
||||
return strconv.Itoa(intValue), true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
|
116 vendor/github.com/googleapis/gnostic/compiler/reader.go generated vendored
@@ -25,29 +25,30 @@ import (
"strings"
|
||||
)
|
||||
|
||||
var file_cache map[string][]byte
|
||||
var info_cache map[string]interface{}
|
||||
var fileCache map[string][]byte
|
||||
var infoCache map[string]interface{}
|
||||
var count int64
|
||||
|
||||
var VERBOSE_READER = false
|
||||
var verboseReader = false
|
||||
|
||||
func initializeFileCache() {
|
||||
if file_cache == nil {
|
||||
file_cache = make(map[string][]byte, 0)
|
||||
if fileCache == nil {
|
||||
fileCache = make(map[string][]byte, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func initializeInfoCache() {
|
||||
if info_cache == nil {
|
||||
info_cache = make(map[string]interface{}, 0)
|
||||
if infoCache == nil {
|
||||
infoCache = make(map[string]interface{}, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// FetchFile gets a specified file from the local filesystem or a remote location.
|
||||
func FetchFile(fileurl string) ([]byte, error) {
|
||||
initializeFileCache()
|
||||
bytes, ok := file_cache[fileurl]
|
||||
bytes, ok := fileCache[fileurl]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
if verboseReader {
|
||||
log.Printf("Cache hit %s", fileurl)
|
||||
}
|
||||
return bytes, nil
|
||||
|
@ -56,30 +57,17 @@ func FetchFile(fileurl string) ([]byte, error) {
|
|||
response, err := http.Get(fileurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
defer response.Body.Close()
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err == nil {
|
||||
file_cache[fileurl] = bytes
|
||||
}
|
||||
return bytes, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
bytes, err = ioutil.ReadAll(response.Body)
|
||||
if err == nil {
|
||||
fileCache[fileurl] = bytes
|
||||
}
|
||||
return bytes, err
|
||||
}
|
||||
|
||||
// read a file and unmarshal it as a yaml.MapSlice
|
||||
func ReadInfoForFile(filename string) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
info, ok := info_cache[filename]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit info for file %s", filename)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Reading info for file %s", filename)
|
||||
}
|
||||
|
||||
// ReadBytesForFile reads the bytes of a file.
|
||||
func ReadBytesForFile(filename string) ([]byte, error) {
|
||||
// is the filename a url?
|
||||
fileurl, _ := url.Parse(filename)
|
||||
if fileurl.Scheme != "" {
|
||||
|
@ -88,43 +76,51 @@ func ReadInfoForFile(filename string) (interface{}, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err = yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info_cache[filename] = info
|
||||
return info, nil
|
||||
} else {
|
||||
// no, it's a local filename
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err = yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info_cache[filename] = info
|
||||
return info, nil
|
||||
return bytes, nil
|
||||
}
|
||||
// no, it's a local filename
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
|
||||
// read a file and return the fragment needed to resolve a $ref
|
||||
// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
|
||||
func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
cachedInfo, ok := infoCache[filename]
|
||||
if ok {
|
||||
if verboseReader {
|
||||
log.Printf("Cache hit info for file %s", filename)
|
||||
}
|
||||
return cachedInfo, nil
|
||||
}
|
||||
if verboseReader {
|
||||
log.Printf("Reading info for file %s", filename)
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err := yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infoCache[filename] = info
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref.
|
||||
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
{
|
||||
info, ok := info_cache[ref]
|
||||
info, ok := infoCache[ref]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
if verboseReader {
|
||||
log.Printf("Cache hit for ref %s#%s", basefile, ref)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
if VERBOSE_READER {
|
||||
if verboseReader {
|
||||
log.Printf("Reading info for ref %s#%s", basefile, ref)
|
||||
}
|
||||
count = count + 1
|
||||
|
@ -136,7 +132,11 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
|
|||
} else {
|
||||
filename = basefile
|
||||
}
|
||||
info, err := ReadInfoForFile(filename)
|
||||
bytes, err := ReadBytesForFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err := ReadInfoFromBytes(filename, bytes)
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
} else {
|
||||
|
@ -154,7 +154,7 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
|
|||
}
|
||||
}
|
||||
if !found {
|
||||
info_cache[ref] = nil
|
||||
infoCache[ref] = nil
|
||||
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
|
||||
}
|
||||
}
|
||||
|
@ -162,6 +162,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
|
|||
}
|
||||
}
|
||||
}
|
||||
info_cache[ref] = info
|
||||
infoCache[ref] = info
|
||||
return info, nil
|
||||
}
|
||||
|
|
2 vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh generated vendored
@@ -3,5 +3,3 @@ go get github.com/golang/protobuf/protoc-gen-go
protoc \
|
||||
--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto
|
||||
|
||||
go build
|
||||
go install
|
||||
|
|
23 vendor/github.com/googleapis/gnostic/extensions/extensions.go generated vendored
@@ -21,40 +21,39 @@ import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type documentHandler func(version string, extensionName string, document string)
|
||||
type extensionHandler func(name string, info yaml.MapSlice) (bool, proto.Message, error)
|
||||
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
|
||||
|
||||
func forInputYamlFromOpenapic(handler documentHandler) {
|
||||
data, err := ioutil.ReadAll(os.Stdin)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("File error:", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
fmt.Println("No input data.")
|
||||
os.Exit(1)
|
||||
}
|
||||
request := &ExtensionHandlerRequest{}
|
||||
err = proto.Unmarshal(data, request)
|
||||
if err != nil {
|
||||
fmt.Println("Input error:", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
|
||||
}
|
||||
|
||||
// ProcessExtension calles the handler for a specified extension.
|
||||
func ProcessExtension(handleExtension extensionHandler) {
|
||||
response := &ExtensionHandlerResponse{}
|
||||
forInputYamlFromOpenapic(
|
||||
func(version string, extensionName string, yamlInput string) {
|
||||
var info yaml.MapSlice
|
||||
var newObject proto.Message
|
||||
var err error
|
||||
err = yaml.Unmarshal([]byte(yamlInput), &info)
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
handled, newObject, err := handleExtension(extensionName, info)
|
||||
handled, newObject, err := handleExtension(extensionName, yamlInput)
|
||||
if !handled {
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
|
|
18 vendor/github.com/gregjones/httpcache/.travis.yml generated vendored Normal file
@@ -0,0 +1,18 @@
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- master
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
install:
|
||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
7 vendor/github.com/gregjones/httpcache/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,7 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
24 vendor/github.com/gregjones/httpcache/README.md generated vendored Normal file
@@ -0,0 +1,24 @@
httpcache
|
||||
=========
|
||||
|
||||
[](https://travis-ci.org/gregjones/httpcache) [](https://godoc.org/github.com/gregjones/httpcache)
|
||||
|
||||
Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
|
||||
|
||||
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
|
||||
|
||||
Cache Backends
|
||||
--------------
|
||||
|
||||
- The built-in 'memory' cache stores responses in an in-memory map.
|
||||
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
|
||||
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
|
||||
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
|
||||
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
|
||||
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
|
||||
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
- [MIT License](LICENSE.txt)
|
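The README above stops at the backend list; as a usage note, here is a minimal sketch of wiring the vendored package into an ordinary client, assuming only the exported NewMemoryCacheTransport, Client, and XFromCache names that appear later in this diff (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	// Build an http.Client whose RoundTripper consults the in-memory cache
	// before hitting the network; repeated GETs may be served from cache.
	t := httpcache.NewMemoryCacheTransport()
	client := t.Client()

	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Responses answered from the cache carry the X-From-Cache header,
	// because MarkCachedResponses defaults to true in NewTransport.
	fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
	_ = http.StatusOK // keep net/http import obviously in use
}
```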
61
vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
generated
vendored
Normal file
|
@ -0,0 +1,61 @@
|
|||
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
|
||||
// to supplement an in-memory map with persistent storage
|
||||
//
|
||||
package diskcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"github.com/peterbourgon/diskv"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
|
||||
type Cache struct {
|
||||
d *diskv.Diskv
|
||||
}
|
||||
|
||||
// Get returns the response corresponding to key if present
|
||||
func (c *Cache) Get(key string) (resp []byte, ok bool) {
|
||||
key = keyToFilename(key)
|
||||
resp, err := c.d.Read(key)
|
||||
if err != nil {
|
||||
return []byte{}, false
|
||||
}
|
||||
return resp, true
|
||||
}
|
||||
|
||||
// Set saves a response to the cache as key
|
||||
func (c *Cache) Set(key string, resp []byte) {
|
||||
key = keyToFilename(key)
|
||||
c.d.WriteStream(key, bytes.NewReader(resp), true)
|
||||
}
|
||||
|
||||
// Delete removes the response with key from the cache
|
||||
func (c *Cache) Delete(key string) {
|
||||
key = keyToFilename(key)
|
||||
c.d.Erase(key)
|
||||
}
|
||||
|
||||
func keyToFilename(key string) string {
|
||||
h := md5.New()
|
||||
io.WriteString(h, key)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// New returns a new Cache that will store files in basePath
|
||||
func New(basePath string) *Cache {
|
||||
return &Cache{
|
||||
d: diskv.New(diskv.Options{
|
||||
BasePath: basePath,
|
||||
CacheSizeMax: 100 * 1024 * 1024, // 100MB
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
|
||||
// storage.
|
||||
func NewWithDiskv(d *diskv.Diskv) *Cache {
|
||||
return &Cache{d}
|
||||
}
|
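A short sketch of how the diskcache backend added above would typically be combined with the parent package; newCachingClient and the basePath value are illustrative, not part of the vendored code:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

// newCachingClient is a hypothetical helper: diskcache.New builds a
// diskv-backed Cache rooted at basePath (entries stored in files named by
// the MD5 of the cache key), and httpcache.NewTransport wraps it as an
// http.RoundTripper.
func newCachingClient(basePath string) *http.Client {
	cache := diskcache.New(basePath)
	return httpcache.NewTransport(cache).Client()
}

func main() {
	client := newCachingClient("/tmp/httpcache-demo")
	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```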
553
vendor/github.com/gregjones/httpcache/httpcache.go
generated
vendored
Normal file
|
@ -0,0 +1,553 @@
|
|||
// Package httpcache provides a http.RoundTripper implementation that works as a
|
||||
// mostly RFC-compliant cache for http responses.
|
||||
//
|
||||
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
|
||||
// and not for a shared proxy).
|
||||
//
|
||||
package httpcache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
stale = iota
|
||||
fresh
|
||||
transparent
|
||||
// XFromCache is the header added to responses that are returned from the cache
|
||||
XFromCache = "X-From-Cache"
|
||||
)
|
||||
|
||||
// A Cache interface is used by the Transport to store and retrieve responses.
|
||||
type Cache interface {
|
||||
// Get returns the []byte representation of a cached response and a bool
|
||||
// set to true if the value isn't empty
|
||||
Get(key string) (responseBytes []byte, ok bool)
|
||||
// Set stores the []byte representation of a response against a key
|
||||
Set(key string, responseBytes []byte)
|
||||
// Delete removes the value associated with the key
|
||||
Delete(key string)
|
||||
}
|
||||
|
||||
// cacheKey returns the cache key for req.
|
||||
func cacheKey(req *http.Request) string {
|
||||
return req.URL.String()
|
||||
}
|
||||
|
||||
// CachedResponse returns the cached http.Response for req if present, and nil
|
||||
// otherwise.
|
||||
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
|
||||
cachedVal, ok := c.Get(cacheKey(req))
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
b := bytes.NewBuffer(cachedVal)
|
||||
return http.ReadResponse(bufio.NewReader(b), req)
|
||||
}
|
||||
|
||||
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
|
||||
type MemoryCache struct {
|
||||
mu sync.RWMutex
|
||||
items map[string][]byte
|
||||
}
|
||||
|
||||
// Get returns the []byte representation of the response and true if present, false if not
|
||||
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
|
||||
c.mu.RLock()
|
||||
resp, ok = c.items[key]
|
||||
c.mu.RUnlock()
|
||||
return resp, ok
|
||||
}
|
||||
|
||||
// Set saves response resp to the cache with key
|
||||
func (c *MemoryCache) Set(key string, resp []byte) {
|
||||
c.mu.Lock()
|
||||
c.items[key] = resp
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Delete removes key from the cache
|
||||
func (c *MemoryCache) Delete(key string) {
|
||||
c.mu.Lock()
|
||||
delete(c.items, key)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// NewMemoryCache returns a new Cache that will store items in an in-memory map
|
||||
func NewMemoryCache() *MemoryCache {
|
||||
c := &MemoryCache{items: map[string][]byte{}}
|
||||
return c
|
||||
}
|
||||
|
||||
// Transport is an implementation of http.RoundTripper that will return values from a cache
|
||||
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
|
||||
// to repeated requests allowing servers to return 304 / Not Modified
|
||||
type Transport struct {
|
||||
// The RoundTripper interface actually used to make requests
|
||||
// If nil, http.DefaultTransport is used
|
||||
Transport http.RoundTripper
|
||||
Cache Cache
|
||||
// If true, responses returned from the cache will be given an extra header, X-From-Cache
|
||||
MarkCachedResponses bool
|
||||
}
|
||||
|
||||
// NewTransport returns a new Transport with the
|
||||
// provided Cache implementation and MarkCachedResponses set to true
|
||||
func NewTransport(c Cache) *Transport {
|
||||
return &Transport{Cache: c, MarkCachedResponses: true}
|
||||
}
|
||||
|
||||
// Client returns an *http.Client that caches responses.
|
||||
func (t *Transport) Client() *http.Client {
|
||||
return &http.Client{Transport: t}
|
||||
}
|
||||
|
||||
// varyMatches will return false unless all of the cached values for the headers listed in Vary
|
||||
// match the new request
|
||||
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
|
||||
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
|
||||
header = http.CanonicalHeaderKey(header)
|
||||
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// RoundTrip takes a Request and returns a Response
|
||||
//
|
||||
// If there is a fresh Response already in cache, then it will be returned without connecting to
|
||||
// the server.
|
||||
//
|
||||
// If there is a stale Response, then any validators it contains will be set on the new request
|
||||
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
|
||||
// will be returned.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
cacheKey := cacheKey(req)
|
||||
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
|
||||
var cachedResp *http.Response
|
||||
if cacheable {
|
||||
cachedResp, err = CachedResponse(t.Cache, req)
|
||||
} else {
|
||||
// Need to invalidate an existing value
|
||||
t.Cache.Delete(cacheKey)
|
||||
}
|
||||
|
||||
transport := t.Transport
|
||||
if transport == nil {
|
||||
transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
if cacheable && cachedResp != nil && err == nil {
|
||||
if t.MarkCachedResponses {
|
||||
cachedResp.Header.Set(XFromCache, "1")
|
||||
}
|
||||
|
||||
if varyMatches(cachedResp, req) {
|
||||
// Can only use cached value if the new request doesn't Vary significantly
|
||||
freshness := getFreshness(cachedResp.Header, req.Header)
|
||||
if freshness == fresh {
|
||||
return cachedResp, nil
|
||||
}
|
||||
|
||||
if freshness == stale {
|
||||
var req2 *http.Request
|
||||
// Add validators if caller hasn't already done so
|
||||
etag := cachedResp.Header.Get("etag")
|
||||
if etag != "" && req.Header.Get("etag") == "" {
|
||||
req2 = cloneRequest(req)
|
||||
req2.Header.Set("if-none-match", etag)
|
||||
}
|
||||
lastModified := cachedResp.Header.Get("last-modified")
|
||||
if lastModified != "" && req.Header.Get("last-modified") == "" {
|
||||
if req2 == nil {
|
||||
req2 = cloneRequest(req)
|
||||
}
|
||||
req2.Header.Set("if-modified-since", lastModified)
|
||||
}
|
||||
if req2 != nil {
|
||||
req = req2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp, err = transport.RoundTrip(req)
|
||||
if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
|
||||
// Replace the 304 response with the one from cache, but update with some new headers
|
||||
endToEndHeaders := getEndToEndHeaders(resp.Header)
|
||||
for _, header := range endToEndHeaders {
|
||||
cachedResp.Header[header] = resp.Header[header]
|
||||
}
|
||||
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
|
||||
cachedResp.StatusCode = http.StatusOK
|
||||
|
||||
resp = cachedResp
|
||||
} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
|
||||
req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
|
||||
// In case of transport failure and stale-if-error activated, returns cached content
|
||||
// when available
|
||||
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
|
||||
cachedResp.StatusCode = http.StatusOK
|
||||
return cachedResp, nil
|
||||
} else {
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
t.Cache.Delete(cacheKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
reqCacheControl := parseCacheControl(req.Header)
|
||||
if _, ok := reqCacheControl["only-if-cached"]; ok {
|
||||
resp = newGatewayTimeoutResponse(req)
|
||||
} else {
|
||||
resp, err = transport.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
|
||||
for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
|
||||
varyKey = http.CanonicalHeaderKey(varyKey)
|
||||
fakeHeader := "X-Varied-" + varyKey
|
||||
reqValue := req.Header.Get(varyKey)
|
||||
if reqValue != "" {
|
||||
resp.Header.Set(fakeHeader, reqValue)
|
||||
}
|
||||
}
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
// Delay caching until EOF is reached.
|
||||
resp.Body = &cachingReadCloser{
|
||||
R: resp.Body,
|
||||
OnEOF: func(r io.Reader) {
|
||||
resp := *resp
|
||||
resp.Body = ioutil.NopCloser(r)
|
||||
respBytes, err := httputil.DumpResponse(&resp, true)
|
||||
if err == nil {
|
||||
t.Cache.Set(cacheKey, respBytes)
|
||||
}
|
||||
},
|
||||
}
|
||||
default:
|
||||
respBytes, err := httputil.DumpResponse(resp, true)
|
||||
if err == nil {
|
||||
t.Cache.Set(cacheKey, respBytes)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
t.Cache.Delete(cacheKey)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
|
||||
var ErrNoDateHeader = errors.New("no Date header")
|
||||
|
||||
// Date parses and returns the value of the Date header.
|
||||
func Date(respHeaders http.Header) (date time.Time, err error) {
|
||||
dateHeader := respHeaders.Get("date")
|
||||
if dateHeader == "" {
|
||||
err = ErrNoDateHeader
|
||||
return
|
||||
}
|
||||
|
||||
return time.Parse(time.RFC1123, dateHeader)
|
||||
}
|
||||
|
||||
type realClock struct{}
|
||||
|
||||
func (c *realClock) since(d time.Time) time.Duration {
|
||||
return time.Since(d)
|
||||
}
|
||||
|
||||
type timer interface {
|
||||
since(d time.Time) time.Duration
|
||||
}
|
||||
|
||||
var clock timer = &realClock{}
|
||||
|
||||
// getFreshness will return one of fresh/stale/transparent based on the cache-control
|
||||
// values of the request and the response
|
||||
//
|
||||
// fresh indicates the response can be returned
|
||||
// stale indicates that the response needs validating before it is returned
|
||||
// transparent indicates the response should not be used to fulfil the request
|
||||
//
|
||||
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
|
||||
// significant. Similarly, smax-age isn't used.
|
||||
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
|
||||
respCacheControl := parseCacheControl(respHeaders)
|
||||
reqCacheControl := parseCacheControl(reqHeaders)
|
||||
if _, ok := reqCacheControl["no-cache"]; ok {
|
||||
return transparent
|
||||
}
|
||||
if _, ok := respCacheControl["no-cache"]; ok {
|
||||
return stale
|
||||
}
|
||||
if _, ok := reqCacheControl["only-if-cached"]; ok {
|
||||
return fresh
|
||||
}
|
||||
|
||||
date, err := Date(respHeaders)
|
||||
if err != nil {
|
||||
return stale
|
||||
}
|
||||
currentAge := clock.since(date)
|
||||
|
||||
var lifetime time.Duration
|
||||
var zeroDuration time.Duration
|
||||
|
||||
// If a response includes both an Expires header and a max-age directive,
|
||||
// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
|
||||
if maxAge, ok := respCacheControl["max-age"]; ok {
|
||||
lifetime, err = time.ParseDuration(maxAge + "s")
|
||||
if err != nil {
|
||||
lifetime = zeroDuration
|
||||
}
|
||||
} else {
|
||||
expiresHeader := respHeaders.Get("Expires")
|
||||
if expiresHeader != "" {
|
||||
expires, err := time.Parse(time.RFC1123, expiresHeader)
|
||||
if err != nil {
|
||||
lifetime = zeroDuration
|
||||
} else {
|
||||
lifetime = expires.Sub(date)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if maxAge, ok := reqCacheControl["max-age"]; ok {
|
||||
// the client is willing to accept a response whose age is no greater than the specified time in seconds
|
||||
lifetime, err = time.ParseDuration(maxAge + "s")
|
||||
if err != nil {
|
||||
lifetime = zeroDuration
|
||||
}
|
||||
}
|
||||
if minfresh, ok := reqCacheControl["min-fresh"]; ok {
|
||||
// the client wants a response that will still be fresh for at least the specified number of seconds.
|
||||
minfreshDuration, err := time.ParseDuration(minfresh + "s")
|
||||
if err == nil {
|
||||
currentAge = time.Duration(currentAge + minfreshDuration)
|
||||
}
|
||||
}
|
||||
|
||||
if maxstale, ok := reqCacheControl["max-stale"]; ok {
|
||||
// Indicates that the client is willing to accept a response that has exceeded its expiration time.
|
||||
// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
|
||||
// its expiration time by no more than the specified number of seconds.
|
||||
// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
|
||||
//
|
||||
// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
|
||||
// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
|
||||
// return-value available here.
|
||||
if maxstale == "" {
|
||||
return fresh
|
||||
}
|
||||
maxstaleDuration, err := time.ParseDuration(maxstale + "s")
|
||||
if err == nil {
|
||||
currentAge = time.Duration(currentAge - maxstaleDuration)
|
||||
}
|
||||
}
|
||||
|
||||
if lifetime > currentAge {
|
||||
return fresh
|
||||
}
|
||||
|
||||
return stale
|
||||
}
|
||||
|
||||
// Returns true if either the request or the response includes the stale-if-error
|
||||
// cache control extension: https://tools.ietf.org/html/rfc5861
|
||||
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
|
||||
respCacheControl := parseCacheControl(respHeaders)
|
||||
reqCacheControl := parseCacheControl(reqHeaders)
|
||||
|
||||
var err error
|
||||
lifetime := time.Duration(-1)
|
||||
|
||||
if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
|
||||
if staleMaxAge != "" {
|
||||
lifetime, err = time.ParseDuration(staleMaxAge + "s")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
|
||||
if staleMaxAge != "" {
|
||||
lifetime, err = time.ParseDuration(staleMaxAge + "s")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if lifetime >= 0 {
|
||||
date, err := Date(respHeaders)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
currentAge := clock.since(date)
|
||||
if lifetime > currentAge {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getEndToEndHeaders(respHeaders http.Header) []string {
|
||||
// These headers are always hop-by-hop
|
||||
hopByHopHeaders := map[string]struct{}{
|
||||
"Connection": struct{}{},
|
||||
"Keep-Alive": struct{}{},
|
||||
"Proxy-Authenticate": struct{}{},
|
||||
"Proxy-Authorization": struct{}{},
|
||||
"Te": struct{}{},
|
||||
"Trailers": struct{}{},
|
||||
"Transfer-Encoding": struct{}{},
|
||||
"Upgrade": struct{}{},
|
||||
}
|
||||
|
||||
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
|
||||
// any header listed in connection, if present, is also considered hop-by-hop
|
||||
if strings.Trim(extra, " ") != "" {
|
||||
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
|
||||
}
|
||||
}
|
||||
endToEndHeaders := []string{}
|
||||
for respHeader, _ := range respHeaders {
|
||||
if _, ok := hopByHopHeaders[respHeader]; !ok {
|
||||
endToEndHeaders = append(endToEndHeaders, respHeader)
|
||||
}
|
||||
}
|
||||
return endToEndHeaders
|
||||
}
|
||||
|
||||
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
|
||||
if _, ok := respCacheControl["no-store"]; ok {
|
||||
return false
|
||||
}
|
||||
if _, ok := reqCacheControl["no-store"]; ok {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
|
||||
var braw bytes.Buffer
|
||||
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
|
||||
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return r2
|
||||
}
|
||||
|
||||
type cacheControl map[string]string
|
||||
|
||||
func parseCacheControl(headers http.Header) cacheControl {
|
||||
cc := cacheControl{}
|
||||
ccHeader := headers.Get("Cache-Control")
|
||||
for _, part := range strings.Split(ccHeader, ",") {
|
||||
part = strings.Trim(part, " ")
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
if strings.ContainsRune(part, '=') {
|
||||
keyval := strings.Split(part, "=")
|
||||
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
|
||||
} else {
|
||||
cc[part] = ""
|
||||
}
|
||||
}
|
||||
return cc
|
||||
}
|
||||
|
||||
// headerAllCommaSepValues returns all comma-separated values (each
|
||||
// with whitespace trimmed) for header name in headers. According to
|
||||
// Section 4.2 of the HTTP/1.1 spec
|
||||
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
|
||||
// values from multiple occurrences of a header should be concatenated, if
|
||||
// the header's value is a comma-separated list.
|
||||
func headerAllCommaSepValues(headers http.Header, name string) []string {
|
||||
var vals []string
|
||||
for _, val := range headers[http.CanonicalHeaderKey(name)] {
|
||||
fields := strings.Split(val, ",")
|
||||
for i, f := range fields {
|
||||
fields[i] = strings.TrimSpace(f)
|
||||
}
|
||||
vals = append(vals, fields...)
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
|
||||
// handler with a full copy of the content read from R when EOF is
|
||||
// reached.
|
||||
type cachingReadCloser struct {
|
||||
// Underlying ReadCloser.
|
||||
R io.ReadCloser
|
||||
// OnEOF is called with a copy of the content of R when EOF is reached.
|
||||
OnEOF func(io.Reader)
|
||||
|
||||
buf bytes.Buffer // buf stores a copy of the content of R.
|
||||
}
|
||||
|
||||
// Read reads the next len(p) bytes from R or until R is drained. The
|
||||
// return value n is the number of bytes read. If R has no data to
|
||||
// return, err is io.EOF and OnEOF is called with a full copy of what
|
||||
// has been read so far.
|
||||
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
|
||||
n, err = r.R.Read(p)
|
||||
r.buf.Write(p[:n])
|
||||
if err == io.EOF {
|
||||
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *cachingReadCloser) Close() error {
|
||||
return r.R.Close()
|
||||
}
|
||||
|
||||
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
|
||||
func NewMemoryCacheTransport() *Transport {
|
||||
c := NewMemoryCache()
|
||||
t := NewTransport(c)
|
||||
return t
|
||||
}
|
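RoundTrip only reuses a stored response when getFreshness reports it fresh, which for a plain GET reduces to comparing the response's max-age (or Expires-derived) lifetime against its age since the Date header. A standalone sketch of that comparison using only the standard library; freshFor is an illustrative name, not part of the vendored package:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// freshFor reports whether a response whose Date header is date and whose
// Cache-Control carries max-age=maxAgeSeconds would still count as fresh at
// time now. It mirrors the lifetime > currentAge comparison in getFreshness,
// ignoring request directives such as min-fresh and max-stale for brevity.
func freshFor(date time.Time, maxAgeSeconds int, now time.Time) bool {
	lifetime := time.Duration(maxAgeSeconds) * time.Second
	currentAge := now.Sub(date)
	return lifetime > currentAge
}

func main() {
	h := http.Header{}
	h.Set("Date", time.Now().Add(-30*time.Second).Format(time.RFC1123))
	date, _ := time.Parse(time.RFC1123, h.Get("Date"))

	fmt.Println(freshFor(date, 60, time.Now())) // true: 30s old, 60s lifetime
	fmt.Println(freshFor(date, 10, time.Now())) // false: already stale
}
```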
3
vendor/github.com/json-iterator/go/.codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,3 @@
ignore:
  - "output_tests/.*"
3
vendor/github.com/json-iterator/go/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
.idea
/coverage.txt
/profile.out
13
vendor/github.com/json-iterator/go/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
language: go

go:
  - 1.8.x

before_install:
  - go get -t -v ./...

script:
  - ./test.sh

after_success:
  - bash <(curl -s https://codecov.io/bash)
@@ -1,6 +1,6 @@
The MIT License (MIT)
MIT License

Copyright (c) 2015 Exponent Labs LLC
Copyright (c) 2016 json-iterator

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
80
vendor/github.com/json-iterator/go/README.md
generated
vendored
Normal file
|
@ -0,0 +1,80 @@
|
|||
[](https://sourcegraph.com/github.com/json-iterator/go?badge)
|
||||
[](http://godoc.org/github.com/json-iterator/go)
|
||||
[](https://travis-ci.org/json-iterator/go)
|
||||
[](https://codecov.io/gh/json-iterator/go)
|
||||
[](https://goreportcard.com/report/github.com/json-iterator/go)
|
||||
[](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
|
||||
[](https://gitter.im/json-iterator/Lobby)
|
||||
|
||||
A high-performance 100% compatible drop-in replacement of "encoding/json"
|
||||
|
||||
```
|
||||
Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com
|
||||
```
|
||||
|
||||
# Benchmark
|
||||
|
||||

|
||||
|
||||
Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
|
||||
|
||||
Raw Result (easyjson requires static code generation)
|
||||
|
||||
| | ns/op | allocation bytes | allocation times |
|
||||
| --- | --- | --- | --- |
|
||||
| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
|
||||
| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
|
||||
| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
|
||||
| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
|
||||
| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
|
||||
| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
|
||||
|
||||
# Usage
|
||||
|
||||
100% compatibility with standard lib
|
||||
|
||||
Replace
|
||||
|
||||
```go
|
||||
import "encoding/json"
|
||||
json.Marshal(&data)
|
||||
```
|
||||
|
||||
with
|
||||
|
||||
```go
|
||||
import "github.com/json-iterator/go"
|
||||
jsoniter.Marshal(&data)
|
||||
```
|
||||
|
||||
Replace
|
||||
|
||||
```go
|
||||
import "encoding/json"
|
||||
json.Unmarshal(input, &data)
|
||||
```
|
||||
|
||||
with
|
||||
|
||||
```go
|
||||
import "github.com/json-iterator/go"
|
||||
jsoniter.Unmarshal(input, &data)
|
||||
```
|
||||
|
||||
[More documentation](http://jsoniter.com/migrate-from-go-std.html)
|
||||
|
||||
# How to get
|
||||
|
||||
```
|
||||
go get github.com/json-iterator/go
|
||||
```
|
||||
|
||||
# Contribution Welcomed !
|
||||
|
||||
Contributors
|
||||
|
||||
* [thockin](https://github.com/thockin)
|
||||
* [mattn](https://github.com/mattn)
|
||||
* [cch123](https://github.com/cch123)
|
||||
|
||||
Report issue or pull request, or email taowen@gmail.com, or [](https://gitter.im/json-iterator/Lobby)
|
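To make the README's drop-in claim concrete, a self-contained round trip through the Marshal/Unmarshal adapters added later in this diff; the user struct and its fields are illustrative only:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type user struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	// Marshal/Unmarshal mirror encoding/json's signatures, so swapping the
	// import path is the only change this snippet would need.
	out, err := jsoniter.Marshal(user{Name: "gopher", Age: 7})
	if err != nil {
		panic(err)
	}

	var back user
	if err := jsoniter.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n%+v\n", out, back)
}
```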
127
vendor/github.com/json-iterator/go/feature_adapter.go
generated
vendored
Normal file
|
@ -0,0 +1,127 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// RawMessage to make replace json with jsoniter
|
||||
type RawMessage []byte
|
||||
|
||||
// Unmarshal adapts to json/encoding Unmarshal API
|
||||
//
|
||||
// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
|
||||
// Refer to https://godoc.org/encoding/json#Unmarshal for more information
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
return ConfigDefault.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func lastNotSpacePos(data []byte) int {
|
||||
for i := len(data) - 1; i >= 0; i-- {
|
||||
if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' {
|
||||
return i + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// UnmarshalFromString convenient method to read from string instead of []byte
|
||||
func UnmarshalFromString(str string, v interface{}) error {
|
||||
return ConfigDefault.UnmarshalFromString(str, v)
|
||||
}
|
||||
|
||||
// Get quick method to get value from deeply nested JSON structure
|
||||
func Get(data []byte, path ...interface{}) Any {
|
||||
return ConfigDefault.Get(data, path...)
|
||||
}
|
||||
|
||||
// Marshal adapts to json/encoding Marshal API
|
||||
//
|
||||
// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API
|
||||
// Refer to https://godoc.org/encoding/json#Marshal for more information
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
return ConfigDefault.Marshal(v)
|
||||
}
|
||||
|
||||
// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
|
||||
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return ConfigDefault.MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
// MarshalToString convenient method to write as string instead of []byte
|
||||
func MarshalToString(v interface{}) (string, error) {
|
||||
return ConfigDefault.MarshalToString(v)
|
||||
}
|
||||
|
||||
// NewDecoder adapts to json/stream NewDecoder API.
|
||||
//
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// Instead of a json/encoding Decoder, a Decoder is returned
|
||||
// Refer to https://godoc.org/encoding/json#NewDecoder for more information
|
||||
func NewDecoder(reader io.Reader) *Decoder {
|
||||
return ConfigDefault.NewDecoder(reader)
|
||||
}
|
||||
|
||||
// Decoder reads and decodes JSON values from an input stream.
|
||||
// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress)
|
||||
type Decoder struct {
|
||||
iter *Iterator
|
||||
}
|
||||
|
||||
// Decode decode JSON into interface{}
|
||||
func (adapter *Decoder) Decode(obj interface{}) error {
|
||||
adapter.iter.ReadVal(obj)
|
||||
err := adapter.iter.Error
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return adapter.iter.Error
|
||||
}
|
||||
|
||||
// More is there more?
|
||||
func (adapter *Decoder) More() bool {
|
||||
return adapter.iter.head != adapter.iter.tail
|
||||
}
|
||||
|
||||
// Buffered remaining buffer
|
||||
func (adapter *Decoder) Buffered() io.Reader {
|
||||
remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
|
||||
return bytes.NewReader(remaining)
|
||||
}
|
||||
|
||||
// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string)
|
||||
func (adapter *Decoder) UseNumber() {
|
||||
origCfg := adapter.iter.cfg.configBeforeFrozen
|
||||
origCfg.UseNumber = true
|
||||
adapter.iter.cfg = origCfg.Froze().(*frozenConfig)
|
||||
}
|
||||
|
||||
// NewEncoder same as json.NewEncoder
|
||||
func NewEncoder(writer io.Writer) *Encoder {
|
||||
return ConfigDefault.NewEncoder(writer)
|
||||
}
|
||||
|
||||
// Encoder same as json.Encoder
|
||||
type Encoder struct {
|
||||
stream *Stream
|
||||
}
|
||||
|
||||
// Encode encode interface{} as JSON to io.Writer
|
||||
func (adapter *Encoder) Encode(val interface{}) error {
|
||||
adapter.stream.WriteVal(val)
|
||||
adapter.stream.Flush()
|
||||
return adapter.stream.Error
|
||||
}
|
||||
|
||||
// SetIndent set the indention. Prefix is not supported
|
||||
func (adapter *Encoder) SetIndent(prefix, indent string) {
|
||||
adapter.stream.cfg.indentionStep = len(indent)
|
||||
}
|
||||
|
||||
// SetEscapeHTML escape html by default, set to false to disable
|
||||
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
|
||||
config := adapter.stream.cfg.configBeforeFrozen
|
||||
config.EscapeHTML = escapeHTML
|
||||
adapter.stream.cfg = config.Froze().(*frozenConfig)
|
||||
}
|
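Besides Marshal/Unmarshal, the adapter above exposes stream-style Decoder and Encoder wrappers; a brief sketch using only the exported names from this file (the JSON payload is illustrative):

```go
package main

import (
	"os"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// NewDecoder wraps an io.Reader, like encoding/json's stream decoder.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"lang":"go","stars":3}`))
	var doc map[string]interface{}
	if err := dec.Decode(&doc); err != nil {
		panic(err)
	}

	// NewEncoder writes to an io.Writer; SetIndent controls indentation
	// (the prefix argument is accepted but not supported by this adapter).
	enc := jsoniter.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(doc); err != nil {
		panic(err)
	}
}
```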
242
vendor/github.com/json-iterator/go/feature_any.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Any generic object representation.
|
||||
// The lazy json implementation holds []byte and parse lazily.
|
||||
type Any interface {
|
||||
LastError() error
|
||||
ValueType() ValueType
|
||||
MustBeValid() Any
|
||||
ToBool() bool
|
||||
ToInt() int
|
||||
ToInt32() int32
|
||||
ToInt64() int64
|
||||
ToUint() uint
|
||||
ToUint32() uint32
|
||||
ToUint64() uint64
|
||||
ToFloat32() float32
|
||||
ToFloat64() float64
|
||||
ToString() string
|
||||
ToVal(val interface{})
|
||||
Get(path ...interface{}) Any
|
||||
// TODO: add Set
|
||||
Size() int
|
||||
Keys() []string
|
||||
GetInterface() interface{}
|
||||
WriteTo(stream *Stream)
|
||||
}
|
||||
|
||||
type baseAny struct{}
|
||||
|
||||
func (any *baseAny) Get(path ...interface{}) Any {
|
||||
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
|
||||
}
|
||||
|
||||
func (any *baseAny) Size() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *baseAny) Keys() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (any *baseAny) ToVal(obj interface{}) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// WrapInt32 turn int32 into Any interface
|
||||
func WrapInt32(val int32) Any {
|
||||
return &int32Any{baseAny{}, val}
|
||||
}
|
||||
|
||||
// WrapInt64 turn int64 into Any interface
|
||||
func WrapInt64(val int64) Any {
|
||||
return &int64Any{baseAny{}, val}
|
||||
}
|
||||
|
||||
// WrapUint32 turn uint32 into Any interface
|
||||
func WrapUint32(val uint32) Any {
|
||||
return &uint32Any{baseAny{}, val}
|
||||
}
|
||||
|
||||
// WrapUint64 turn uint64 into Any interface
|
||||
func WrapUint64(val uint64) Any {
|
||||
return &uint64Any{baseAny{}, val}
|
||||
}
|
||||
|
||||
// WrapFloat64 turn float64 into Any interface
|
||||
func WrapFloat64(val float64) Any {
|
||||
return &floatAny{baseAny{}, val}
|
||||
}
|
||||
|
||||
// WrapString turn string into Any interface
|
||||
func WrapString(val string) Any {
|
||||
return &stringAny{baseAny{}, val}
|
||||
}
|
||||
|
||||
// Wrap turn a go object into Any interface
|
||||
func Wrap(val interface{}) Any {
|
||||
if val == nil {
|
||||
return &nilAny{}
|
||||
}
|
||||
asAny, isAny := val.(Any)
|
||||
if isAny {
|
||||
return asAny
|
||||
}
|
||||
typ := reflect.TypeOf(val)
|
||||
switch typ.Kind() {
|
||||
case reflect.Slice:
|
||||
return wrapArray(val)
|
||||
case reflect.Struct:
|
||||
return wrapStruct(val)
|
||||
case reflect.Map:
|
||||
return wrapMap(val)
|
||||
case reflect.String:
|
||||
return WrapString(val.(string))
|
||||
case reflect.Int:
|
||||
return WrapInt64(int64(val.(int)))
|
||||
case reflect.Int8:
|
||||
return WrapInt32(int32(val.(int8)))
|
||||
case reflect.Int16:
|
||||
return WrapInt32(int32(val.(int16)))
|
||||
case reflect.Int32:
|
||||
return WrapInt32(val.(int32))
|
||||
case reflect.Int64:
|
||||
return WrapInt64(val.(int64))
|
||||
case reflect.Uint:
|
||||
return WrapUint64(uint64(val.(uint)))
|
||||
case reflect.Uint8:
|
||||
return WrapUint32(uint32(val.(uint8)))
|
||||
case reflect.Uint16:
|
||||
return WrapUint32(uint32(val.(uint16)))
|
||||
case reflect.Uint32:
|
||||
return WrapUint32(uint32(val.(uint32)))
|
||||
case reflect.Uint64:
|
||||
return WrapUint64(val.(uint64))
|
||||
case reflect.Float32:
|
||||
return WrapFloat64(float64(val.(float32)))
|
||||
case reflect.Float64:
|
||||
return WrapFloat64(val.(float64))
|
||||
case reflect.Bool:
|
||||
if val.(bool) == true {
|
||||
return &trueAny{}
|
||||
}
|
||||
return &falseAny{}
|
||||
}
|
||||
return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
|
||||
}
|
||||
|
||||
// ReadAny read next JSON element as an Any object. It is a better json.RawMessage.
|
||||
func (iter *Iterator) ReadAny() Any {
|
||||
return iter.readAny()
|
||||
}
|
||||
|
||||
func (iter *Iterator) readAny() Any {
|
||||
c := iter.nextToken()
|
||||
switch c {
|
||||
case '"':
|
||||
iter.unreadByte()
|
||||
return &stringAny{baseAny{}, iter.ReadString()}
|
||||
case 'n':
|
||||
iter.skipThreeBytes('u', 'l', 'l') // null
|
||||
return &nilAny{}
|
||||
case 't':
|
||||
iter.skipThreeBytes('r', 'u', 'e') // true
|
||||
return &trueAny{}
|
||||
case 'f':
|
||||
iter.skipFourBytes('a', 'l', 's', 'e') // false
|
||||
return &falseAny{}
|
||||
case '{':
|
||||
return iter.readObjectAny()
|
||||
case '[':
|
||||
return iter.readArrayAny()
|
||||
case '-':
|
||||
return iter.readNumberAny(false)
|
||||
default:
|
||||
return iter.readNumberAny(true)
|
||||
}
|
||||
}
|
||||
|
||||
func (iter *Iterator) readNumberAny(positive bool) Any {
|
||||
iter.startCapture(iter.head - 1)
|
||||
iter.skipNumber()
|
||||
lazyBuf := iter.stopCapture()
|
||||
return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
|
||||
}
|
||||
|
||||
func (iter *Iterator) readObjectAny() Any {
|
||||
iter.startCapture(iter.head - 1)
|
||||
iter.skipObject()
|
||||
lazyBuf := iter.stopCapture()
|
||||
return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
|
||||
}
|
||||
|
||||
func (iter *Iterator) readArrayAny() Any {
|
||||
iter.startCapture(iter.head - 1)
|
||||
iter.skipArray()
|
||||
lazyBuf := iter.stopCapture()
|
||||
return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
|
||||
}
|
||||
|
||||
func locateObjectField(iter *Iterator, target string) []byte {
|
||||
var found []byte
|
||||
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
|
||||
if field == target {
|
||||
found = iter.SkipAndReturnBytes()
|
||||
return false
|
||||
}
|
||||
iter.Skip()
|
||||
return true
|
||||
})
|
||||
return found
|
||||
}
|
||||
|
||||
func locateArrayElement(iter *Iterator, target int) []byte {
|
||||
var found []byte
|
||||
n := 0
|
||||
iter.ReadArrayCB(func(iter *Iterator) bool {
|
||||
if n == target {
|
||||
found = iter.SkipAndReturnBytes()
|
||||
return false
|
||||
}
|
||||
iter.Skip()
|
||||
n++
|
||||
return true
|
||||
})
|
||||
return found
|
||||
}
|
||||
|
||||
func locatePath(iter *Iterator, path []interface{}) Any {
|
||||
for i, pathKeyObj := range path {
|
||||
switch pathKey := pathKeyObj.(type) {
|
||||
case string:
|
||||
valueBytes := locateObjectField(iter, pathKey)
|
||||
if valueBytes == nil {
|
||||
return newInvalidAny(path[i:])
|
||||
}
|
||||
iter.ResetBytes(valueBytes)
|
||||
case int:
|
||||
valueBytes := locateArrayElement(iter, pathKey)
|
||||
if valueBytes == nil {
|
||||
return newInvalidAny(path[i:])
|
||||
}
|
||||
iter.ResetBytes(valueBytes)
|
||||
case int32:
|
||||
if '*' == pathKey {
|
||||
return iter.readAny().Get(path[i:]...)
|
||||
}
|
||||
return newInvalidAny(path[i:])
|
||||
default:
|
||||
return newInvalidAny(path[i:])
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return &invalidAny{baseAny{}, iter.Error}
|
||||
}
|
||||
return iter.readAny()
|
||||
}
|
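Get and the Any interface above give lazy, path-based access into raw JSON without defining a struct; a small sketch, with illustrative field names:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"repo":{"name":"community","forks":12}}`)

	// Get walks the path lazily and returns an Any; the conversion methods
	// never panic, and LastError reports what went wrong, if anything.
	name := jsoniter.Get(data, "repo", "name").ToString()
	forks := jsoniter.Get(data, "repo", "forks").ToInt()
	fmt.Println(name, forks)

	missing := jsoniter.Get(data, "repo", "owner")
	fmt.Println(missing.ValueType() == jsoniter.InvalidValue, missing.LastError())
}
```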
278
vendor/github.com/json-iterator/go/feature_any_array.go
generated
vendored
Normal file
|
@ -0,0 +1,278 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type arrayLazyAny struct {
|
||||
baseAny
|
||||
cfg *frozenConfig
|
||||
buf []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ValueType() ValueType {
|
||||
return ArrayValue
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) LastError() error {
|
||||
return any.err
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToBool() bool {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
return iter.ReadArray()
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToInt() int {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToInt32() int32 {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToInt64() int64 {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToUint() uint {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToUint32() uint32 {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToUint64() uint64 {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToFloat32() float32 {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToFloat64() float64 {
|
||||
if any.ToBool() {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToString() string {
|
||||
return *(*string)(unsafe.Pointer(&any.buf))
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) ToVal(val interface{}) {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
iter.ReadVal(val)
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) Get(path ...interface{}) Any {
|
||||
if len(path) == 0 {
|
||||
return any
|
||||
}
|
||||
switch firstPath := path[0].(type) {
|
||||
case int:
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
valueBytes := locateArrayElement(iter, firstPath)
|
||||
if valueBytes == nil {
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
iter.ResetBytes(valueBytes)
|
||||
return locatePath(iter, path[1:])
|
||||
case int32:
|
||||
if '*' == firstPath {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
arr := make([]Any, 0)
|
||||
iter.ReadArrayCB(func(iter *Iterator) bool {
|
||||
found := iter.readAny().Get(path[1:]...)
|
||||
if found.ValueType() != InvalidValue {
|
||||
arr = append(arr, found)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return wrapArray(arr)
|
||||
}
|
||||
return newInvalidAny(path)
|
||||
default:
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) Size() int {
|
||||
size := 0
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
iter.ReadArrayCB(func(iter *Iterator) bool {
|
||||
size++
|
||||
iter.Skip()
|
||||
return true
|
||||
})
|
||||
return size
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) WriteTo(stream *Stream) {
|
||||
stream.Write(any.buf)
|
||||
}
|
||||
|
||||
func (any *arrayLazyAny) GetInterface() interface{} {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
return iter.Read()
|
||||
}
|
||||
|
||||
type arrayAny struct {
|
||||
baseAny
|
||||
val reflect.Value
|
||||
}
|
||||
|
||||
func wrapArray(val interface{}) *arrayAny {
|
||||
return &arrayAny{baseAny{}, reflect.ValueOf(val)}
|
||||
}
|
||||
|
||||
func (any *arrayAny) ValueType() ValueType {
|
||||
return ArrayValue
|
||||
}
|
||||
|
||||
func (any *arrayAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *arrayAny) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToBool() bool {
|
||||
return any.val.Len() != 0
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToInt() int {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToInt32() int32 {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToInt64() int64 {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToUint() uint {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToUint32() uint32 {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToUint64() uint64 {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToFloat32() float32 {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToFloat64() float64 {
|
||||
if any.val.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *arrayAny) ToString() string {
|
||||
str, _ := MarshalToString(any.val.Interface())
|
||||
return str
|
||||
}
|
||||
|
||||
func (any *arrayAny) Get(path ...interface{}) Any {
|
||||
if len(path) == 0 {
|
||||
return any
|
||||
}
|
||||
switch firstPath := path[0].(type) {
|
||||
case int:
|
||||
if firstPath < 0 || firstPath >= any.val.Len() {
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
return Wrap(any.val.Index(firstPath).Interface())
|
||||
case int32:
|
||||
if '*' == firstPath {
|
||||
mappedAll := make([]Any, 0)
|
||||
for i := 0; i < any.val.Len(); i++ {
|
||||
mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
|
||||
if mapped.ValueType() != InvalidValue {
|
||||
mappedAll = append(mappedAll, mapped)
|
||||
}
|
||||
}
|
||||
return wrapArray(mappedAll)
|
||||
}
|
||||
return newInvalidAny(path)
|
||||
default:
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
}
|
||||
|
||||
func (any *arrayAny) Size() int {
|
||||
return any.val.Len()
|
||||
}
|
||||
|
||||
func (any *arrayAny) WriteTo(stream *Stream) {
|
||||
stream.WriteVal(any.val)
|
||||
}
|
||||
|
||||
func (any *arrayAny) GetInterface() interface{} {
|
||||
return any.val.Interface()
|
||||
}
|
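The lazy array implementation above also honours the '*' wildcard path element, mapping the rest of the path over every element; a hedged sketch with an illustrative payload:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"users":[{"name":"ann"},{"name":"bob"}]}`)

	// The rune '*' selects every element of the "users" array, so the result
	// is an array-valued Any holding each user's "name".
	names := jsoniter.Get(data, "users", '*', "name")
	fmt.Println(names.Size())            // 2
	fmt.Println(names.Get(1).ToString()) // bob
}
```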
137
vendor/github.com/json-iterator/go/feature_any_bool.go
generated
vendored
Normal file
|
@ -0,0 +1,137 @@
|
|||
package jsoniter
|
||||
|
||||
type trueAny struct {
|
||||
baseAny
|
||||
}
|
||||
|
||||
func (any *trueAny) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *trueAny) ToBool() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (any *trueAny) ToInt() int {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToInt32() int32 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToInt64() int64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToUint() uint {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToUint32() uint32 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToUint64() uint64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToFloat32() float32 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToFloat64() float64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (any *trueAny) ToString() string {
|
||||
return "true"
|
||||
}
|
||||
|
||||
func (any *trueAny) WriteTo(stream *Stream) {
|
||||
stream.WriteTrue()
|
||||
}
|
||||
|
||||
func (any *trueAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *trueAny) GetInterface() interface{} {
|
||||
return true
|
||||
}
|
||||
|
||||
func (any *trueAny) ValueType() ValueType {
|
||||
return BoolValue
|
||||
}
|
||||
|
||||
func (any *trueAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
type falseAny struct {
|
||||
baseAny
|
||||
}
|
||||
|
||||
func (any *falseAny) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *falseAny) ToBool() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (any *falseAny) ToInt() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToInt32() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToInt64() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToUint() uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToUint32() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToUint64() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToFloat32() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToFloat64() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *falseAny) ToString() string {
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (any *falseAny) WriteTo(stream *Stream) {
|
||||
stream.WriteFalse()
|
||||
}
|
||||
|
||||
func (any *falseAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *falseAny) GetInterface() interface{} {
|
||||
return false
|
||||
}
|
||||
|
||||
func (any *falseAny) ValueType() ValueType {
|
||||
return BoolValue
|
||||
}
|
||||
|
||||
func (any *falseAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
83
vendor/github.com/json-iterator/go/feature_any_float.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type floatAny struct {
|
||||
baseAny
|
||||
val float64
|
||||
}
|
||||
|
||||
func (any *floatAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *floatAny) ValueType() ValueType {
|
||||
return NumberValue
|
||||
}
|
||||
|
||||
func (any *floatAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *floatAny) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *floatAny) ToBool() bool {
|
||||
return any.ToFloat64() != 0
|
||||
}
|
||||
|
||||
func (any *floatAny) ToInt() int {
|
||||
return int(any.val)
|
||||
}
|
||||
|
||||
func (any *floatAny) ToInt32() int32 {
|
||||
return int32(any.val)
|
||||
}
|
||||
|
||||
func (any *floatAny) ToInt64() int64 {
|
||||
return int64(any.val)
|
||||
}
|
||||
|
||||
func (any *floatAny) ToUint() uint {
|
||||
if any.val > 0 {
|
||||
return uint(any.val)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *floatAny) ToUint32() uint32 {
|
||||
if any.val > 0 {
|
||||
return uint32(any.val)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *floatAny) ToUint64() uint64 {
|
||||
if any.val > 0 {
|
||||
return uint64(any.val)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *floatAny) ToFloat32() float32 {
|
||||
return float32(any.val)
|
||||
}
|
||||
|
||||
func (any *floatAny) ToFloat64() float64 {
|
||||
return any.val
|
||||
}
|
||||
|
||||
func (any *floatAny) ToString() string {
|
||||
return strconv.FormatFloat(any.val, 'E', -1, 64)
|
||||
}
|
||||
|
||||
func (any *floatAny) WriteTo(stream *Stream) {
|
||||
stream.WriteFloat64(any.val)
|
||||
}
|
||||
|
||||
func (any *floatAny) GetInterface() interface{} {
|
||||
return any.val
|
||||
}
|
74
vendor/github.com/json-iterator/go/feature_any_int32.go
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type int32Any struct {
|
||||
baseAny
|
||||
val int32
|
||||
}
|
||||
|
||||
func (any *int32Any) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *int32Any) ValueType() ValueType {
|
||||
return NumberValue
|
||||
}
|
||||
|
||||
func (any *int32Any) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *int32Any) ToBool() bool {
|
||||
return any.val != 0
|
||||
}
|
||||
|
||||
func (any *int32Any) ToInt() int {
|
||||
return int(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToInt32() int32 {
|
||||
return any.val
|
||||
}
|
||||
|
||||
func (any *int32Any) ToInt64() int64 {
|
||||
return int64(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToUint() uint {
|
||||
return uint(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToUint32() uint32 {
|
||||
return uint32(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToUint64() uint64 {
|
||||
return uint64(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToFloat32() float32 {
|
||||
return float32(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToFloat64() float64 {
|
||||
return float64(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) ToString() string {
|
||||
return strconv.FormatInt(int64(any.val), 10)
|
||||
}
|
||||
|
||||
func (any *int32Any) WriteTo(stream *Stream) {
|
||||
stream.WriteInt32(any.val)
|
||||
}
|
||||
|
||||
func (any *int32Any) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *int32Any) GetInterface() interface{} {
|
||||
return any.val
|
||||
}
|
74
vendor/github.com/json-iterator/go/feature_any_int64.go
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type int64Any struct {
|
||||
baseAny
|
||||
val int64
|
||||
}
|
||||
|
||||
func (any *int64Any) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *int64Any) ValueType() ValueType {
|
||||
return NumberValue
|
||||
}
|
||||
|
||||
func (any *int64Any) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *int64Any) ToBool() bool {
|
||||
return any.val != 0
|
||||
}
|
||||
|
||||
func (any *int64Any) ToInt() int {
|
||||
return int(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToInt32() int32 {
|
||||
return int32(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToInt64() int64 {
|
||||
return any.val
|
||||
}
|
||||
|
||||
func (any *int64Any) ToUint() uint {
|
||||
return uint(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToUint32() uint32 {
|
||||
return uint32(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToUint64() uint64 {
|
||||
return uint64(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToFloat32() float32 {
|
||||
return float32(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToFloat64() float64 {
|
||||
return float64(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) ToString() string {
|
||||
return strconv.FormatInt(any.val, 10)
|
||||
}
|
||||
|
||||
func (any *int64Any) WriteTo(stream *Stream) {
|
||||
stream.WriteInt64(any.val)
|
||||
}
|
||||
|
||||
func (any *int64Any) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *int64Any) GetInterface() interface{} {
|
||||
return any.val
|
||||
}
|
82
vendor/github.com/json-iterator/go/feature_any_invalid.go
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
|||
package jsoniter
|
||||
|
||||
import "fmt"
|
||||
|
||||
type invalidAny struct {
|
||||
baseAny
|
||||
err error
|
||||
}
|
||||
|
||||
func newInvalidAny(path []interface{}) *invalidAny {
|
||||
return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
|
||||
}
|
||||
|
||||
func (any *invalidAny) LastError() error {
|
||||
return any.err
|
||||
}
|
||||
|
||||
func (any *invalidAny) ValueType() ValueType {
|
||||
return InvalidValue
|
||||
}
|
||||
|
||||
func (any *invalidAny) MustBeValid() Any {
|
||||
panic(any.err)
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToBool() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToInt() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToInt32() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToInt64() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToUint() uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToUint32() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToUint64() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToFloat32() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToFloat64() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *invalidAny) ToString() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (any *invalidAny) WriteTo(stream *Stream) {
|
||||
}
|
||||
|
||||
func (any *invalidAny) Get(path ...interface{}) Any {
|
||||
if any.err == nil {
|
||||
return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
|
||||
}
|
||||
return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
|
||||
}
|
||||
|
||||
func (any *invalidAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *invalidAny) GetInterface() interface{} {
|
||||
return nil
|
||||
}
|
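invalidAny is what a failed Get resolves to: the To* conversions fall back to zero values, LastError reports the missing path, and MustBeValid panics. A short sketch of both styles of handling, with an illustrative payload:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"count": 3}`)

	// A missing path yields an invalid Any: conversions return zero values
	// and LastError carries the "not found" error.
	missing := jsoniter.Get(data, "total")
	fmt.Println(missing.ToInt(), missing.LastError())

	// MustBeValid panics on an invalid Any, making the failure loud.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("panic:", r)
		}
	}()
	missing.MustBeValid()
}
```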
69
vendor/github.com/json-iterator/go/feature_any_nil.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
|||
package jsoniter
|
||||
|
||||
type nilAny struct {
|
||||
baseAny
|
||||
}
|
||||
|
||||
func (any *nilAny) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *nilAny) ValueType() ValueType {
|
||||
return NilValue
|
||||
}
|
||||
|
||||
func (any *nilAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *nilAny) ToBool() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (any *nilAny) ToInt() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToInt32() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToInt64() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToUint() uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToUint32() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToUint64() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToFloat32() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToFloat64() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *nilAny) ToString() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (any *nilAny) WriteTo(stream *Stream) {
|
||||
stream.WriteNil()
|
||||
}
|
||||
|
||||
func (any *nilAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *nilAny) GetInterface() interface{} {
|
||||
return nil
|
||||
}
104
vendor/github.com/json-iterator/go/feature_any_number.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||
package jsoniter
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type numberLazyAny struct {
|
||||
baseAny
|
||||
cfg *frozenConfig
|
||||
buf []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ValueType() ValueType {
|
||||
return NumberValue
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) LastError() error {
|
||||
return any.err
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToBool() bool {
|
||||
return any.ToFloat64() != 0
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToInt() int {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadInt()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToInt32() int32 {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadInt32()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToInt64() int64 {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadInt64()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToUint() uint {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadUint()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToUint32() uint32 {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadUint32()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToUint64() uint64 {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadUint64()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToFloat32() float32 {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadFloat32()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToFloat64() float64 {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
val := iter.ReadFloat64()
|
||||
any.err = iter.Error
|
||||
return val
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) ToString() string {
|
||||
return *(*string)(unsafe.Pointer(&any.buf))
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) WriteTo(stream *Stream) {
|
||||
stream.Write(any.buf)
|
||||
}
|
||||
|
||||
func (any *numberLazyAny) GetInterface() interface{} {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
return iter.Read()
|
||||
}
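numberLazyAny defers parsing: it keeps the raw number bytes and borrows an iterator from its config for whichever conversion is asked for, so one value can be read as an int or a float. A small sketch, not from the vendored source; the payload is illustrative and the exact wrapper a lookup returns is an assumption based on this file.

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    data := []byte(`{"price": 12.75, "count": 3}`)
    api := jsoniter.ConfigDefault

    price := api.Get(data, "price")
    fmt.Println(price.ToFloat64()) // 12.75, parsed on demand from the buffered bytes
    fmt.Println(price.ToString())  // "12.75", the raw bytes as a string

    fmt.Println(api.Get(data, "count").ToInt()) // 3
}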
374
vendor/github.com/json-iterator/go/feature_any_object.go
generated
vendored
Normal file
@@ -0,0 +1,374 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type objectLazyAny struct {
|
||||
baseAny
|
||||
cfg *frozenConfig
|
||||
buf []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ValueType() ValueType {
|
||||
return ObjectValue
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) LastError() error {
|
||||
return any.err
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToBool() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToInt() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToInt32() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToInt64() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToUint() uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToUint32() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToUint64() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToFloat32() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToFloat64() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToString() string {
|
||||
return *(*string)(unsafe.Pointer(&any.buf))
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) ToVal(obj interface{}) {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
iter.ReadVal(obj)
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) Get(path ...interface{}) Any {
|
||||
if len(path) == 0 {
|
||||
return any
|
||||
}
|
||||
switch firstPath := path[0].(type) {
|
||||
case string:
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
valueBytes := locateObjectField(iter, firstPath)
|
||||
if valueBytes == nil {
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
iter.ResetBytes(valueBytes)
|
||||
return locatePath(iter, path[1:])
|
||||
case int32:
|
||||
if '*' == firstPath {
|
||||
mappedAll := map[string]Any{}
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
iter.ReadMapCB(func(iter *Iterator, field string) bool {
|
||||
mapped := locatePath(iter, path[1:])
|
||||
if mapped.ValueType() != InvalidValue {
|
||||
mappedAll[field] = mapped
|
||||
}
|
||||
return true
|
||||
})
|
||||
return wrapMap(mappedAll)
|
||||
}
|
||||
return newInvalidAny(path)
|
||||
default:
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) Keys() []string {
|
||||
keys := []string{}
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
iter.ReadMapCB(func(iter *Iterator, field string) bool {
|
||||
iter.Skip()
|
||||
keys = append(keys, field)
|
||||
return true
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) Size() int {
|
||||
size := 0
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
|
||||
iter.Skip()
|
||||
size++
|
||||
return true
|
||||
})
|
||||
return size
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) WriteTo(stream *Stream) {
|
||||
stream.Write(any.buf)
|
||||
}
|
||||
|
||||
func (any *objectLazyAny) GetInterface() interface{} {
|
||||
iter := any.cfg.BorrowIterator(any.buf)
|
||||
defer any.cfg.ReturnIterator(iter)
|
||||
return iter.Read()
|
||||
}
|
||||
|
||||
type objectAny struct {
|
||||
baseAny
|
||||
err error
|
||||
val reflect.Value
|
||||
}
|
||||
|
||||
func wrapStruct(val interface{}) *objectAny {
|
||||
return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
|
||||
}
|
||||
|
||||
func (any *objectAny) ValueType() ValueType {
|
||||
return ObjectValue
|
||||
}
|
||||
|
||||
func (any *objectAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *objectAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *objectAny) LastError() error {
|
||||
return any.err
|
||||
}
|
||||
|
||||
func (any *objectAny) ToBool() bool {
|
||||
return any.val.NumField() != 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToInt() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToInt32() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToInt64() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToUint() uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToUint32() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToUint64() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToFloat32() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToFloat64() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *objectAny) ToString() string {
|
||||
str, err := MarshalToString(any.val.Interface())
|
||||
any.err = err
|
||||
return str
|
||||
}
|
||||
|
||||
func (any *objectAny) Get(path ...interface{}) Any {
|
||||
if len(path) == 0 {
|
||||
return any
|
||||
}
|
||||
switch firstPath := path[0].(type) {
|
||||
case string:
|
||||
field := any.val.FieldByName(firstPath)
|
||||
if !field.IsValid() {
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
return Wrap(field.Interface())
|
||||
case int32:
|
||||
if '*' == firstPath {
|
||||
mappedAll := map[string]Any{}
|
||||
for i := 0; i < any.val.NumField(); i++ {
|
||||
field := any.val.Field(i)
|
||||
if field.CanInterface() {
|
||||
mapped := Wrap(field.Interface()).Get(path[1:]...)
|
||||
if mapped.ValueType() != InvalidValue {
|
||||
mappedAll[any.val.Type().Field(i).Name] = mapped
|
||||
}
|
||||
}
|
||||
}
|
||||
return wrapMap(mappedAll)
|
||||
}
|
||||
return newInvalidAny(path)
|
||||
default:
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
}
|
||||
|
||||
func (any *objectAny) Keys() []string {
|
||||
keys := make([]string, 0, any.val.NumField())
|
||||
for i := 0; i < any.val.NumField(); i++ {
|
||||
keys = append(keys, any.val.Type().Field(i).Name)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func (any *objectAny) Size() int {
|
||||
return any.val.NumField()
|
||||
}
|
||||
|
||||
func (any *objectAny) WriteTo(stream *Stream) {
|
||||
stream.WriteVal(any.val)
|
||||
}
|
||||
|
||||
func (any *objectAny) GetInterface() interface{} {
|
||||
return any.val.Interface()
|
||||
}
|
||||
|
||||
type mapAny struct {
|
||||
baseAny
|
||||
err error
|
||||
val reflect.Value
|
||||
}
|
||||
|
||||
func wrapMap(val interface{}) *mapAny {
|
||||
return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
|
||||
}
|
||||
|
||||
func (any *mapAny) ValueType() ValueType {
|
||||
return ObjectValue
|
||||
}
|
||||
|
||||
func (any *mapAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *mapAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *mapAny) LastError() error {
|
||||
return any.err
|
||||
}
|
||||
|
||||
func (any *mapAny) ToBool() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (any *mapAny) ToInt() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToInt32() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToInt64() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToUint() uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToUint32() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToUint64() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToFloat32() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToFloat64() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (any *mapAny) ToString() string {
|
||||
str, err := MarshalToString(any.val.Interface())
|
||||
any.err = err
|
||||
return str
|
||||
}
|
||||
|
||||
func (any *mapAny) Get(path ...interface{}) Any {
|
||||
if len(path) == 0 {
|
||||
return any
|
||||
}
|
||||
switch firstPath := path[0].(type) {
|
||||
case int32:
|
||||
if '*' == firstPath {
|
||||
mappedAll := map[string]Any{}
|
||||
for _, key := range any.val.MapKeys() {
|
||||
keyAsStr := key.String()
|
||||
element := Wrap(any.val.MapIndex(key).Interface())
|
||||
mapped := element.Get(path[1:]...)
|
||||
if mapped.ValueType() != InvalidValue {
|
||||
mappedAll[keyAsStr] = mapped
|
||||
}
|
||||
}
|
||||
return wrapMap(mappedAll)
|
||||
}
|
||||
return newInvalidAny(path)
|
||||
default:
|
||||
value := any.val.MapIndex(reflect.ValueOf(firstPath))
|
||||
if !value.IsValid() {
|
||||
return newInvalidAny(path)
|
||||
}
|
||||
return Wrap(value.Interface())
|
||||
}
|
||||
}
|
||||
|
||||
func (any *mapAny) Keys() []string {
|
||||
keys := make([]string, 0, any.val.Len())
|
||||
for _, key := range any.val.MapKeys() {
|
||||
keys = append(keys, key.String())
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func (any *mapAny) Size() int {
|
||||
return any.val.Len()
|
||||
}
|
||||
|
||||
func (any *mapAny) WriteTo(stream *Stream) {
|
||||
stream.WriteVal(any.val)
|
||||
}
|
||||
|
||||
func (any *mapAny) GetInterface() interface{} {
|
||||
return any.val.Interface()
|
||||
}
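The int32 case in the Get methods above is what makes the '*' wildcard work: a rune fans the rest of the path out over every field and the hits are collected into a wrapped map. A sketch under the same assumptions as the earlier examples (illustrative payload; the wildcard routing through locatePath is not shown in this diff).

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    data := []byte(`{"a": {"score": 1}, "b": {"score": 2}}`)
    api := jsoniter.ConfigDefault

    // '*' is a rune (int32), hence the int32 case in the switch above.
    scores := api.Get(data, '*', "score")
    fmt.Println(scores.Size()) // 2
    fmt.Println(scores.Keys()) // [a b] in some order

    // A concrete field can still be addressed directly.
    fmt.Println(api.Get(data, "b", "score").ToInt()) // 2
}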
166
vendor/github.com/json-iterator/go/feature_any_string.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type stringAny struct {
|
||||
baseAny
|
||||
val string
|
||||
}
|
||||
|
||||
func (any *stringAny) Get(path ...interface{}) Any {
|
||||
if len(path) == 0 {
|
||||
return any
|
||||
}
|
||||
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
|
||||
}
|
||||
|
||||
func (any *stringAny) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *stringAny) ValueType() ValueType {
|
||||
return StringValue
|
||||
}
|
||||
|
||||
func (any *stringAny) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *stringAny) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *stringAny) ToBool() bool {
|
||||
str := any.ToString()
|
||||
if str == "0" {
|
||||
return false
|
||||
}
|
||||
for _, c := range str {
|
||||
switch c {
|
||||
case ' ', '\n', '\r', '\t':
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (any *stringAny) ToInt() int {
|
||||
return int(any.ToInt64())
|
||||
|
||||
}
|
||||
|
||||
func (any *stringAny) ToInt32() int32 {
|
||||
return int32(any.ToInt64())
|
||||
}
|
||||
|
||||
func (any *stringAny) ToInt64() int64 {
|
||||
if any.val == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
flag := 1
|
||||
startPos := 0
|
||||
endPos := 0
|
||||
if any.val[0] == '+' || any.val[0] == '-' {
|
||||
startPos = 1
|
||||
}
|
||||
|
||||
if any.val[0] == '-' {
|
||||
flag = -1
|
||||
}
|
||||
|
||||
for i := startPos; i < len(any.val); i++ {
|
||||
if any.val[i] >= '0' && any.val[i] <= '9' {
|
||||
endPos = i + 1
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
|
||||
return int64(flag) * parsed
|
||||
}
|
||||
|
||||
func (any *stringAny) ToUint() uint {
|
||||
return uint(any.ToUint64())
|
||||
}
|
||||
|
||||
func (any *stringAny) ToUint32() uint32 {
|
||||
return uint32(any.ToUint64())
|
||||
}
|
||||
|
||||
func (any *stringAny) ToUint64() uint64 {
|
||||
if any.val == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
startPos := 0
|
||||
endPos := 0
|
||||
|
||||
if any.val[0] == '-' {
|
||||
return 0
|
||||
}
|
||||
if any.val[0] == '+' {
|
||||
startPos = 1
|
||||
}
|
||||
|
||||
for i := startPos; i < len(any.val); i++ {
|
||||
if any.val[i] >= '0' && any.val[i] <= '9' {
|
||||
endPos = i + 1
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
|
||||
return parsed
|
||||
}
|
||||
|
||||
func (any *stringAny) ToFloat32() float32 {
|
||||
return float32(any.ToFloat64())
|
||||
}
|
||||
|
||||
func (any *stringAny) ToFloat64() float64 {
|
||||
if len(any.val) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// first char invalid
|
||||
if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
|
||||
return 0
|
||||
}
|
||||
|
||||
// extract valid num expression from string
|
||||
// eg 123true => 123, -12.12xxa => -12.12
|
||||
endPos := 1
|
||||
for i := 1; i < len(any.val); i++ {
|
||||
if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
|
||||
endPos = i + 1
|
||||
continue
|
||||
}
|
||||
|
||||
// end position is the first char which is not digit
|
||||
if any.val[i] >= '0' && any.val[i] <= '9' {
|
||||
endPos = i + 1
|
||||
} else {
|
||||
endPos = i
|
||||
break
|
||||
}
|
||||
}
|
||||
parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
|
||||
return parsed
|
||||
}
|
||||
|
||||
func (any *stringAny) ToString() string {
|
||||
return any.val
|
||||
}
|
||||
|
||||
func (any *stringAny) WriteTo(stream *Stream) {
|
||||
stream.WriteString(any.val)
|
||||
}
|
||||
|
||||
func (any *stringAny) GetInterface() interface{} {
|
||||
return any.val
|
||||
}
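stringAny converts leniently rather than failing: the numeric conversions parse the longest leading numeric prefix (the endPos scans above), and ToBool is false only for "", "0", or pure whitespace. A sketch of that behaviour, not from the vendored source; the payload is illustrative.

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    data := []byte(`{"port": "8080", "ratio": "-12.12xxa", "flag": "0"}`)
    api := jsoniter.ConfigDefault

    fmt.Println(api.Get(data, "port").ToInt())      // 8080
    fmt.Println(api.Get(data, "ratio").ToFloat64()) // -12.12, trailing junk ignored
    fmt.Println(api.Get(data, "flag").ToBool())     // false, "0" counts as falsy
}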
74
vendor/github.com/json-iterator/go/feature_any_uint32.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type uint32Any struct {
|
||||
baseAny
|
||||
val uint32
|
||||
}
|
||||
|
||||
func (any *uint32Any) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *uint32Any) ValueType() ValueType {
|
||||
return NumberValue
|
||||
}
|
||||
|
||||
func (any *uint32Any) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToBool() bool {
|
||||
return any.val != 0
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToInt() int {
|
||||
return int(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToInt32() int32 {
|
||||
return int32(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToInt64() int64 {
|
||||
return int64(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToUint() uint {
|
||||
return uint(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToUint32() uint32 {
|
||||
return any.val
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToUint64() uint64 {
|
||||
return uint64(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToFloat32() float32 {
|
||||
return float32(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToFloat64() float64 {
|
||||
return float64(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) ToString() string {
|
||||
return strconv.FormatInt(int64(any.val), 10)
|
||||
}
|
||||
|
||||
func (any *uint32Any) WriteTo(stream *Stream) {
|
||||
stream.WriteUint32(any.val)
|
||||
}
|
||||
|
||||
func (any *uint32Any) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *uint32Any) GetInterface() interface{} {
|
||||
return any.val
|
||||
}
74
vendor/github.com/json-iterator/go/feature_any_uint64.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type uint64Any struct {
|
||||
baseAny
|
||||
val uint64
|
||||
}
|
||||
|
||||
func (any *uint64Any) LastError() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *uint64Any) ValueType() ValueType {
|
||||
return NumberValue
|
||||
}
|
||||
|
||||
func (any *uint64Any) MustBeValid() Any {
|
||||
return any
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToBool() bool {
|
||||
return any.val != 0
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToInt() int {
|
||||
return int(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToInt32() int32 {
|
||||
return int32(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToInt64() int64 {
|
||||
return int64(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToUint() uint {
|
||||
return uint(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToUint32() uint32 {
|
||||
return uint32(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToUint64() uint64 {
|
||||
return any.val
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToFloat32() float32 {
|
||||
return float32(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToFloat64() float64 {
|
||||
return float64(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) ToString() string {
|
||||
return strconv.FormatUint(any.val, 10)
|
||||
}
|
||||
|
||||
func (any *uint64Any) WriteTo(stream *Stream) {
|
||||
stream.WriteUint64(any.val)
|
||||
}
|
||||
|
||||
func (any *uint64Any) Parse() *Iterator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (any *uint64Any) GetInterface() interface{} {
|
||||
return any.val
|
||||
}
312
vendor/github.com/json-iterator/go/feature_config.go
generated
vendored
Normal file
@@ -0,0 +1,312 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Config customize how the API should behave.
|
||||
// The API is created from Config by Froze.
|
||||
type Config struct {
|
||||
IndentionStep int
|
||||
MarshalFloatWith6Digits bool
|
||||
EscapeHTML bool
|
||||
SortMapKeys bool
|
||||
UseNumber bool
|
||||
TagKey string
|
||||
}
|
||||
|
||||
type frozenConfig struct {
|
||||
configBeforeFrozen Config
|
||||
sortMapKeys bool
|
||||
indentionStep int
|
||||
decoderCache unsafe.Pointer
|
||||
encoderCache unsafe.Pointer
|
||||
extensions []Extension
|
||||
streamPool chan *Stream
|
||||
iteratorPool chan *Iterator
|
||||
}
|
||||
|
||||
// API the public interface of this package.
|
||||
// Primary Marshal and Unmarshal.
|
||||
type API interface {
|
||||
IteratorPool
|
||||
StreamPool
|
||||
MarshalToString(v interface{}) (string, error)
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
|
||||
UnmarshalFromString(str string, v interface{}) error
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
Get(data []byte, path ...interface{}) Any
|
||||
NewEncoder(writer io.Writer) *Encoder
|
||||
NewDecoder(reader io.Reader) *Decoder
|
||||
}
|
||||
|
||||
// ConfigDefault the default API
|
||||
var ConfigDefault = Config{
|
||||
EscapeHTML: true,
|
||||
}.Froze()
|
||||
|
||||
// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
|
||||
var ConfigCompatibleWithStandardLibrary = Config{
|
||||
EscapeHTML: true,
|
||||
SortMapKeys: true,
|
||||
}.Froze()
|
||||
|
||||
// ConfigFastest marshals float with only 6 digits precision
|
||||
var ConfigFastest = Config{
|
||||
EscapeHTML: false,
|
||||
MarshalFloatWith6Digits: true,
|
||||
}.Froze()
|
||||
|
||||
// Froze forge API from config
|
||||
func (cfg Config) Froze() API {
|
||||
// TODO: cache frozen config
|
||||
frozenConfig := &frozenConfig{
|
||||
sortMapKeys: cfg.SortMapKeys,
|
||||
indentionStep: cfg.IndentionStep,
|
||||
streamPool: make(chan *Stream, 16),
|
||||
iteratorPool: make(chan *Iterator, 16),
|
||||
}
|
||||
atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{}))
|
||||
atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{}))
|
||||
if cfg.MarshalFloatWith6Digits {
|
||||
frozenConfig.marshalFloatWith6Digits()
|
||||
}
|
||||
if cfg.EscapeHTML {
|
||||
frozenConfig.escapeHTML()
|
||||
}
|
||||
if cfg.UseNumber {
|
||||
frozenConfig.useNumber()
|
||||
}
|
||||
frozenConfig.configBeforeFrozen = cfg
|
||||
return frozenConfig
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) useNumber() {
|
||||
cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if iter.WhatIsNext() == NumberValue {
|
||||
*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
|
||||
} else {
|
||||
*((*interface{})(ptr)) = iter.Read()
|
||||
}
|
||||
}})
|
||||
}
|
||||
func (cfg *frozenConfig) getTagKey() string {
|
||||
tagKey := cfg.configBeforeFrozen.TagKey
|
||||
if tagKey == "" {
|
||||
return "json"
|
||||
}
|
||||
return tagKey
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) registerExtension(extension Extension) {
|
||||
cfg.extensions = append(cfg.extensions, extension)
|
||||
}
|
||||
|
||||
type lossyFloat32Encoder struct {
|
||||
}
|
||||
|
||||
func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteFloat32Lossy(*((*float32)(ptr)))
|
||||
}
|
||||
|
||||
func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*float32)(ptr)) == 0
|
||||
}
|
||||
|
||||
type lossyFloat64Encoder struct {
|
||||
}
|
||||
|
||||
func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteFloat64Lossy(*((*float64)(ptr)))
|
||||
}
|
||||
|
||||
func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*float64)(ptr)) == 0
|
||||
}
|
||||
|
||||
// EnableLossyFloatMarshalling keeps 10**(-6) precision
|
||||
// for float variables for better performance.
|
||||
func (cfg *frozenConfig) marshalFloatWith6Digits() {
|
||||
// for better performance
|
||||
cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{})
|
||||
cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{})
|
||||
}
|
||||
|
||||
type htmlEscapedStringEncoder struct {
|
||||
}
|
||||
|
||||
func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
str := *((*string)(ptr))
|
||||
stream.WriteStringWithHTMLEscaped(str)
|
||||
}
|
||||
|
||||
func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*string)(ptr)) == ""
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) escapeHTML() {
|
||||
cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{})
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) {
|
||||
done := false
|
||||
for !done {
|
||||
ptr := atomic.LoadPointer(&cfg.decoderCache)
|
||||
cache := *(*map[reflect.Type]ValDecoder)(ptr)
|
||||
copied := map[reflect.Type]ValDecoder{}
|
||||
for k, v := range cache {
|
||||
copied[k] = v
|
||||
}
|
||||
copied[cacheKey] = decoder
|
||||
done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied))
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) {
|
||||
done := false
|
||||
for !done {
|
||||
ptr := atomic.LoadPointer(&cfg.encoderCache)
|
||||
cache := *(*map[reflect.Type]ValEncoder)(ptr)
|
||||
copied := map[reflect.Type]ValEncoder{}
|
||||
for k, v := range cache {
|
||||
copied[k] = v
|
||||
}
|
||||
copied[cacheKey] = encoder
|
||||
done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied))
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder {
|
||||
ptr := atomic.LoadPointer(&cfg.decoderCache)
|
||||
cache := *(*map[reflect.Type]ValDecoder)(ptr)
|
||||
return cache[cacheKey]
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder {
|
||||
ptr := atomic.LoadPointer(&cfg.encoderCache)
|
||||
cache := *(*map[reflect.Type]ValEncoder)(ptr)
|
||||
return cache[cacheKey]
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) cleanDecoders() {
|
||||
typeDecoders = map[string]ValDecoder{}
|
||||
fieldDecoders = map[string]ValDecoder{}
|
||||
*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) cleanEncoders() {
|
||||
typeEncoders = map[string]ValEncoder{}
|
||||
fieldEncoders = map[string]ValEncoder{}
|
||||
*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
|
||||
stream := cfg.BorrowStream(nil)
|
||||
defer cfg.ReturnStream(stream)
|
||||
stream.WriteVal(v)
|
||||
if stream.Error != nil {
|
||||
return "", stream.Error
|
||||
}
|
||||
return string(stream.Buffer()), nil
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
|
||||
stream := cfg.BorrowStream(nil)
|
||||
defer cfg.ReturnStream(stream)
|
||||
stream.WriteVal(v)
|
||||
if stream.Error != nil {
|
||||
return nil, stream.Error
|
||||
}
|
||||
result := stream.Buffer()
|
||||
copied := make([]byte, len(result))
|
||||
copy(copied, result)
|
||||
return copied, nil
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
if prefix != "" {
|
||||
panic("prefix is not supported")
|
||||
}
|
||||
for _, r := range indent {
|
||||
if r != ' ' {
|
||||
panic("indent can only be space")
|
||||
}
|
||||
}
|
||||
newCfg := cfg.configBeforeFrozen
|
||||
newCfg.IndentionStep = len(indent)
|
||||
return newCfg.Froze().Marshal(v)
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
|
||||
data := []byte(str)
|
||||
data = data[:lastNotSpacePos(data)]
|
||||
iter := cfg.BorrowIterator(data)
|
||||
defer cfg.ReturnIterator(iter)
|
||||
iter.ReadVal(v)
|
||||
if iter.head == iter.tail {
|
||||
iter.loadMore()
|
||||
}
|
||||
if iter.Error == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if iter.Error == nil {
|
||||
iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal")
|
||||
}
|
||||
return iter.Error
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
|
||||
iter := cfg.BorrowIterator(data)
|
||||
defer cfg.ReturnIterator(iter)
|
||||
return locatePath(iter, path)
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
|
||||
data = data[:lastNotSpacePos(data)]
|
||||
iter := cfg.BorrowIterator(data)
|
||||
defer cfg.ReturnIterator(iter)
|
||||
typ := reflect.TypeOf(v)
|
||||
if typ.Kind() != reflect.Ptr {
|
||||
// return non-pointer error
|
||||
return errors.New("the second param must be ptr type")
|
||||
}
|
||||
iter.ReadVal(v)
|
||||
if iter.head == iter.tail {
|
||||
iter.loadMore()
|
||||
}
|
||||
if iter.Error == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if iter.Error == nil {
|
||||
iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
|
||||
}
|
||||
return iter.Error
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
|
||||
stream := NewStream(cfg, writer, 512)
|
||||
return &Encoder{stream}
|
||||
}
|
||||
|
||||
func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
|
||||
iter := Parse(cfg, reader, 512)
|
||||
return &Decoder{iter}
|
||||
}
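The Config/Froze split is the core of the package's API: a Config is frozen once into an API value (caches, pools, and encoder overrides hang off the frozenConfig) and that API is then used like encoding/json. A minimal sketch using only what this file defines; the Pod struct and values are illustrative.

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

type Pod struct {
    Name string  `json:"name"`
    CPU  float64 `json:"cpu"`
}

func main() {
    // Freeze a configuration once, then reuse the resulting API everywhere.
    api := jsoniter.Config{
        EscapeHTML:  true,
        SortMapKeys: true, // deterministic map key order, like the standard library
    }.Froze()

    out, err := api.Marshal(Pod{Name: "web", CPU: 0.5})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // JSON keyed by the struct's json tags

    var p Pod
    if err := api.UnmarshalFromString(string(out), &p); err != nil {
        panic(err)
    }
    fmt.Println(p.Name, p.CPU)

    // Prebuilt configs: ConfigDefault, ConfigCompatibleWithStandardLibrary, ConfigFastest.
}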
307
vendor/github.com/json-iterator/go/feature_iter.go
generated
vendored
Normal file
@@ -0,0 +1,307 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ValueType the type for JSON element
|
||||
type ValueType int
|
||||
|
||||
const (
|
||||
// InvalidValue invalid JSON element
|
||||
InvalidValue ValueType = iota
|
||||
// StringValue JSON element "string"
|
||||
StringValue
|
||||
// NumberValue JSON element 100 or 0.10
|
||||
NumberValue
|
||||
// NilValue JSON element null
|
||||
NilValue
|
||||
// BoolValue JSON element true or false
|
||||
BoolValue
|
||||
// ArrayValue JSON element []
|
||||
ArrayValue
|
||||
// ObjectValue JSON element {}
|
||||
ObjectValue
|
||||
)
|
||||
|
||||
var hexDigits []byte
|
||||
var valueTypes []ValueType
|
||||
|
||||
func init() {
|
||||
hexDigits = make([]byte, 256)
|
||||
for i := 0; i < len(hexDigits); i++ {
|
||||
hexDigits[i] = 255
|
||||
}
|
||||
for i := '0'; i <= '9'; i++ {
|
||||
hexDigits[i] = byte(i - '0')
|
||||
}
|
||||
for i := 'a'; i <= 'f'; i++ {
|
||||
hexDigits[i] = byte((i - 'a') + 10)
|
||||
}
|
||||
for i := 'A'; i <= 'F'; i++ {
|
||||
hexDigits[i] = byte((i - 'A') + 10)
|
||||
}
|
||||
valueTypes = make([]ValueType, 256)
|
||||
for i := 0; i < len(valueTypes); i++ {
|
||||
valueTypes[i] = InvalidValue
|
||||
}
|
||||
valueTypes['"'] = StringValue
|
||||
valueTypes['-'] = NumberValue
|
||||
valueTypes['0'] = NumberValue
|
||||
valueTypes['1'] = NumberValue
|
||||
valueTypes['2'] = NumberValue
|
||||
valueTypes['3'] = NumberValue
|
||||
valueTypes['4'] = NumberValue
|
||||
valueTypes['5'] = NumberValue
|
||||
valueTypes['6'] = NumberValue
|
||||
valueTypes['7'] = NumberValue
|
||||
valueTypes['8'] = NumberValue
|
||||
valueTypes['9'] = NumberValue
|
||||
valueTypes['t'] = BoolValue
|
||||
valueTypes['f'] = BoolValue
|
||||
valueTypes['n'] = NilValue
|
||||
valueTypes['['] = ArrayValue
|
||||
valueTypes['{'] = ObjectValue
|
||||
}
|
||||
|
||||
// Iterator is a io.Reader like object, with JSON specific read functions.
|
||||
// Error is not returned as return value, but stored as Error member on this iterator instance.
|
||||
type Iterator struct {
|
||||
cfg *frozenConfig
|
||||
reader io.Reader
|
||||
buf []byte
|
||||
head int
|
||||
tail int
|
||||
captureStartedAt int
|
||||
captured []byte
|
||||
Error error
|
||||
}
|
||||
|
||||
// NewIterator creates an empty Iterator instance
|
||||
func NewIterator(cfg API) *Iterator {
|
||||
return &Iterator{
|
||||
cfg: cfg.(*frozenConfig),
|
||||
reader: nil,
|
||||
buf: nil,
|
||||
head: 0,
|
||||
tail: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse creates an Iterator instance from io.Reader
|
||||
func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
|
||||
return &Iterator{
|
||||
cfg: cfg.(*frozenConfig),
|
||||
reader: reader,
|
||||
buf: make([]byte, bufSize),
|
||||
head: 0,
|
||||
tail: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseBytes creates an Iterator instance from byte array
|
||||
func ParseBytes(cfg API, input []byte) *Iterator {
|
||||
return &Iterator{
|
||||
cfg: cfg.(*frozenConfig),
|
||||
reader: nil,
|
||||
buf: input,
|
||||
head: 0,
|
||||
tail: len(input),
|
||||
}
|
||||
}
|
||||
|
||||
// ParseString creates an Iterator instance from string
|
||||
func ParseString(cfg API, input string) *Iterator {
|
||||
return ParseBytes(cfg, []byte(input))
|
||||
}
|
||||
|
||||
// Pool returns a pool can provide more iterator with same configuration
|
||||
func (iter *Iterator) Pool() IteratorPool {
|
||||
return iter.cfg
|
||||
}
|
||||
|
||||
// Reset reuse iterator instance by specifying another reader
|
||||
func (iter *Iterator) Reset(reader io.Reader) *Iterator {
|
||||
iter.reader = reader
|
||||
iter.head = 0
|
||||
iter.tail = 0
|
||||
return iter
|
||||
}
|
||||
|
||||
// ResetBytes reuse iterator instance by specifying another byte array as input
|
||||
func (iter *Iterator) ResetBytes(input []byte) *Iterator {
|
||||
iter.reader = nil
|
||||
iter.buf = input
|
||||
iter.head = 0
|
||||
iter.tail = len(input)
|
||||
return iter
|
||||
}
|
||||
|
||||
// WhatIsNext gets ValueType of relatively next json element
|
||||
func (iter *Iterator) WhatIsNext() ValueType {
|
||||
valueType := valueTypes[iter.nextToken()]
|
||||
iter.unreadByte()
|
||||
return valueType
|
||||
}
|
||||
|
||||
func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
|
||||
for i := iter.head; i < iter.tail; i++ {
|
||||
c := iter.buf[i]
|
||||
switch c {
|
||||
case ' ', '\n', '\t', '\r':
|
||||
continue
|
||||
}
|
||||
iter.head = i
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (iter *Iterator) isObjectEnd() bool {
|
||||
c := iter.nextToken()
|
||||
if c == ',' {
|
||||
return false
|
||||
}
|
||||
if c == '}' {
|
||||
return true
|
||||
}
|
||||
iter.ReportError("isObjectEnd", "object ended prematurely")
|
||||
return true
|
||||
}
|
||||
|
||||
func (iter *Iterator) nextToken() byte {
|
||||
// a variation of skip whitespaces, returning the next non-whitespace token
|
||||
for {
|
||||
for i := iter.head; i < iter.tail; i++ {
|
||||
c := iter.buf[i]
|
||||
switch c {
|
||||
case ' ', '\n', '\t', '\r':
|
||||
continue
|
||||
}
|
||||
iter.head = i + 1
|
||||
return c
|
||||
}
|
||||
if !iter.loadMore() {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ReportError record a error in iterator instance with current position.
|
||||
func (iter *Iterator) ReportError(operation string, msg string) {
|
||||
if iter.Error != nil {
|
||||
if iter.Error != io.EOF {
|
||||
return
|
||||
}
|
||||
}
|
||||
peekStart := iter.head - 10
|
||||
if peekStart < 0 {
|
||||
peekStart = 0
|
||||
}
|
||||
iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... at %s", operation, msg, iter.head,
|
||||
string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
|
||||
}
|
||||
|
||||
// CurrentBuffer gets current buffer as string for debugging purpose
|
||||
func (iter *Iterator) CurrentBuffer() string {
|
||||
peekStart := iter.head - 10
|
||||
if peekStart < 0 {
|
||||
peekStart = 0
|
||||
}
|
||||
return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head,
|
||||
string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
|
||||
}
|
||||
|
||||
func (iter *Iterator) readByte() (ret byte) {
|
||||
if iter.head == iter.tail {
|
||||
if iter.loadMore() {
|
||||
ret = iter.buf[iter.head]
|
||||
iter.head++
|
||||
return ret
|
||||
}
|
||||
return 0
|
||||
}
|
||||
ret = iter.buf[iter.head]
|
||||
iter.head++
|
||||
return ret
|
||||
}
|
||||
|
||||
func (iter *Iterator) loadMore() bool {
|
||||
if iter.reader == nil {
|
||||
if iter.Error == nil {
|
||||
iter.head = iter.tail
|
||||
iter.Error = io.EOF
|
||||
}
|
||||
return false
|
||||
}
|
||||
if iter.captured != nil {
|
||||
iter.captured = append(iter.captured,
|
||||
iter.buf[iter.captureStartedAt:iter.tail]...)
|
||||
iter.captureStartedAt = 0
|
||||
}
|
||||
for {
|
||||
n, err := iter.reader.Read(iter.buf)
|
||||
if n == 0 {
|
||||
if err != nil {
|
||||
if iter.Error == nil {
|
||||
iter.Error = err
|
||||
}
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
iter.head = 0
|
||||
iter.tail = n
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (iter *Iterator) unreadByte() {
|
||||
if iter.Error != nil {
|
||||
return
|
||||
}
|
||||
iter.head--
|
||||
return
|
||||
}
|
||||
|
||||
// Read read the next JSON element as generic interface{}.
|
||||
func (iter *Iterator) Read() interface{} {
|
||||
valueType := iter.WhatIsNext()
|
||||
switch valueType {
|
||||
case StringValue:
|
||||
return iter.ReadString()
|
||||
case NumberValue:
|
||||
if iter.cfg.configBeforeFrozen.UseNumber {
|
||||
return json.Number(iter.readNumberAsString())
|
||||
}
|
||||
return iter.ReadFloat64()
|
||||
case NilValue:
|
||||
iter.skipFourBytes('n', 'u', 'l', 'l')
|
||||
return nil
|
||||
case BoolValue:
|
||||
return iter.ReadBool()
|
||||
case ArrayValue:
|
||||
arr := []interface{}{}
|
||||
iter.ReadArrayCB(func(iter *Iterator) bool {
|
||||
var elem interface{}
|
||||
iter.ReadVal(&elem)
|
||||
arr = append(arr, elem)
|
||||
return true
|
||||
})
|
||||
return arr
|
||||
case ObjectValue:
|
||||
obj := map[string]interface{}{}
|
||||
iter.ReadMapCB(func(Iter *Iterator, field string) bool {
|
||||
var elem interface{}
|
||||
iter.ReadVal(&elem)
|
||||
obj[field] = elem
|
||||
return true
|
||||
})
|
||||
return obj
|
||||
default:
|
||||
iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
|
||||
return nil
|
||||
}
|
||||
}
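The Iterator above is a pull parser over a byte buffer or an io.Reader: WhatIsNext peeks at the type of the next element and Read materializes it into the usual interface{} shapes. A short sketch, not from the vendored source; the payload is illustrative.

package main

import (
    "fmt"
    "io"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"name": "etcd", "replicas": 3}`)

    if iter.WhatIsNext() == jsoniter.ObjectValue {
        v := iter.Read() // map[string]interface{} with numbers as float64 (UseNumber is off)
        fmt.Println(v)
    }
    // Errors accumulate on the iterator instead of being returned per call.
    if iter.Error != nil && iter.Error != io.EOF {
        fmt.Println("parse error:", iter.Error)
    }
}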
58
vendor/github.com/json-iterator/go/feature_iter_array.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
|||
package jsoniter
|
||||
|
||||
// ReadArray read array element, tells if the array has more element to read.
|
||||
func (iter *Iterator) ReadArray() (ret bool) {
|
||||
c := iter.nextToken()
|
||||
switch c {
|
||||
case 'n':
|
||||
iter.skipThreeBytes('u', 'l', 'l')
|
||||
return false // null
|
||||
case '[':
|
||||
c = iter.nextToken()
|
||||
if c != ']' {
|
||||
iter.unreadByte()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
case ']':
|
||||
return false
|
||||
case ',':
|
||||
return true
|
||||
default:
|
||||
iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c}))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ReadArrayCB read array with callback
|
||||
func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
|
||||
c := iter.nextToken()
|
||||
if c == '[' {
|
||||
c = iter.nextToken()
|
||||
if c != ']' {
|
||||
iter.unreadByte()
|
||||
if !callback(iter) {
|
||||
return false
|
||||
}
|
||||
c = iter.nextToken()
|
||||
for c == ',' {
|
||||
if !callback(iter) {
|
||||
return false
|
||||
}
|
||||
c = iter.nextToken()
|
||||
}
|
||||
if c != ']' {
|
||||
iter.ReportError("ReadArrayCB", "expect ] in the end")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
if c == 'n' {
|
||||
iter.skipThreeBytes('u', 'l', 'l')
|
||||
return true // null
|
||||
}
|
||||
iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c}))
|
||||
return false
|
||||
}
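ReadArrayCB is the allocation-light way to walk an array: the callback runs once per element and returning false stops the scan. A sketch, not from the vendored source; the input is illustrative.

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, 2, 3, 4]`)

    sum := 0
    iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
        sum += it.ReadInt() // consume exactly one element per callback
        return true         // returning false would stop the iteration early
    })
    fmt.Println(sum) // 10
}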
341
vendor/github.com/json-iterator/go/feature_iter_float.go
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var floatDigits []int8
|
||||
|
||||
const invalidCharForNumber = int8(-1)
|
||||
const endOfNumber = int8(-2)
|
||||
const dotInNumber = int8(-3)
|
||||
|
||||
func init() {
|
||||
floatDigits = make([]int8, 256)
|
||||
for i := 0; i < len(floatDigits); i++ {
|
||||
floatDigits[i] = invalidCharForNumber
|
||||
}
|
||||
for i := int8('0'); i <= int8('9'); i++ {
|
||||
floatDigits[i] = i - int8('0')
|
||||
}
|
||||
floatDigits[','] = endOfNumber
|
||||
floatDigits[']'] = endOfNumber
|
||||
floatDigits['}'] = endOfNumber
|
||||
floatDigits[' '] = endOfNumber
|
||||
floatDigits['\t'] = endOfNumber
|
||||
floatDigits['\n'] = endOfNumber
|
||||
floatDigits['.'] = dotInNumber
|
||||
}
|
||||
|
||||
// ReadBigFloat read big.Float
|
||||
func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
|
||||
str := iter.readNumberAsString()
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return nil
|
||||
}
|
||||
prec := 64
|
||||
if len(str) > prec {
|
||||
prec = len(str)
|
||||
}
|
||||
val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
|
||||
if err != nil {
|
||||
iter.Error = err
|
||||
return nil
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// ReadBigInt read big.Int
|
||||
func (iter *Iterator) ReadBigInt() (ret *big.Int) {
|
||||
str := iter.readNumberAsString()
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return nil
|
||||
}
|
||||
ret = big.NewInt(0)
|
||||
var success bool
|
||||
ret, success = ret.SetString(str, 10)
|
||||
if !success {
|
||||
iter.ReportError("ReadBigInt", "invalid big int")
|
||||
return nil
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
//ReadFloat32 read float32
|
||||
func (iter *Iterator) ReadFloat32() (ret float32) {
|
||||
c := iter.nextToken()
|
||||
if c == '-' {
|
||||
return -iter.readPositiveFloat32()
|
||||
}
|
||||
iter.unreadByte()
|
||||
return iter.readPositiveFloat32()
|
||||
}
|
||||
|
||||
func (iter *Iterator) readPositiveFloat32() (ret float32) {
|
||||
value := uint64(0)
|
||||
c := byte(' ')
|
||||
i := iter.head
|
||||
// first char
|
||||
if i == iter.tail {
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
c = iter.buf[i]
|
||||
i++
|
||||
ind := floatDigits[c]
|
||||
switch ind {
|
||||
case invalidCharForNumber:
|
||||
return iter.readFloat32SlowPath()
|
||||
case endOfNumber:
|
||||
iter.ReportError("readFloat32", "empty number")
|
||||
return
|
||||
case dotInNumber:
|
||||
iter.ReportError("readFloat32", "leading dot is invalid")
|
||||
return
|
||||
case 0:
|
||||
if i == iter.tail {
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
c = iter.buf[i]
|
||||
switch c {
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
iter.ReportError("readFloat32", "leading zero is invalid")
|
||||
return
|
||||
}
|
||||
}
|
||||
value = uint64(ind)
|
||||
// chars before dot
|
||||
non_decimal_loop:
|
||||
for ; i < iter.tail; i++ {
|
||||
c = iter.buf[i]
|
||||
ind := floatDigits[c]
|
||||
switch ind {
|
||||
case invalidCharForNumber:
|
||||
return iter.readFloat32SlowPath()
|
||||
case endOfNumber:
|
||||
iter.head = i
|
||||
return float32(value)
|
||||
case dotInNumber:
|
||||
break non_decimal_loop
|
||||
}
|
||||
if value > uint64SafeToMultiple10 {
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
|
||||
}
|
||||
// chars after dot
|
||||
if c == '.' {
|
||||
i++
|
||||
decimalPlaces := 0
|
||||
if i == iter.tail {
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
for ; i < iter.tail; i++ {
|
||||
c = iter.buf[i]
|
||||
ind := floatDigits[c]
|
||||
switch ind {
|
||||
case endOfNumber:
|
||||
if decimalPlaces > 0 && decimalPlaces < len(pow10) {
|
||||
iter.head = i
|
||||
return float32(float64(value) / float64(pow10[decimalPlaces]))
|
||||
}
|
||||
// too many decimal places
|
||||
return iter.readFloat32SlowPath()
|
||||
case invalidCharForNumber:
|
||||
fallthrough
|
||||
case dotInNumber:
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
decimalPlaces++
|
||||
if value > uint64SafeToMultiple10 {
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
value = (value << 3) + (value << 1) + uint64(ind)
|
||||
}
|
||||
}
|
||||
return iter.readFloat32SlowPath()
|
||||
}
|
||||
|
||||
func (iter *Iterator) readNumberAsString() (ret string) {
|
||||
strBuf := [16]byte{}
|
||||
str := strBuf[0:0]
|
||||
load_loop:
|
||||
for {
|
||||
for i := iter.head; i < iter.tail; i++ {
|
||||
c := iter.buf[i]
|
||||
switch c {
|
||||
case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
str = append(str, c)
|
||||
continue
|
||||
default:
|
||||
iter.head = i
|
||||
break load_loop
|
||||
}
|
||||
}
|
||||
if !iter.loadMore() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return
|
||||
}
|
||||
if len(str) == 0 {
|
||||
iter.ReportError("readNumberAsString", "invalid number")
|
||||
}
|
||||
return *(*string)(unsafe.Pointer(&str))
|
||||
}
|
||||
|
||||
func (iter *Iterator) readFloat32SlowPath() (ret float32) {
|
||||
str := iter.readNumberAsString()
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return
|
||||
}
|
||||
errMsg := validateFloat(str)
|
||||
if errMsg != "" {
|
||||
iter.ReportError("readFloat32SlowPath", errMsg)
|
||||
return
|
||||
}
|
||||
val, err := strconv.ParseFloat(str, 32)
|
||||
if err != nil {
|
||||
iter.Error = err
|
||||
return
|
||||
}
|
||||
return float32(val)
|
||||
}
|
||||
|
||||
// ReadFloat64 read float64
|
||||
func (iter *Iterator) ReadFloat64() (ret float64) {
|
||||
c := iter.nextToken()
|
||||
if c == '-' {
|
||||
return -iter.readPositiveFloat64()
|
||||
}
|
||||
iter.unreadByte()
|
||||
return iter.readPositiveFloat64()
|
||||
}
|
||||
|
||||
func (iter *Iterator) readPositiveFloat64() (ret float64) {
|
||||
value := uint64(0)
|
||||
c := byte(' ')
|
||||
i := iter.head
|
||||
// first char
|
||||
if i == iter.tail {
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
c = iter.buf[i]
|
||||
i++
|
||||
ind := floatDigits[c]
|
||||
switch ind {
|
||||
case invalidCharForNumber:
|
||||
return iter.readFloat64SlowPath()
|
||||
case endOfNumber:
|
||||
iter.ReportError("readFloat64", "empty number")
|
||||
return
|
||||
case dotInNumber:
|
||||
iter.ReportError("readFloat64", "leading dot is invalid")
|
||||
return
|
||||
case 0:
|
||||
if i == iter.tail {
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
c = iter.buf[i]
|
||||
switch c {
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
iter.ReportError("readFloat64", "leading zero is invalid")
|
||||
return
|
||||
}
|
||||
}
|
||||
value = uint64(ind)
|
||||
// chars before dot
|
||||
non_decimal_loop:
|
||||
for ; i < iter.tail; i++ {
|
||||
c = iter.buf[i]
|
||||
ind := floatDigits[c]
|
||||
switch ind {
|
||||
case invalidCharForNumber:
|
||||
return iter.readFloat64SlowPath()
|
||||
case endOfNumber:
|
||||
iter.head = i
|
||||
return float64(value)
|
||||
case dotInNumber:
|
||||
break non_decimal_loop
|
||||
}
|
||||
if value > uint64SafeToMultiple10 {
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
|
||||
}
|
||||
// chars after dot
|
||||
if c == '.' {
|
||||
i++
|
||||
decimalPlaces := 0
|
||||
if i == iter.tail {
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
for ; i < iter.tail; i++ {
|
||||
c = iter.buf[i]
|
||||
ind := floatDigits[c]
|
||||
switch ind {
|
||||
case endOfNumber:
|
||||
if decimalPlaces > 0 && decimalPlaces < len(pow10) {
|
||||
iter.head = i
|
||||
return float64(value) / float64(pow10[decimalPlaces])
|
||||
}
|
||||
// too many decimal places
|
||||
return iter.readFloat64SlowPath()
|
||||
case invalidCharForNumber:
|
||||
fallthrough
|
||||
case dotInNumber:
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
decimalPlaces++
|
||||
if value > uint64SafeToMultiple10 {
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
value = (value << 3) + (value << 1) + uint64(ind)
|
||||
}
|
||||
}
|
||||
return iter.readFloat64SlowPath()
|
||||
}
|
||||
|
||||
func (iter *Iterator) readFloat64SlowPath() (ret float64) {
|
||||
str := iter.readNumberAsString()
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return
|
||||
}
|
||||
errMsg := validateFloat(str)
|
||||
if errMsg != "" {
|
||||
iter.ReportError("readFloat64SlowPath", errMsg)
|
||||
return
|
||||
}
|
||||
val, err := strconv.ParseFloat(str, 64)
|
||||
if err != nil {
|
||||
iter.Error = err
|
||||
return
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func validateFloat(str string) string {
|
||||
// strconv.ParseFloat is not validating `1.` or `1.e1`
|
||||
if len(str) == 0 {
|
||||
return "empty number"
|
||||
}
|
||||
if str[0] == '-' {
|
||||
return "-- is not valid"
|
||||
}
|
||||
dotPos := strings.IndexByte(str, '.')
|
||||
if dotPos != -1 {
|
||||
if dotPos == len(str)-1 {
|
||||
return "dot can not be last character"
|
||||
}
|
||||
switch str[dotPos+1] {
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
default:
|
||||
return "missing digit after dot"
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
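ReadFloat64 takes the table-driven fast path for plain digit-and-dot numbers and falls back to readFloat64SlowPath (strconv.ParseFloat) for exponents or overly long fractions; ReadBigFloat keeps arbitrary precision. A sketch, not from the vendored source; the values are illustrative.

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[3.25, 1e3, 0.1234567890123456789]`)

    iter.ReadArray()
    fmt.Println(iter.ReadFloat64()) // 3.25, fast path: digits and a dot only
    iter.ReadArray()
    fmt.Println(iter.ReadFloat64()) // 1000, 'e' forces the strconv slow path
    iter.ReadArray()
    bf := iter.ReadBigFloat() // *big.Float, precision grows with the literal length
    fmt.Println(bf.Text('g', 25))
}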
258
vendor/github.com/json-iterator/go/feature_iter_int.go
generated
vendored
Normal file
@@ -0,0 +1,258 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var intDigits []int8
|
||||
|
||||
const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
|
||||
const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
|
||||
|
||||
func init() {
|
||||
intDigits = make([]int8, 256)
|
||||
for i := 0; i < len(intDigits); i++ {
|
||||
intDigits[i] = invalidCharForNumber
|
||||
}
|
||||
for i := int8('0'); i <= int8('9'); i++ {
|
||||
intDigits[i] = i - int8('0')
|
||||
}
|
||||
}
|
||||
|
||||
// ReadUint read uint
|
||||
func (iter *Iterator) ReadUint() uint {
|
||||
return uint(iter.ReadUint64())
|
||||
}
|
||||
|
||||
// ReadInt read int
|
||||
func (iter *Iterator) ReadInt() int {
|
||||
return int(iter.ReadInt64())
|
||||
}
|
||||
|
||||
// ReadInt8 read int8
|
||||
func (iter *Iterator) ReadInt8() (ret int8) {
|
||||
c := iter.nextToken()
|
||||
if c == '-' {
|
||||
val := iter.readUint32(iter.readByte())
|
||||
if val > math.MaxInt8+1 {
|
||||
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return -int8(val)
|
||||
}
|
||||
val := iter.readUint32(c)
|
||||
if val > math.MaxInt8 {
|
||||
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return int8(val)
|
||||
}
|
||||
|
||||
// ReadUint8 read uint8
|
||||
func (iter *Iterator) ReadUint8() (ret uint8) {
|
||||
val := iter.readUint32(iter.nextToken())
|
||||
if val > math.MaxUint8 {
|
||||
iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return uint8(val)
|
||||
}
|
||||
|
||||
// ReadInt16 read int16
|
||||
func (iter *Iterator) ReadInt16() (ret int16) {
|
||||
c := iter.nextToken()
|
||||
if c == '-' {
|
||||
val := iter.readUint32(iter.readByte())
|
||||
if val > math.MaxInt16+1 {
|
||||
iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return -int16(val)
|
||||
}
|
||||
val := iter.readUint32(c)
|
||||
if val > math.MaxInt16 {
|
||||
iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return int16(val)
|
||||
}
|
||||
|
||||
// ReadUint16 read uint16
|
||||
func (iter *Iterator) ReadUint16() (ret uint16) {
|
||||
val := iter.readUint32(iter.nextToken())
|
||||
if val > math.MaxUint16 {
|
||||
iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return uint16(val)
|
||||
}
|
||||
|
||||
// ReadInt32 read int32
|
||||
func (iter *Iterator) ReadInt32() (ret int32) {
|
||||
c := iter.nextToken()
|
||||
if c == '-' {
|
||||
val := iter.readUint32(iter.readByte())
|
||||
if val > math.MaxInt32+1 {
|
||||
iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return -int32(val)
|
||||
}
|
||||
val := iter.readUint32(c)
|
||||
if val > math.MaxInt32 {
|
||||
iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
|
||||
return
|
||||
}
|
||||
return int32(val)
|
||||
}
|
||||
|
||||
// ReadUint32 read uint32
|
||||
func (iter *Iterator) ReadUint32() (ret uint32) {
|
||||
return iter.readUint32(iter.nextToken())
|
||||
}
|
func (iter *Iterator) readUint32(c byte) (ret uint32) {
	ind := intDigits[c]
	if ind == 0 {
		return 0 // single zero
	}
	if ind == invalidCharForNumber {
		iter.ReportError("readUint32", "unexpected character: "+string([]byte{c}))
		return
	}
	value := uint32(ind)
	if iter.tail-iter.head > 10 {
		i := iter.head
		ind2 := intDigits[iter.buf[i]]
		if ind2 == invalidCharForNumber {
			iter.head = i
			return value
		}
		i++
		ind3 := intDigits[iter.buf[i]]
		if ind3 == invalidCharForNumber {
			iter.head = i
			return value*10 + uint32(ind2)
		}
		//iter.head = i + 1
		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
		i++
		ind4 := intDigits[iter.buf[i]]
		if ind4 == invalidCharForNumber {
			iter.head = i
			return value*100 + uint32(ind2)*10 + uint32(ind3)
		}
		i++
		ind5 := intDigits[iter.buf[i]]
		if ind5 == invalidCharForNumber {
			iter.head = i
			return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
		}
		i++
		ind6 := intDigits[iter.buf[i]]
		if ind6 == invalidCharForNumber {
			iter.head = i
			return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
		}
		i++
		ind7 := intDigits[iter.buf[i]]
		if ind7 == invalidCharForNumber {
			iter.head = i
			return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
		}
		i++
		ind8 := intDigits[iter.buf[i]]
		if ind8 == invalidCharForNumber {
			iter.head = i
			return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
		}
		i++
		ind9 := intDigits[iter.buf[i]]
		value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
		iter.head = i
		if ind9 == invalidCharForNumber {
			return value
		}
	}
	for {
		for i := iter.head; i < iter.tail; i++ {
			ind = intDigits[iter.buf[i]]
			if ind == invalidCharForNumber {
				iter.head = i
				return value
			}
			if value > uint32SafeToMultiply10 {
				value2 := (value << 3) + (value << 1) + uint32(ind)
				if value2 < value {
					iter.ReportError("readUint32", "overflow")
					return
				}
				value = value2
				continue
			}
			value = (value << 3) + (value << 1) + uint32(ind)
		}
		if !iter.loadMore() {
			return value
		}
	}
}

// ReadInt64 read int64
func (iter *Iterator) ReadInt64() (ret int64) {
	c := iter.nextToken()
	if c == '-' {
		val := iter.readUint64(iter.readByte())
		if val > math.MaxInt64+1 {
			iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
			return
		}
		return -int64(val)
	}
	val := iter.readUint64(c)
	if val > math.MaxInt64 {
		iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
		return
	}
	return int64(val)
}

// ReadUint64 read uint64
func (iter *Iterator) ReadUint64() uint64 {
	return iter.readUint64(iter.nextToken())
}

func (iter *Iterator) readUint64(c byte) (ret uint64) {
	ind := intDigits[c]
	if ind == 0 {
		return 0 // single zero
	}
	if ind == invalidCharForNumber {
		iter.ReportError("readUint64", "unexpected character: "+string([]byte{c}))
		return
	}
	value := uint64(ind)
	for {
		for i := iter.head; i < iter.tail; i++ {
			ind = intDigits[iter.buf[i]]
			if ind == invalidCharForNumber {
				iter.head = i
				return value
			}
			if value > uint64SafeToMultiple10 {
				value2 := (value << 3) + (value << 1) + uint64(ind)
				if value2 < value {
					iter.ReportError("readUint64", "overflow")
					return
				}
				value = value2
				continue
			}
			value = (value << 3) + (value << 1) + uint64(ind)
		}
		if !iter.loadMore() {
			return value
		}
	}
}
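The digit loop multiplies by ten with shifts and, once value passes uint32SafeToMultiply10, detects overflow by checking for unsigned wraparound. A self-contained illustration of that check (the values are illustrative):

package main

import "fmt"

func main() {
	// 429496729*10+5 equals math.MaxUint32 exactly, but one more wraps.
	value := uint32(429496729)
	digit := uint32(6)
	value2 := (value << 3) + (value << 1) + digit // value*10 + digit, mod 2^32
	if value2 < value {
		fmt.Println("overflow detected:", value, "->", value2) // wraps to 0
	}
}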
212 vendor/github.com/json-iterator/go/feature_iter_object.go generated vendored Normal file
@@ -0,0 +1,212 @@
package jsoniter

import (
	"fmt"
	"unicode"
	"unsafe"
)

// ReadObject read one field from object.
// If object ended, returns empty string.
// Otherwise, returns the field name.
func (iter *Iterator) ReadObject() (ret string) {
	c := iter.nextToken()
	switch c {
	case 'n':
		iter.skipThreeBytes('u', 'l', 'l')
		return "" // null
	case '{':
		c = iter.nextToken()
		if c == '"' {
			iter.unreadByte()
			return string(iter.readObjectFieldAsBytes())
		}
		if c == '}' {
			return "" // end of object
		}
		iter.ReportError("ReadObject", `expect " after {`)
		return
	case ',':
		return string(iter.readObjectFieldAsBytes())
	case '}':
		return "" // end of object
	default:
		iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
		return
	}
}

func (iter *Iterator) readFieldHash() int32 {
	hash := int64(0x811c9dc5)
	c := iter.nextToken()
	if c == '"' {
		for {
			for i := iter.head; i < iter.tail; i++ {
				// require ascii string and no escape
				b := iter.buf[i]
				if 'A' <= b && b <= 'Z' {
					b += 'a' - 'A'
				}
				if b == '"' {
					iter.head = i + 1
					c = iter.nextToken()
					if c != ':' {
						iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
					}
					return int32(hash)
				}
				hash ^= int64(b)
				hash *= 0x1000193
			}
			if !iter.loadMore() {
				iter.ReportError("readFieldHash", `incomplete field name`)
				return 0
			}
		}
	}
	iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
	return 0
}

func calcHash(str string) int32 {
	hash := int64(0x811c9dc5)
	for _, b := range str {
		hash ^= int64(unicode.ToLower(b))
		hash *= 0x1000193
	}
	return int32(hash)
}
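readFieldHash and calcHash compute the same case-insensitive FNV-1a hash (offset basis 0x811c9dc5, prime 0x1000193), one over raw buffer bytes and one over a string, so field names hashed at decode time match those hashed from struct definitions. A standalone re-derivation of calcHash for checking (the field names are illustrative):

package main

import (
	"fmt"
	"unicode"
)

// fnv1aLower mirrors calcHash above: case-insensitive FNV-1a truncated to int32.
func fnv1aLower(s string) int32 {
	hash := int64(0x811c9dc5)
	for _, b := range s {
		hash ^= int64(unicode.ToLower(b))
		hash *= 0x1000193
	}
	return int32(hash)
}

func main() {
	// Field names differing only in case hash identically.
	fmt.Println(fnv1aLower("userName") == fnv1aLower("USERNAME")) // true
}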
// ReadObjectCB read object with callback, the key is ascii only and field name not copied
func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
	c := iter.nextToken()
	if c == '{' {
		c = iter.nextToken()
		if c == '"' {
			iter.unreadByte()
			field := iter.readObjectFieldAsBytes()
			if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
				return false
			}
			c = iter.nextToken()
			for c == ',' {
				field = iter.readObjectFieldAsBytes()
				if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
					return false
				}
				c = iter.nextToken()
			}
			if c != '}' {
				iter.ReportError("ReadObjectCB", `object not ended with }`)
				return false
			}
			return true
		}
		if c == '}' {
			return true
		}
		iter.ReportError("ReadObjectCB", `expect " after {`)
		return false
	}
	if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return true // null
	}
	iter.ReportError("ReadObjectCB", `expect { or n`)
	return false
}

// ReadMapCB read map with callback, the key can be any string
func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
	c := iter.nextToken()
	if c == '{' {
		c = iter.nextToken()
		if c == '"' {
			iter.unreadByte()
			field := iter.ReadString()
			if iter.nextToken() != ':' {
				iter.ReportError("ReadMapCB", "expect : after object field")
				return false
			}
			if !callback(iter, field) {
				return false
			}
			c = iter.nextToken()
			for c == ',' {
				field = iter.ReadString()
				if iter.nextToken() != ':' {
					iter.ReportError("ReadMapCB", "expect : after object field")
					return false
				}
				if !callback(iter, field) {
					return false
				}
				c = iter.nextToken()
			}
			if c != '}' {
				iter.ReportError("ReadMapCB", `object not ended with }`)
				return false
			}
			return true
		}
		if c == '}' {
			return true
		}
		iter.ReportError("ReadMapCB", `expect " after {`)
		return false
	}
	if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return true // null
	}
	iter.ReportError("ReadMapCB", `expect { or n`)
	return false
}

func (iter *Iterator) readObjectStart() bool {
	c := iter.nextToken()
	if c == '{' {
		c = iter.nextToken()
		if c == '}' {
			return false
		}
		iter.unreadByte()
		return true
	} else if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return false
	}
	iter.ReportError("readObjectStart", "expect { or n")
	return false
}

func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
	str := iter.ReadStringAsSlice()
	if iter.skipWhitespacesWithoutLoadMore() {
		if ret == nil {
			ret = make([]byte, len(str))
			copy(ret, str)
		}
		if !iter.loadMore() {
			return
		}
	}
	if iter.buf[iter.head] != ':' {
		iter.ReportError("readObjectFieldAsBytes", "expect : after object field")
		return
	}
	iter.head++
	if iter.skipWhitespacesWithoutLoadMore() {
		if ret == nil {
			ret = make([]byte, len(str))
			copy(ret, str)
		}
		if !iter.loadMore() {
			return
		}
	}
	if ret == nil {
		return str
	}
	return ret
}
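A short usage sketch of the callback readers; ParseString, ConfigDefault and ReadInt are assumed from this vendored version. Note the key handed to the ReadObjectCB callback aliases the buffer, so copy it if it must outlive the call:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"a": 1, "b": 2}`)
	total := 0
	iter.ReadObjectCB(func(it *jsoniter.Iterator, field string) bool {
		fmt.Println("field:", field)
		total += it.ReadInt()
		return true // keep iterating
	})
	fmt.Println("sum:", total) // sum: 3
}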
127 vendor/github.com/json-iterator/go/feature_iter_skip.go generated vendored Normal file
@@ -0,0 +1,127 @@
package jsoniter

import "fmt"

// ReadNil reads a json object as nil and
// returns whether it's a nil or not
func (iter *Iterator) ReadNil() (ret bool) {
	c := iter.nextToken()
	if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l') // null
		return true
	}
	iter.unreadByte()
	return false
}

// ReadBool reads a json object as BoolValue
func (iter *Iterator) ReadBool() (ret bool) {
	c := iter.nextToken()
	if c == 't' {
		iter.skipThreeBytes('r', 'u', 'e')
		return true
	}
	if c == 'f' {
		iter.skipFourBytes('a', 'l', 's', 'e')
		return false
	}
	iter.ReportError("ReadBool", "expect t or f")
	return
}

// SkipAndReturnBytes skip next JSON element, and return its content as []byte.
// The []byte can be kept, it is a copy of data.
func (iter *Iterator) SkipAndReturnBytes() []byte {
	iter.startCapture(iter.head)
	iter.Skip()
	return iter.stopCapture()
}

type captureBuffer struct {
	startedAt int
	captured  []byte
}

func (iter *Iterator) startCapture(captureStartedAt int) {
	if iter.captured != nil {
		panic("already in capture mode")
	}
	iter.captureStartedAt = captureStartedAt
	iter.captured = make([]byte, 0, 32)
}

func (iter *Iterator) stopCapture() []byte {
	if iter.captured == nil {
		panic("not in capture mode")
	}
	captured := iter.captured
	remaining := iter.buf[iter.captureStartedAt:iter.head]
	iter.captureStartedAt = -1
	iter.captured = nil
	if len(captured) == 0 {
		return remaining
	}
	captured = append(captured, remaining...)
	return captured
}

// Skip skips a json object and positions the iterator at the next json object
func (iter *Iterator) Skip() {
	c := iter.nextToken()
	switch c {
	case '"':
		iter.skipString()
	case 'n':
		iter.skipThreeBytes('u', 'l', 'l') // null
	case 't':
		iter.skipThreeBytes('r', 'u', 'e') // true
	case 'f':
		iter.skipFourBytes('a', 'l', 's', 'e') // false
	case '0':
		iter.unreadByte()
		iter.ReadFloat32()
	case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		iter.skipNumber()
	case '[':
		iter.skipArray()
	case '{':
		iter.skipObject()
	default:
		iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
		return
	}
}

func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
	if iter.readByte() != b1 {
		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
		return
	}
	if iter.readByte() != b2 {
		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
		return
	}
	if iter.readByte() != b3 {
		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
		return
	}
	if iter.readByte() != b4 {
		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
		return
	}
}

func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
	if iter.readByte() != b1 {
		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
		return
	}
	if iter.readByte() != b2 {
		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
		return
	}
	if iter.readByte() != b3 {
		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
		return
	}
}
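SkipAndReturnBytes combines capture mode with Skip to slice out one element verbatim, much like deferring a field to json.RawMessage. A sketch under the same ParseString assumption:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"kind":"a","payload":{"x":[1,2,3]}}`)
	var payload []byte
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "payload" {
			payload = iter.SkipAndReturnBytes() // raw bytes of the payload element
		} else {
			iter.Skip()
		}
	}
	fmt.Printf("%s\n", payload) // {"x":[1,2,3]}
}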
144 vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go generated vendored Normal file
@@ -0,0 +1,144 @@
//+build jsoniter-sloppy

package jsoniter

// sloppy but faster implementation, do not validate the input json

func (iter *Iterator) skipNumber() {
	for {
		for i := iter.head; i < iter.tail; i++ {
			c := iter.buf[i]
			switch c {
			case ' ', '\n', '\r', '\t', ',', '}', ']':
				iter.head = i
				return
			}
		}
		if !iter.loadMore() {
			return
		}
	}
}

func (iter *Iterator) skipArray() {
	level := 1
	for {
		for i := iter.head; i < iter.tail; i++ {
			switch iter.buf[i] {
			case '"': // If inside string, skip it
				iter.head = i + 1
				iter.skipString()
				i = iter.head - 1 // it will be i++ soon
			case '[': // If open symbol, increase level
				level++
			case ']': // If close symbol, decrease level
				level--

				// If we have returned to the original level, we're done
				if level == 0 {
					iter.head = i + 1
					return
				}
			}
		}
		if !iter.loadMore() {
			iter.ReportError("skipArray", "incomplete array")
			return
		}
	}
}

func (iter *Iterator) skipObject() {
	level := 1
	for {
		for i := iter.head; i < iter.tail; i++ {
			switch iter.buf[i] {
			case '"': // If inside string, skip it
				iter.head = i + 1
				iter.skipString()
				i = iter.head - 1 // it will be i++ soon
			case '{': // If open symbol, increase level
				level++
			case '}': // If close symbol, decrease level
				level--

				// If we have returned to the original level, we're done
				if level == 0 {
					iter.head = i + 1
					return
				}
			}
		}
		if !iter.loadMore() {
			iter.ReportError("skipObject", "incomplete object")
			return
		}
	}
}

func (iter *Iterator) skipString() {
	for {
		end, escaped := iter.findStringEnd()
		if end == -1 {
			if !iter.loadMore() {
				iter.ReportError("skipString", "incomplete string")
				return
			}
			if escaped {
				iter.head = 1 // skip the first char as last char read is \
			}
		} else {
			iter.head = end
			return
		}
	}
}

// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
// Tries to find the end of string
// Support if string contains escaped quote symbols.
func (iter *Iterator) findStringEnd() (int, bool) {
	escaped := false
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		if c == '"' {
			if !escaped {
				return i + 1, false
			}
			j := i - 1
			for {
				if j < iter.head || iter.buf[j] != '\\' {
					// even number of backslashes
					// either end of buffer, or " found
					return i + 1, true
				}
				j--
				if j < iter.head || iter.buf[j] != '\\' {
					// odd number of backslashes
					// it is \" or \\\"
					break
				}
				j--
			}
		} else if c == '\\' {
			escaped = true
		}
	}
	j := iter.tail - 1
	for {
		if j < iter.head || iter.buf[j] != '\\' {
			// even number of backslashes
			// either end of buffer, or " found
			return -1, false // do not end with \
		}
		j--
		if j < iter.head || iter.buf[j] != '\\' {
			// odd number of backslashes
			// it is \" or \\\"
			break
		}
		j--
	}
	return -1, true // end with \
}
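findStringEnd decides whether a closing quote is real by counting the backslashes immediately before it: an even count means the quote ends the string. The parity test, distilled into a standalone helper (buf and pos are illustrative):

package main

import "fmt"

// quoteIsEscaped reports whether the '"' at buf[pos] is preceded by an
// odd number of consecutive backslashes, i.e. the quote itself is escaped.
func quoteIsEscaped(buf []byte, pos int) bool {
	backslashes := 0
	for j := pos - 1; j >= 0 && buf[j] == '\\'; j-- {
		backslashes++
	}
	return backslashes%2 == 1
}

func main() {
	fmt.Println(quoteIsEscaped([]byte(`ab\"`), 3))  // true: \" is an escaped quote
	fmt.Println(quoteIsEscaped([]byte(`ab\\"`), 4)) // false: \\ then a real quote
}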
89 vendor/github.com/json-iterator/go/feature_iter_skip_strict.go generated vendored Normal file
@@ -0,0 +1,89 @@
//+build !jsoniter-sloppy

package jsoniter

import "fmt"

func (iter *Iterator) skipNumber() {
	if !iter.trySkipNumber() {
		iter.unreadByte()
		iter.ReadFloat32()
	}
}

func (iter *Iterator) trySkipNumber() bool {
	dotFound := false
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		case '.':
			if dotFound {
				iter.ReportError("validateNumber", `more than one dot found in number`)
				return true // already failed
			}
			if i+1 == iter.tail {
				return false
			}
			c = iter.buf[i+1]
			switch c {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			default:
				iter.ReportError("validateNumber", `missing digit after dot`)
				return true // already failed
			}
			dotFound = true
		default:
			switch c {
			case ',', ']', '}', ' ', '\t', '\n', '\r':
				if iter.head == i {
					return false // if - without following digits
				}
				iter.head = i
				return true // must be valid
			}
			return false // may be invalid
		}
	}
	return false
}

func (iter *Iterator) skipString() {
	if !iter.trySkipString() {
		iter.unreadByte()
		iter.ReadString()
	}
}

func (iter *Iterator) trySkipString() bool {
	for i := iter.head; i < iter.tail; i++ {
		c := iter.buf[i]
		if c == '"' {
			iter.head = i + 1
			return true // valid
		} else if c == '\\' {
			return false
		} else if c < ' ' {
			iter.ReportError("ReadString",
				fmt.Sprintf(`invalid control character found: %d`, c))
			return true // already failed
		}
	}
	return false
}

func (iter *Iterator) skipObject() {
	iter.unreadByte()
	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
		iter.Skip()
		return true
	})
}

func (iter *Iterator) skipArray() {
	iter.unreadByte()
	iter.ReadArrayCB(func(iter *Iterator) bool {
		iter.Skip()
		return true
	})
}
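The strict file is compiled by default and the sloppy one only under the jsoniter-sloppy build tag; both export the same skip functions, so the choice is purely build-time. The try-fast-then-revalidate shape of trySkipString is worth noting on its own; a hedged distillation with illustrative names:

package main

import "fmt"

// tryFastScan mirrors trySkipString above: it succeeds only on plain
// ASCII strings, deferring escapes and control characters to a slow path.
func tryFastScan(buf []byte) (end int, ok bool) {
	for i, c := range buf {
		switch {
		case c == '"':
			return i + 1, true
		case c == '\\' || c < ' ':
			return 0, false // needs the full validating parse
		}
	}
	return 0, false // ran out of buffered input
}

func main() {
	fmt.Println(tryFastScan([]byte(`plain"`)))     // 6 true
	fmt.Println(tryFastScan([]byte(`esc\"aped"`))) // 0 false: fall back
}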
215 vendor/github.com/json-iterator/go/feature_iter_string.go generated vendored Normal file
@@ -0,0 +1,215 @@
package jsoniter

import (
	"fmt"
	"unicode/utf16"
)

// ReadString read string from iterator
func (iter *Iterator) ReadString() (ret string) {
	c := iter.nextToken()
	if c == '"' {
		for i := iter.head; i < iter.tail; i++ {
			c := iter.buf[i]
			if c == '"' {
				ret = string(iter.buf[iter.head:i])
				iter.head = i + 1
				return ret
			} else if c == '\\' {
				break
			} else if c < ' ' {
				iter.ReportError("ReadString",
					fmt.Sprintf(`invalid control character found: %d`, c))
				return
			}
		}
		return iter.readStringSlowPath()
	} else if c == 'n' {
		iter.skipThreeBytes('u', 'l', 'l')
		return ""
	}
	iter.ReportError("ReadString", `expects " or n`)
	return
}

func (iter *Iterator) readStringSlowPath() (ret string) {
	var str []byte
	var c byte
	for iter.Error == nil {
		c = iter.readByte()
		if c == '"' {
			return string(str)
		}
		if c == '\\' {
			c = iter.readByte()
			str = iter.readEscapedChar(c, str)
		} else {
			str = append(str, c)
		}
	}
	iter.ReportError("ReadString", "unexpected end of input")
	return
}

func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
	switch c {
	case 'u':
		r := iter.readU4()
		if utf16.IsSurrogate(r) {
			c = iter.readByte()
			if iter.Error != nil {
				return nil
			}
			if c != '\\' {
				iter.unreadByte()
				str = appendRune(str, r)
				return str
			}
			c = iter.readByte()
			if iter.Error != nil {
				return nil
			}
			if c != 'u' {
				str = appendRune(str, r)
				return iter.readEscapedChar(c, str)
			}
			r2 := iter.readU4()
			if iter.Error != nil {
				return nil
			}
			combined := utf16.DecodeRune(r, r2)
			if combined == '\uFFFD' {
				str = appendRune(str, r)
				str = appendRune(str, r2)
			} else {
				str = appendRune(str, combined)
			}
		} else {
			str = appendRune(str, r)
		}
	case '"':
		str = append(str, '"')
	case '\\':
		str = append(str, '\\')
	case '/':
		str = append(str, '/')
	case 'b':
		str = append(str, '\b')
	case 'f':
		str = append(str, '\f')
	case 'n':
		str = append(str, '\n')
	case 'r':
		str = append(str, '\r')
	case 't':
		str = append(str, '\t')
	default:
		iter.ReportError("ReadString",
			`invalid escape char after \`)
		return nil
	}
	return str
}
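readEscapedChar reassembles \uXXXX surrogate pairs with unicode/utf16 and falls back to emitting both halves when they do not combine. The stdlib behavior it relies on:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// JSON escapes the emoji U+1F600 as the surrogate pair \ud83d\ude00.
	r1, r2 := rune(0xd83d), rune(0xde00)
	fmt.Println(utf16.IsSurrogate(r1))           // true
	fmt.Printf("%c\n", utf16.DecodeRune(r1, r2)) // 😀 (U+1F600)

	// An unpaired half decodes to U+FFFD, which the code above detects
	// and handles by appending both runes individually.
	fmt.Println(utf16.DecodeRune(r1, 'x') == '\uFFFD') // true
}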
// ReadStringAsSlice read string from iterator without copying into string form.
// The []byte can not be kept, as it will change after next iterator call.
func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
	c := iter.nextToken()
	if c == '"' {
		for i := iter.head; i < iter.tail; i++ {
			// require ascii string and no escape
			// for: field name, base64, number
			if iter.buf[i] == '"' {
				// fast path: reuse the underlying buffer
				ret = iter.buf[iter.head:i]
				iter.head = i + 1
				return ret
			}
		}
		readLen := iter.tail - iter.head
		copied := make([]byte, readLen, readLen*2)
		copy(copied, iter.buf[iter.head:iter.tail])
		iter.head = iter.tail
		for iter.Error == nil {
			c := iter.readByte()
			if c == '"' {
				return copied
			}
			copied = append(copied, c)
		}
		return copied
	}
	iter.ReportError("ReadString", `expects " or n`)
	return
}

func (iter *Iterator) readU4() (ret rune) {
	for i := 0; i < 4; i++ {
		c := iter.readByte()
		if iter.Error != nil {
			return
		}
		if c >= '0' && c <= '9' {
			ret = ret*16 + rune(c-'0')
		} else if c >= 'a' && c <= 'f' {
			ret = ret*16 + rune(c-'a'+10)
		} else if c >= 'A' && c <= 'F' {
			ret = ret*16 + rune(c-'A'+10)
		} else {
			iter.ReportError("readU4", "expects 0~9 or a~f")
			return
		}
	}
	return ret
}

const (
	t1 = 0x00 // 0000 0000
	tx = 0x80 // 1000 0000
	t2 = 0xC0 // 1100 0000
	t3 = 0xE0 // 1110 0000
	t4 = 0xF0 // 1111 0000
	t5 = 0xF8 // 1111 1000

	maskx = 0x3F // 0011 1111
	mask2 = 0x1F // 0001 1111
	mask3 = 0x0F // 0000 1111
	mask4 = 0x07 // 0000 0111

	rune1Max = 1<<7 - 1
	rune2Max = 1<<11 - 1
	rune3Max = 1<<16 - 1

	surrogateMin = 0xD800
	surrogateMax = 0xDFFF

	maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
	runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
)

func appendRune(p []byte, r rune) []byte {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	switch i := uint32(r); {
	case i <= rune1Max:
		p = append(p, byte(r))
		return p
	case i <= rune2Max:
		p = append(p, t2|byte(r>>6))
		p = append(p, tx|byte(r)&maskx)
		return p
	case i > maxRune, surrogateMin <= i && i <= surrogateMax:
		r = runeError
		fallthrough
	case i <= rune3Max:
		p = append(p, t3|byte(r>>12))
		p = append(p, tx|byte(r>>6)&maskx)
		p = append(p, tx|byte(r)&maskx)
		return p
	default:
		p = append(p, t4|byte(r>>18))
		p = append(p, tx|byte(r>>12)&maskx)
		p = append(p, tx|byte(r>>6)&maskx)
		p = append(p, tx|byte(r)&maskx)
		return p
	}
}
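appendRune is a local copy of UTF-8 encoding specialized for append, so its output must agree with Go's own string conversion. A quick spot check of the three-byte branch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// '€' (U+20AC) takes the three-byte branch: t3|0x02, tx|0x02, tx|0x2C.
	r := '€'
	manual := []byte{0xE0 | byte(r>>12), 0x80 | byte(r>>6)&0x3F, 0x80 | byte(r)&0x3F}
	fmt.Println(bytes.Equal(manual, []byte(string(r)))) // true: E2 82 AC
}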
15 vendor/github.com/json-iterator/go/feature_json_number.go generated vendored Normal file
@@ -0,0 +1,15 @@
package jsoniter

import "encoding/json"

// Number is jsoniter's counterpart of json.Number: a JSON number kept as its literal string.
type Number string

// CastJsonNumber returns the literal string behind a json.Number or jsoniter Number,
// and whether val was one of the two.
func CastJsonNumber(val interface{}) (string, bool) {
	switch typedVal := val.(type) {
	case json.Number:
		return string(typedVal), true
	case Number:
		return string(typedVal), true
	}
	return "", false
}
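A short usage sketch of CastJsonNumber with both accepted dynamic types:

package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	if s, ok := jsoniter.CastJsonNumber(json.Number("3.14")); ok {
		fmt.Println(s) // 3.14
	}
	if s, ok := jsoniter.CastJsonNumber(jsoniter.Number("42")); ok {
		fmt.Println(s) // 42
	}
	_, ok := jsoniter.CastJsonNumber("not a number")
	fmt.Println(ok) // false
}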
57 vendor/github.com/json-iterator/go/feature_pool.go generated vendored Normal file
@@ -0,0 +1,57 @@
package jsoniter

import (
	"io"
)

// IteratorPool a thread safe pool of iterators with same configuration
type IteratorPool interface {
	BorrowIterator(data []byte) *Iterator
	ReturnIterator(iter *Iterator)
}

// StreamPool a thread safe pool of streams with same configuration
type StreamPool interface {
	BorrowStream(writer io.Writer) *Stream
	ReturnStream(stream *Stream)
}

func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
	select {
	case stream := <-cfg.streamPool:
		stream.Reset(writer)
		return stream
	default:
		return NewStream(cfg, writer, 512)
	}
}

func (cfg *frozenConfig) ReturnStream(stream *Stream) {
	stream.Error = nil
	select {
	case cfg.streamPool <- stream:
		return
	default:
		return
	}
}

func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
	select {
	case iter := <-cfg.iteratorPool:
		iter.ResetBytes(data)
		return iter
	default:
		return ParseBytes(cfg, data)
	}
}

func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
	iter.Error = nil
	select {
	case cfg.iteratorPool <- iter:
		return
	default:
		return
	}
}
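Both pools are bounded channels: borrowing drains one if available and allocates otherwise; returning enqueues unless the channel is full, in which case the value is simply dropped for the GC. A usage sketch under the assumption, not confirmed by this diff, that ConfigDefault satisfies IteratorPool via *frozenConfig in this vendored version:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ConfigDefault.BorrowIterator([]byte(`[1,2,3]`))
	defer jsoniter.ConfigDefault.ReturnIterator(iter) // recycle instead of discarding
	sum := 0
	for iter.ReadArray() {
		sum += iter.ReadInt()
	}
	fmt.Println(sum) // 6
}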
691 vendor/github.com/json-iterator/go/feature_reflect.go generated vendored Normal file
@@ -0,0 +1,691 @@
package jsoniter

import (
	"encoding"
	"encoding/json"
	"fmt"
	"reflect"
	"time"
	"unsafe"
)

// ValDecoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValDecoder with json.Decoder.
// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
//
// Reflection on type to create decoders, which is then cached
// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions
// 1. create instance of new value, for example *int will need a int to be allocated
// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New
// 3. assignment to map, both key and value will be reflect.Value
// For a simple struct binding, it will be reflect.Value free and allocation free
type ValDecoder interface {
	Decode(ptr unsafe.Pointer, iter *Iterator)
}

// ValEncoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValEncoder with json.Encoder.
// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
type ValEncoder interface {
	IsEmpty(ptr unsafe.Pointer) bool
	Encode(ptr unsafe.Pointer, stream *Stream)
	EncodeInterface(val interface{}, stream *Stream)
}

type checkIsEmpty interface {
	IsEmpty(ptr unsafe.Pointer) bool
}
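Custom codecs plug in through these interfaces: a ValDecoder receives an unsafe.Pointer to the destination plus the live Iterator. A hedged sketch of a decoder that accepts a JSON string where a named int type is expected, registered through RegisterTypeDecoder (defined later in this diff); the Celsius type and registration key are hypothetical:

package main

import (
	"fmt"
	"strconv"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

type Celsius int

// celsiusDecoder implements jsoniter.ValDecoder: it reads a JSON string
// such as "21" and stores the parsed int through the raw pointer.
type celsiusDecoder struct{}

func (d *celsiusDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	n, err := strconv.Atoi(iter.ReadString())
	if err != nil {
		iter.ReportError("celsiusDecoder", err.Error())
		return
	}
	*(*Celsius)(ptr) = Celsius(n)
}

func main() {
	jsoniter.RegisterTypeDecoder("main.Celsius", &celsiusDecoder{})
	var c Celsius
	jsoniter.Unmarshal([]byte(`"21"`), &c)
	fmt.Println(c) // 21
}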
// WriteToStream the default implementation for TypeEncoder method EncodeInterface
func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) {
	e := (*emptyInterface)(unsafe.Pointer(&val))
	if e.word == nil {
		stream.WriteNil()
		return
	}
	if reflect.TypeOf(val).Kind() == reflect.Ptr {
		encoder.Encode(unsafe.Pointer(&e.word), stream)
	} else {
		encoder.Encode(e.word, stream)
	}
}

var jsonNumberType reflect.Type
var jsoniterNumberType reflect.Type
var jsonRawMessageType reflect.Type
var jsoniterRawMessageType reflect.Type
var anyType reflect.Type
var marshalerType reflect.Type
var unmarshalerType reflect.Type
var textMarshalerType reflect.Type
var textUnmarshalerType reflect.Type

func init() {
	jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem()
	jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem()
	jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
	jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem()
	anyType = reflect.TypeOf((*Any)(nil)).Elem()
	marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
	unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
	textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
}

type optionalDecoder struct {
	valueType    reflect.Type
	valueDecoder ValDecoder
}

func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if iter.ReadNil() {
		*((*unsafe.Pointer)(ptr)) = nil
	} else {
		if *((*unsafe.Pointer)(ptr)) == nil {
			//pointer to null, we have to allocate memory to hold the value
			value := reflect.New(decoder.valueType)
			newPtr := extractInterface(value.Interface()).word
			decoder.valueDecoder.Decode(newPtr, iter)
			*((*uintptr)(ptr)) = uintptr(newPtr)
		} else {
			//reuse existing instance
			decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
		}
	}
}

type deferenceDecoder struct {
	// only to dereference a pointer
	valueType    reflect.Type
	valueDecoder ValDecoder
}

func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	if *((*unsafe.Pointer)(ptr)) == nil {
		//pointer to null, we have to allocate memory to hold the value
		value := reflect.New(decoder.valueType)
		newPtr := extractInterface(value.Interface()).word
		decoder.valueDecoder.Decode(newPtr, iter)
		*((*uintptr)(ptr)) = uintptr(newPtr)
	} else {
		//reuse existing instance
		decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
	}
}

type optionalEncoder struct {
	valueEncoder ValEncoder
}

func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	if *((*unsafe.Pointer)(ptr)) == nil {
		stream.WriteNil()
	} else {
		encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
	}
}

func (encoder *optionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
	WriteToStream(val, stream, encoder)
}

func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	if *((*unsafe.Pointer)(ptr)) == nil {
		return true
	}
	return false
}
type placeholderEncoder struct {
	cfg      *frozenConfig
	cacheKey reflect.Type
}

func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	encoder.getRealEncoder().Encode(ptr, stream)
}

func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) {
	WriteToStream(val, stream, encoder)
}

func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return encoder.getRealEncoder().IsEmpty(ptr)
}

func (encoder *placeholderEncoder) getRealEncoder() ValEncoder {
	for i := 0; i < 30; i++ {
		realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey)
		_, isPlaceholder := realDecoder.(*placeholderEncoder)
		if isPlaceholder {
			time.Sleep(time.Second)
		} else {
			return realDecoder
		}
	}
	panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey))
}

type placeholderDecoder struct {
	cfg      *frozenConfig
	cacheKey reflect.Type
}

func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	for i := 0; i < 30; i++ {
		realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey)
		_, isPlaceholder := realDecoder.(*placeholderDecoder)
		if isPlaceholder {
			time.Sleep(time.Second)
		} else {
			realDecoder.Decode(ptr, iter)
			return
		}
	}
	panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey))
}

// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
	typ  unsafe.Pointer
	word unsafe.Pointer
}

// nonEmptyInterface is the header for an interface with method (not interface{})
type nonEmptyInterface struct {
	// see ../runtime/iface.go:/Itab
	itab *struct {
		ityp   unsafe.Pointer // static interface type
		typ    unsafe.Pointer // dynamic concrete type
		link   unsafe.Pointer
		bad    int32
		unused int32
		fun    [100000]unsafe.Pointer // method table
	}
	word unsafe.Pointer
}

// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
	typ := reflect.TypeOf(obj)
	cacheKey := typ.Elem()
	decoder, err := decoderOfType(iter.cfg, cacheKey)
	if err != nil {
		iter.Error = err
		return
	}
	e := (*emptyInterface)(unsafe.Pointer(&obj))
	decoder.Decode(e.word, iter)
}

// WriteVal copy the go interface into underlying JSON, same as json.Marshal
func (stream *Stream) WriteVal(val interface{}) {
	if nil == val {
		stream.WriteNil()
		return
	}
	typ := reflect.TypeOf(val)
	cacheKey := typ
	encoder, err := encoderOfType(stream.cfg, cacheKey)
	if err != nil {
		stream.Error = err
		return
	}
	encoder.EncodeInterface(val, stream)
}
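ReadVal and WriteVal are the reflection entry points underneath the json.Unmarshal/json.Marshal equivalents. A minimal round trip, again assuming ParseString and ConfigDefault from this vendored version:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	var p point
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"x":1,"y":2}`)
	iter.ReadVal(&p)                      // same contract as json.Unmarshal
	fmt.Printf("%+v %v\n", p, iter.Error) // {X:1 Y:2} <nil>
}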
type prefix string

func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) {
	if err != nil {
		return nil, fmt.Errorf("%s: %s", p, err.Error())
	}
	return decoder, err
}

func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) {
	if err != nil {
		return nil, fmt.Errorf("%s: %s", p, err.Error())
	}
	return encoder, err
}

func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
	cacheKey := typ
	decoder := cfg.getDecoderFromCache(cacheKey)
	if decoder != nil {
		return decoder, nil
	}
	decoder = getTypeDecoderFromExtension(typ)
	if decoder != nil {
		cfg.addDecoderToCache(cacheKey, decoder)
		return decoder, nil
	}
	decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey}
	cfg.addDecoderToCache(cacheKey, decoder)
	decoder, err := createDecoderOfType(cfg, typ)
	for _, extension := range extensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
	cfg.addDecoderToCache(cacheKey, decoder)
	return decoder, err
}
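Registering the placeholder before createDecoderOfType runs is what makes recursive types terminate: a nested reference to a type still under construction finds the placeholder in the cache instead of recursing, and the placeholder forwards to the finished codec once it lands. Visible with any self-referential struct:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// node references itself, so building its decoder revisits the same
// cache key mid-construction; the placeholder breaks the cycle.
type node struct {
	Value int   `json:"value"`
	Next  *node `json:"next"`
}

func main() {
	var head node
	jsoniter.Unmarshal([]byte(`{"value":1,"next":{"value":2,"next":null}}`), &head)
	fmt.Println(head.Value, head.Next.Value) // 1 2
}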
func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
	typeName := typ.String()
	if typ == jsonRawMessageType {
		return &jsonRawMessageCodec{}, nil
	}
	if typ == jsoniterRawMessageType {
		return &jsoniterRawMessageCodec{}, nil
	}
	if typ.AssignableTo(jsonNumberType) {
		return &jsonNumberCodec{}, nil
	}
	if typ.AssignableTo(jsoniterNumberType) {
		return &jsoniterNumberCodec{}, nil
	}
	if typ.Implements(unmarshalerType) {
		templateInterface := reflect.New(typ).Elem().Interface()
		var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
		if typ.Kind() == reflect.Ptr {
			decoder = &optionalDecoder{typ.Elem(), decoder}
		}
		return decoder, nil
	}
	if reflect.PtrTo(typ).Implements(unmarshalerType) {
		templateInterface := reflect.New(typ).Interface()
		var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
		return decoder, nil
	}
	if typ.Implements(textUnmarshalerType) {
		templateInterface := reflect.New(typ).Elem().Interface()
		var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
		if typ.Kind() == reflect.Ptr {
			decoder = &optionalDecoder{typ.Elem(), decoder}
		}
		return decoder, nil
	}
	if reflect.PtrTo(typ).Implements(textUnmarshalerType) {
		templateInterface := reflect.New(typ).Interface()
		var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
		return decoder, nil
	}
	if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
		sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
		if err != nil {
			return nil, err
		}
		return &base64Codec{sliceDecoder: sliceDecoder}, nil
	}
	if typ.Implements(anyType) {
		return &anyCodec{}, nil
	}
	switch typ.Kind() {
	case reflect.String:
		if typeName != "string" {
			return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
		}
		return &stringCodec{}, nil
	case reflect.Int:
		if typeName != "int" {
			return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
		}
		return &intCodec{}, nil
	case reflect.Int8:
		if typeName != "int8" {
			return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
		}
		return &int8Codec{}, nil
	case reflect.Int16:
		if typeName != "int16" {
			return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
		}
		return &int16Codec{}, nil
	case reflect.Int32:
		if typeName != "int32" {
			return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
		}
		return &int32Codec{}, nil
	case reflect.Int64:
		if typeName != "int64" {
			return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
		}
		return &int64Codec{}, nil
	case reflect.Uint:
		if typeName != "uint" {
			return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
		}
		return &uintCodec{}, nil
	case reflect.Uint8:
		if typeName != "uint8" {
			return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
		}
		return &uint8Codec{}, nil
	case reflect.Uint16:
		if typeName != "uint16" {
			return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
		}
		return &uint16Codec{}, nil
	case reflect.Uint32:
		if typeName != "uint32" {
			return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
		}
		return &uint32Codec{}, nil
	case reflect.Uintptr:
		if typeName != "uintptr" {
			return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
		}
		return &uintptrCodec{}, nil
	case reflect.Uint64:
		if typeName != "uint64" {
			return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
		}
		return &uint64Codec{}, nil
	case reflect.Float32:
		if typeName != "float32" {
			return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
		}
		return &float32Codec{}, nil
	case reflect.Float64:
		if typeName != "float64" {
			return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
		}
		return &float64Codec{}, nil
	case reflect.Bool:
		if typeName != "bool" {
			return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
		}
		return &boolCodec{}, nil
	case reflect.Interface:
		if typ.NumMethod() == 0 {
			return &emptyInterfaceCodec{}, nil
		}
		return &nonEmptyInterfaceCodec{}, nil
	case reflect.Struct:
		return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ))
	case reflect.Array:
		return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ))
	case reflect.Slice:
		return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
	case reflect.Map:
		return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ))
	case reflect.Ptr:
		return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ))
	default:
		return nil, fmt.Errorf("unsupported type: %v", typ)
	}
}

func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	cacheKey := typ
	encoder := cfg.getEncoderFromCache(cacheKey)
	if encoder != nil {
		return encoder, nil
	}
	encoder = getTypeEncoderFromExtension(typ)
	if encoder != nil {
		cfg.addEncoderToCache(cacheKey, encoder)
		return encoder, nil
	}
	encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey}
	cfg.addEncoderToCache(cacheKey, encoder)
	encoder, err := createEncoderOfType(cfg, typ)
	for _, extension := range extensions {
		encoder = extension.DecorateEncoder(typ, encoder)
	}
	cfg.addEncoderToCache(cacheKey, encoder)
	return encoder, err
}

func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	if typ == jsonRawMessageType {
		return &jsonRawMessageCodec{}, nil
	}
	if typ == jsoniterRawMessageType {
		return &jsoniterRawMessageCodec{}, nil
	}
	if typ.AssignableTo(jsonNumberType) {
		return &jsonNumberCodec{}, nil
	}
	if typ.AssignableTo(jsoniterNumberType) {
		return &jsoniterNumberCodec{}, nil
	}
	if typ.Implements(marshalerType) {
		checkIsEmpty, err := createCheckIsEmpty(typ)
		if err != nil {
			return nil, err
		}
		templateInterface := reflect.New(typ).Elem().Interface()
		var encoder ValEncoder = &marshalerEncoder{
			templateInterface: extractInterface(templateInterface),
			checkIsEmpty:      checkIsEmpty,
		}
		if typ.Kind() == reflect.Ptr {
			encoder = &optionalEncoder{encoder}
		}
		return encoder, nil
	}
	if typ.Implements(textMarshalerType) {
		checkIsEmpty, err := createCheckIsEmpty(typ)
		if err != nil {
			return nil, err
		}
		templateInterface := reflect.New(typ).Elem().Interface()
		var encoder ValEncoder = &textMarshalerEncoder{
			templateInterface: extractInterface(templateInterface),
			checkIsEmpty:      checkIsEmpty,
		}
		if typ.Kind() == reflect.Ptr {
			encoder = &optionalEncoder{encoder}
		}
		return encoder, nil
	}
	if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
		return &base64Codec{}, nil
	}
	if typ.Implements(anyType) {
		return &anyCodec{}, nil
	}
	return createEncoderOfSimpleType(cfg, typ)
}

func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) {
	kind := typ.Kind()
	switch kind {
	case reflect.String:
		return &stringCodec{}, nil
	case reflect.Int:
		return &intCodec{}, nil
	case reflect.Int8:
		return &int8Codec{}, nil
	case reflect.Int16:
		return &int16Codec{}, nil
	case reflect.Int32:
		return &int32Codec{}, nil
	case reflect.Int64:
		return &int64Codec{}, nil
	case reflect.Uint:
		return &uintCodec{}, nil
	case reflect.Uint8:
		return &uint8Codec{}, nil
	case reflect.Uint16:
		return &uint16Codec{}, nil
	case reflect.Uint32:
		return &uint32Codec{}, nil
	case reflect.Uintptr:
		return &uintptrCodec{}, nil
	case reflect.Uint64:
		return &uint64Codec{}, nil
	case reflect.Float32:
		return &float32Codec{}, nil
	case reflect.Float64:
		return &float64Codec{}, nil
	case reflect.Bool:
		return &boolCodec{}, nil
	case reflect.Interface:
		if typ.NumMethod() == 0 {
			return &emptyInterfaceCodec{}, nil
		}
		return &nonEmptyInterfaceCodec{}, nil
	case reflect.Struct:
		return &structEncoder{}, nil
	case reflect.Array:
		return &arrayEncoder{}, nil
	case reflect.Slice:
		return &sliceEncoder{}, nil
	case reflect.Map:
		return &mapEncoder{}, nil
	case reflect.Ptr:
		return &optionalEncoder{}, nil
	default:
		return nil, fmt.Errorf("unsupported type: %v", typ)
	}
}

func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	typeName := typ.String()
	kind := typ.Kind()
	switch kind {
	case reflect.String:
		if typeName != "string" {
			return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
		}
		return &stringCodec{}, nil
	case reflect.Int:
		if typeName != "int" {
			return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
		}
		return &intCodec{}, nil
	case reflect.Int8:
		if typeName != "int8" {
			return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
		}
		return &int8Codec{}, nil
	case reflect.Int16:
		if typeName != "int16" {
			return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
		}
		return &int16Codec{}, nil
	case reflect.Int32:
		if typeName != "int32" {
			return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
		}
		return &int32Codec{}, nil
	case reflect.Int64:
		if typeName != "int64" {
			return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
		}
		return &int64Codec{}, nil
	case reflect.Uint:
		if typeName != "uint" {
			return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
		}
		return &uintCodec{}, nil
	case reflect.Uint8:
		if typeName != "uint8" {
			return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
		}
		return &uint8Codec{}, nil
	case reflect.Uint16:
		if typeName != "uint16" {
			return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
		}
		return &uint16Codec{}, nil
	case reflect.Uint32:
		if typeName != "uint32" {
			return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
		}
		return &uint32Codec{}, nil
	case reflect.Uintptr:
		if typeName != "uintptr" {
			return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
		}
		return &uintptrCodec{}, nil
	case reflect.Uint64:
		if typeName != "uint64" {
			return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
		}
		return &uint64Codec{}, nil
	case reflect.Float32:
		if typeName != "float32" {
			return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
		}
		return &float32Codec{}, nil
	case reflect.Float64:
		if typeName != "float64" {
			return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
		}
		return &float64Codec{}, nil
	case reflect.Bool:
		if typeName != "bool" {
			return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
		}
		return &boolCodec{}, nil
	case reflect.Interface:
		if typ.NumMethod() == 0 {
			return &emptyInterfaceCodec{}, nil
		}
		return &nonEmptyInterfaceCodec{}, nil
	case reflect.Struct:
		return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ))
	case reflect.Array:
		return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ))
	case reflect.Slice:
		return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ))
	case reflect.Map:
		return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ))
	case reflect.Ptr:
		return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ))
	default:
		return nil, fmt.Errorf("unsupported type: %v", typ)
	}
}

func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
	elemType := typ.Elem()
	decoder, err := decoderOfType(cfg, elemType)
	if err != nil {
		return nil, err
	}
	return &optionalDecoder{elemType, decoder}, nil
}

func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	elemType := typ.Elem()
	elemEncoder, err := encoderOfType(cfg, elemType)
	if err != nil {
		return nil, err
	}
	encoder := &optionalEncoder{elemEncoder}
	if elemType.Kind() == reflect.Map {
		encoder = &optionalEncoder{encoder}
	}
	return encoder, nil
}

func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
	decoder, err := decoderOfType(cfg, typ.Elem())
	if err != nil {
		return nil, err
	}
	mapInterface := reflect.New(typ).Interface()
	return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil
}

func extractInterface(val interface{}) emptyInterface {
	return *((*emptyInterface)(unsafe.Pointer(&val)))
}

func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	elemType := typ.Elem()
	encoder, err := encoderOfType(cfg, elemType)
	if err != nil {
		return nil, err
	}
	mapInterface := reflect.New(typ).Elem().Interface()
	if cfg.sortMapKeys {
		return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil
	}
	return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil
}
99 vendor/github.com/json-iterator/go/feature_reflect_array.go generated vendored Normal file
@@ -0,0 +1,99 @@
package jsoniter

import (
	"fmt"
	"io"
	"reflect"
	"unsafe"
)

func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
	decoder, err := decoderOfType(cfg, typ.Elem())
	if err != nil {
		return nil, err
	}
	return &arrayDecoder{typ, typ.Elem(), decoder}, nil
}

func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	encoder, err := encoderOfType(cfg, typ.Elem())
	if err != nil {
		return nil, err
	}
	if typ.Elem().Kind() == reflect.Map {
		encoder = &optionalEncoder{encoder}
	}
	return &arrayEncoder{typ, typ.Elem(), encoder}, nil
}

type arrayEncoder struct {
	arrayType   reflect.Type
	elemType    reflect.Type
	elemEncoder ValEncoder
}

func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	stream.WriteArrayStart()
	elemPtr := unsafe.Pointer(ptr)
	encoder.elemEncoder.Encode(elemPtr, stream)
	for i := 1; i < encoder.arrayType.Len(); i++ {
		stream.WriteMore()
		elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
		encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
	}
	stream.WriteArrayEnd()
	if stream.Error != nil && stream.Error != io.EOF {
		stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
	}
}

func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) {
	// special optimization for interface{}
	e := (*emptyInterface)(unsafe.Pointer(&val))
	if e.word == nil {
		stream.WriteArrayStart()
		stream.WriteNil()
		stream.WriteArrayEnd()
		return
	}
	elemType := encoder.arrayType.Elem()
	if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) {
		ptr := uintptr(e.word)
		e.word = unsafe.Pointer(&ptr)
	}
	if reflect.TypeOf(val).Kind() == reflect.Ptr {
		encoder.Encode(unsafe.Pointer(&e.word), stream)
	} else {
		encoder.Encode(e.word, stream)
	}
}

func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	return false
}

type arrayDecoder struct {
	arrayType   reflect.Type
	elemType    reflect.Type
	elemDecoder ValDecoder
}

func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	decoder.doDecode(ptr, iter)
	if iter.Error != nil && iter.Error != io.EOF {
		iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
	}
}

func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
	offset := uintptr(0)
	iter.ReadArrayCB(func(iter *Iterator) bool {
		if offset < decoder.arrayType.Size() {
			decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter)
			offset += decoder.elemType.Size()
		} else {
			iter.Skip()
		}
		return true
	})
}
413 vendor/github.com/json-iterator/go/feature_reflect_extension.go generated vendored Normal file
@@ -0,0 +1,413 @@
package jsoniter

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
	"unicode"
	"unsafe"
)

var typeDecoders = map[string]ValDecoder{}
var fieldDecoders = map[string]ValDecoder{}
var typeEncoders = map[string]ValEncoder{}
var fieldEncoders = map[string]ValEncoder{}
var extensions = []Extension{}

// StructDescriptor describe how should we encode/decode the struct
type StructDescriptor struct {
	onePtrEmbedded     bool
	onePtrOptimization bool
	Type               reflect.Type
	Fields             []*Binding
}

// GetField get one field from the descriptor by its name.
// Can not use map here to keep field orders.
func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
	for _, binding := range structDescriptor.Fields {
		if binding.Field.Name == fieldName {
			return binding
		}
	}
	return nil
}

// Binding describe how should we encode/decode the struct field
type Binding struct {
	levels    []int
	Field     *reflect.StructField
	FromNames []string
	ToNames   []string
	Encoder   ValEncoder
	Decoder   ValDecoder
}

// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder.
// Can also rename fields by UpdateStructDescriptor.
type Extension interface {
	UpdateStructDescriptor(structDescriptor *StructDescriptor)
	CreateDecoder(typ reflect.Type) ValDecoder
	CreateEncoder(typ reflect.Type) ValEncoder
	DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder
	DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder
}

// DummyExtension embed this type get dummy implementation for all methods of Extension
type DummyExtension struct {
}

// UpdateStructDescriptor No-op
func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
}

// CreateDecoder No-op
func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder {
	return nil
}

// CreateEncoder No-op
func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder {
	return nil
}

// DecorateDecoder No-op
func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder {
	return decoder
}

// DecorateEncoder No-op
func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder {
	return encoder
}

type funcDecoder struct {
	fun DecoderFunc
}

func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	decoder.fun(ptr, iter)
}

type funcEncoder struct {
	fun         EncoderFunc
	isEmptyFunc func(ptr unsafe.Pointer) bool
}

func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	encoder.fun(ptr, stream)
}

func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) {
	WriteToStream(val, stream, encoder)
}

func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	if encoder.isEmptyFunc == nil {
		return false
	}
	return encoder.isEmptyFunc(ptr)
}

// DecoderFunc the function form of TypeDecoder
type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)

// EncoderFunc the function form of TypeEncoder
type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)

// RegisterTypeDecoderFunc register TypeDecoder for a type with function
func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
	typeDecoders[typ] = &funcDecoder{fun}
}

// RegisterTypeDecoder register TypeDecoder for a typ
func RegisterTypeDecoder(typ string, decoder ValDecoder) {
	typeDecoders[typ] = decoder
}

// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function
func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
	RegisterFieldDecoder(typ, field, &funcDecoder{fun})
}

// RegisterFieldDecoder register TypeDecoder for a struct field
func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
	fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
}

// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function
func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
	typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
}

// RegisterTypeEncoder register TypeEncoder for a type
func RegisterTypeEncoder(typ string, encoder ValEncoder) {
	typeEncoders[typ] = encoder
}

// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function
func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
	RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
}

// RegisterFieldEncoder register TypeEncoder for a struct field
func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
	fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
}

// RegisterExtension register extension
|
||||
func RegisterExtension(extension Extension) {
|
||||
extensions = append(extensions, extension)
|
||||
}
|
||||
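A hedged usage sketch for the registration helpers above; the Celsius type and the "main.Celsius" key are hypothetical, and the key follows the reflect.Type.String() form used by the lookups below.

package main

import (
	"fmt"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

type Celsius float64

func init() {
	// Register a custom encoder plus an isEmpty check for the Celsius type.
	jsoniter.RegisterTypeEncoderFunc("main.Celsius",
		func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
			stream.WriteFloat64(float64(*(*Celsius)(ptr)))
		},
		func(ptr unsafe.Pointer) bool { return *(*Celsius)(ptr) == 0 },
	)
}

func main() {
	out, _ := jsoniter.Marshal(struct{ Temp Celsius }{Temp: 36.6})
	fmt.Println(string(out)) // {"Temp":36.6}
}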
|
||||
func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
|
||||
decoder := _getTypeDecoderFromExtension(typ)
|
||||
if decoder != nil {
|
||||
for _, extension := range extensions {
|
||||
decoder = extension.DecorateDecoder(typ, decoder)
|
||||
}
|
||||
}
|
||||
return decoder
|
||||
}
|
||||
func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
|
||||
for _, extension := range extensions {
|
||||
decoder := extension.CreateDecoder(typ)
|
||||
if decoder != nil {
|
||||
return decoder
|
||||
}
|
||||
}
|
||||
typeName := typ.String()
|
||||
decoder := typeDecoders[typeName]
|
||||
if decoder != nil {
|
||||
return decoder
|
||||
}
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
decoder := typeDecoders[typ.Elem().String()]
|
||||
if decoder != nil {
|
||||
return &optionalDecoder{typ.Elem(), decoder}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
|
||||
encoder := _getTypeEncoderFromExtension(typ)
|
||||
if encoder != nil {
|
||||
for _, extension := range extensions {
|
||||
encoder = extension.DecorateEncoder(typ, encoder)
|
||||
}
|
||||
}
|
||||
return encoder
|
||||
}
|
||||
|
||||
func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
|
||||
for _, extension := range extensions {
|
||||
encoder := extension.CreateEncoder(typ)
|
||||
if encoder != nil {
|
||||
return encoder
|
||||
}
|
||||
}
|
||||
typeName := typ.String()
|
||||
encoder := typeEncoders[typeName]
|
||||
if encoder != nil {
|
||||
return encoder
|
||||
}
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
encoder := typeEncoders[typ.Elem().String()]
|
||||
if encoder != nil {
|
||||
return &optionalEncoder{encoder}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) {
|
||||
embeddedBindings := []*Binding{}
|
||||
bindings := []*Binding{}
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := typ.Field(i)
|
||||
tag := field.Tag.Get(cfg.getTagKey())
|
||||
tagParts := strings.Split(tag, ",")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
if field.Anonymous && (tag == "" || tagParts[0] == "") {
|
||||
if field.Type.Kind() == reflect.Struct {
|
||||
structDescriptor, err := describeStruct(cfg, field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, binding := range structDescriptor.Fields {
|
||||
binding.levels = append([]int{i}, binding.levels...)
|
||||
omitempty := binding.Encoder.(*structFieldEncoder).omitempty
|
||||
binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
|
||||
binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
|
||||
embeddedBindings = append(embeddedBindings, binding)
|
||||
}
|
||||
continue
|
||||
} else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
|
||||
structDescriptor, err := describeStruct(cfg, field.Type.Elem())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, binding := range structDescriptor.Fields {
|
||||
binding.levels = append([]int{i}, binding.levels...)
|
||||
omitempty := binding.Encoder.(*structFieldEncoder).omitempty
|
||||
binding.Encoder = &optionalEncoder{binding.Encoder}
|
||||
binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
|
||||
binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder}
|
||||
binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
|
||||
embeddedBindings = append(embeddedBindings, binding)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
fieldNames := calcFieldNames(field.Name, tagParts[0], tag)
|
||||
fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name)
|
||||
decoder := fieldDecoders[fieldCacheKey]
|
||||
if decoder == nil {
|
||||
var err error
|
||||
decoder, err = decoderOfType(cfg, field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
encoder := fieldEncoders[fieldCacheKey]
|
||||
if encoder == nil {
|
||||
var err error
|
||||
encoder, err = encoderOfType(cfg, field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// map is stored as pointer in the struct
|
||||
if field.Type.Kind() == reflect.Map {
|
||||
encoder = &optionalEncoder{encoder}
|
||||
}
|
||||
}
|
||||
binding := &Binding{
|
||||
Field: &field,
|
||||
FromNames: fieldNames,
|
||||
ToNames: fieldNames,
|
||||
Decoder: decoder,
|
||||
Encoder: encoder,
|
||||
}
|
||||
binding.levels = []int{i}
|
||||
bindings = append(bindings, binding)
|
||||
}
|
||||
return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil
|
||||
}
|
||||
func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
|
||||
onePtrEmbedded := false
|
||||
onePtrOptimization := false
|
||||
if typ.NumField() == 1 {
|
||||
firstField := typ.Field(0)
|
||||
switch firstField.Type.Kind() {
|
||||
case reflect.Ptr:
|
||||
if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct {
|
||||
onePtrEmbedded = true
|
||||
}
|
||||
fallthrough
|
||||
case reflect.Map:
|
||||
onePtrOptimization = true
|
||||
case reflect.Struct:
|
||||
onePtrOptimization = isStructOnePtr(firstField.Type)
|
||||
}
|
||||
}
|
||||
structDescriptor := &StructDescriptor{
|
||||
onePtrEmbedded: onePtrEmbedded,
|
||||
onePtrOptimization: onePtrOptimization,
|
||||
Type: typ,
|
||||
Fields: bindings,
|
||||
}
|
||||
for _, extension := range extensions {
|
||||
extension.UpdateStructDescriptor(structDescriptor)
|
||||
}
|
||||
processTags(structDescriptor, cfg)
|
||||
// merge normal & embedded bindings & sort with original order
|
||||
allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
|
||||
sort.Sort(allBindings)
|
||||
structDescriptor.Fields = allBindings
|
||||
return structDescriptor
|
||||
}
|
||||
|
||||
func isStructOnePtr(typ reflect.Type) bool {
|
||||
if typ.NumField() == 1 {
|
||||
firstField := typ.Field(0)
|
||||
switch firstField.Type.Kind() {
|
||||
case reflect.Ptr:
|
||||
return true
|
||||
case reflect.Map:
|
||||
return true
|
||||
case reflect.Struct:
|
||||
return isStructOnePtr(firstField.Type)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type sortableBindings []*Binding
|
||||
|
||||
func (bindings sortableBindings) Len() int {
|
||||
return len(bindings)
|
||||
}
|
||||
|
||||
func (bindings sortableBindings) Less(i, j int) bool {
|
||||
left := bindings[i].levels
|
||||
right := bindings[j].levels
|
||||
k := 0
|
||||
for {
|
||||
if left[k] < right[k] {
|
||||
return true
|
||||
} else if left[k] > right[k] {
|
||||
return false
|
||||
}
|
||||
k++
|
||||
}
|
||||
}
|
||||
|
||||
func (bindings sortableBindings) Swap(i, j int) {
|
||||
bindings[i], bindings[j] = bindings[j], bindings[i]
|
||||
}
|
||||
|
||||
func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
|
||||
for _, binding := range structDescriptor.Fields {
|
||||
shouldOmitEmpty := false
|
||||
tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",")
|
||||
for _, tagPart := range tagParts[1:] {
|
||||
if tagPart == "omitempty" {
|
||||
shouldOmitEmpty = true
|
||||
} else if tagPart == "string" {
|
||||
if binding.Field.Type.Kind() == reflect.String {
|
||||
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
|
||||
binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
|
||||
} else {
|
||||
binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
|
||||
binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
|
||||
}
|
||||
}
|
||||
}
|
||||
binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
|
||||
binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
|
||||
}
|
||||
}
|
||||
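A short sketch of the ",string" and "omitempty" tag handling wired up here; the Order type is hypothetical and the output assumes the default configuration.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type Order struct {
	ID   int64  `json:"id,string"`      // numeric field encoded inside quotes
	Note string `json:"note,omitempty"` // dropped when empty
}

func main() {
	out, _ := jsoniter.Marshal(Order{ID: 42})
	fmt.Println(string(out)) // {"id":"42"}
}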
|
||||
func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
|
||||
// ignore?
|
||||
if wholeTag == "-" {
|
||||
return []string{}
|
||||
}
|
||||
// rename?
|
||||
var fieldNames []string
|
||||
if tagProvidedFieldName == "" {
|
||||
fieldNames = []string{originalFieldName}
|
||||
} else {
|
||||
fieldNames = []string{tagProvidedFieldName}
|
||||
}
|
||||
// private?
|
||||
isNotExported := unicode.IsLower(rune(originalFieldName[0]))
|
||||
if isNotExported {
|
||||
fieldNames = []string{}
|
||||
}
|
||||
return fieldNames
|
||||
}
244
vendor/github.com/json-iterator/go/feature_reflect_map.go
generated
vendored
Normal file
@@ -0,0 +1,244 @@
package jsoniter
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type mapDecoder struct {
|
||||
mapType reflect.Type
|
||||
keyType reflect.Type
|
||||
elemType reflect.Type
|
||||
elemDecoder ValDecoder
|
||||
mapInterface emptyInterface
|
||||
}
|
||||
|
||||
func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
// dark magic to cast unsafe.Pointer back to interface{} using reflect.Type
|
||||
mapInterface := decoder.mapInterface
|
||||
mapInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
|
||||
realVal := reflect.ValueOf(*realInterface).Elem()
|
||||
if iter.ReadNil() {
|
||||
realVal.Set(reflect.Zero(decoder.mapType))
|
||||
return
|
||||
}
|
||||
if realVal.IsNil() {
|
||||
realVal.Set(reflect.MakeMap(realVal.Type()))
|
||||
}
|
||||
iter.ReadMapCB(func(iter *Iterator, keyStr string) bool {
|
||||
elem := reflect.New(decoder.elemType)
|
||||
decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter)
|
||||
// to put into map, we have to use reflection
|
||||
keyType := decoder.keyType
|
||||
// TODO: remove this from loop
|
||||
switch {
|
||||
case keyType.Kind() == reflect.String:
|
||||
realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem())
|
||||
return true
|
||||
case keyType.Implements(textUnmarshalerType):
|
||||
textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler)
|
||||
err := textUnmarshaler.UnmarshalText([]byte(keyStr))
|
||||
if err != nil {
|
||||
iter.ReportError("read map key as TextUnmarshaler", err.Error())
|
||||
return false
|
||||
}
|
||||
realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem())
|
||||
return true
|
||||
case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
|
||||
textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler)
|
||||
err := textUnmarshaler.UnmarshalText([]byte(keyStr))
|
||||
if err != nil {
|
||||
iter.ReportError("read map key as TextUnmarshaler", err.Error())
|
||||
return false
|
||||
}
|
||||
realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem())
|
||||
return true
|
||||
default:
|
||||
switch keyType.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n, err := strconv.ParseInt(keyStr, 10, 64)
|
||||
if err != nil || reflect.Zero(keyType).OverflowInt(n) {
|
||||
iter.ReportError("read map key as int64", "read int64 failed")
|
||||
return false
|
||||
}
|
||||
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem())
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
n, err := strconv.ParseUint(keyStr, 10, 64)
|
||||
if err != nil || reflect.Zero(keyType).OverflowUint(n) {
|
||||
iter.ReportError("read map key as uint64", "read uint64 failed")
|
||||
return false
|
||||
}
|
||||
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem())
|
||||
return true
|
||||
}
|
||||
}
|
||||
iter.ReportError("read map key", "unexpected map key type "+keyType.String())
|
||||
return true
|
||||
})
|
||||
}
|
||||
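A small sketch of the key conversion handled above: JSON object keys arrive as strings and are parsed back into the map's key type, assuming the package-level jsoniter.Unmarshal helper.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	m := map[int]string{}
	// String keys "1" and "2" are converted to int map keys.
	if err := jsoniter.Unmarshal([]byte(`{"1":"one","2":"two"}`), &m); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(m[1], m[2]) // one two
}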
|
||||
type mapEncoder struct {
|
||||
mapType reflect.Type
|
||||
elemType reflect.Type
|
||||
elemEncoder ValEncoder
|
||||
mapInterface emptyInterface
|
||||
}
|
||||
|
||||
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
mapInterface := encoder.mapInterface
|
||||
mapInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
|
||||
realVal := reflect.ValueOf(*realInterface)
|
||||
stream.WriteObjectStart()
|
||||
for i, key := range realVal.MapKeys() {
|
||||
if i != 0 {
|
||||
stream.WriteMore()
|
||||
}
|
||||
encodeMapKey(key, stream)
|
||||
if stream.indention > 0 {
|
||||
stream.writeTwoBytes(byte(':'), byte(' '))
|
||||
} else {
|
||||
stream.writeByte(':')
|
||||
}
|
||||
val := realVal.MapIndex(key).Interface()
|
||||
encoder.elemEncoder.EncodeInterface(val, stream)
|
||||
}
|
||||
stream.WriteObjectEnd()
|
||||
}
|
||||
|
||||
func encodeMapKey(key reflect.Value, stream *Stream) {
|
||||
if key.Kind() == reflect.String {
|
||||
stream.WriteString(key.String())
|
||||
return
|
||||
}
|
||||
if tm, ok := key.Interface().(encoding.TextMarshaler); ok {
|
||||
buf, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
stream.Error = err
|
||||
return
|
||||
}
|
||||
stream.writeByte('"')
|
||||
stream.Write(buf)
|
||||
stream.writeByte('"')
|
||||
return
|
||||
}
|
||||
switch key.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
stream.writeByte('"')
|
||||
stream.WriteInt64(key.Int())
|
||||
stream.writeByte('"')
|
||||
return
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
stream.writeByte('"')
|
||||
stream.WriteUint64(key.Uint())
|
||||
stream.writeByte('"')
|
||||
return
|
||||
}
|
||||
stream.Error = &json.UnsupportedTypeError{Type: key.Type()}
|
||||
}
|
||||
|
||||
func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
mapInterface := encoder.mapInterface
|
||||
mapInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
|
||||
realVal := reflect.ValueOf(*realInterface)
|
||||
return realVal.Len() == 0
|
||||
}
|
||||
|
||||
type sortKeysMapEncoder struct {
|
||||
mapType reflect.Type
|
||||
elemType reflect.Type
|
||||
elemEncoder ValEncoder
|
||||
mapInterface emptyInterface
|
||||
}
|
||||
|
||||
func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
mapInterface := encoder.mapInterface
|
||||
mapInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
|
||||
realVal := reflect.ValueOf(*realInterface)
|
||||
|
||||
// Extract and sort the keys.
|
||||
keys := realVal.MapKeys()
|
||||
sv := stringValues(make([]reflectWithString, len(keys)))
|
||||
for i, v := range keys {
|
||||
sv[i].v = v
|
||||
if err := sv[i].resolve(); err != nil {
|
||||
stream.Error = err
|
||||
return
|
||||
}
|
||||
}
|
||||
sort.Sort(sv)
|
||||
|
||||
stream.WriteObjectStart()
|
||||
for i, key := range sv {
|
||||
if i != 0 {
|
||||
stream.WriteMore()
|
||||
}
|
||||
stream.WriteVal(key.s) // might need html escape, so can not WriteString directly
|
||||
if stream.indention > 0 {
|
||||
stream.writeTwoBytes(byte(':'), byte(' '))
|
||||
} else {
|
||||
stream.writeByte(':')
|
||||
}
|
||||
val := realVal.MapIndex(key.v).Interface()
|
||||
encoder.elemEncoder.EncodeInterface(val, stream)
|
||||
}
|
||||
stream.WriteObjectEnd()
|
||||
}
|
||||
|
||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
||||
// It implements the methods to sort by string.
|
||||
type stringValues []reflectWithString
|
||||
|
||||
type reflectWithString struct {
|
||||
v reflect.Value
|
||||
s string
|
||||
}
|
||||
|
||||
func (w *reflectWithString) resolve() error {
|
||||
if w.v.Kind() == reflect.String {
|
||||
w.s = w.v.String()
|
||||
return nil
|
||||
}
|
||||
if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok {
|
||||
buf, err := tm.MarshalText()
|
||||
w.s = string(buf)
|
||||
return err
|
||||
}
|
||||
switch w.v.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
w.s = strconv.FormatInt(w.v.Int(), 10)
|
||||
return nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
w.s = strconv.FormatUint(w.v.Uint(), 10)
|
||||
return nil
|
||||
}
|
||||
return &json.UnsupportedTypeError{Type: w.v.Type()}
|
||||
}
|
||||
|
||||
func (sv stringValues) Len() int { return len(sv) }
|
||||
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
|
||||
func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s }
|
||||
|
||||
func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
mapInterface := encoder.mapInterface
|
||||
mapInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
|
||||
realVal := reflect.ValueOf(*realInterface)
|
||||
return realVal.Len() == 0
|
||||
}
672
vendor/github.com/json-iterator/go/feature_reflect_native.go
generated
vendored
Normal file
@@ -0,0 +1,672 @@
package jsoniter
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type stringCodec struct {
|
||||
}
|
||||
|
||||
func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*string)(ptr)) = iter.ReadString()
|
||||
}
|
||||
|
||||
func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
str := *((*string)(ptr))
|
||||
stream.WriteString(str)
|
||||
}
|
||||
|
||||
func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*string)(ptr)) == ""
|
||||
}
|
||||
|
||||
type intCodec struct {
|
||||
}
|
||||
|
||||
func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*int)(ptr)) = iter.ReadInt()
|
||||
}
|
||||
|
||||
func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteInt(*((*int)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*int)(ptr)) == 0
|
||||
}
|
||||
|
||||
type uintptrCodec struct {
|
||||
}
|
||||
|
||||
func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64())
|
||||
}
|
||||
|
||||
func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteUint64(uint64(*((*uintptr)(ptr))))
|
||||
}
|
||||
|
||||
func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*uintptr)(ptr)) == 0
|
||||
}
|
||||
|
||||
type int8Codec struct {
|
||||
}
|
||||
|
||||
func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*int8)(ptr)) = iter.ReadInt8()
|
||||
}
|
||||
|
||||
func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteInt8(*((*int8)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*int8)(ptr)) == 0
|
||||
}
|
||||
|
||||
type int16Codec struct {
|
||||
}
|
||||
|
||||
func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*int16)(ptr)) = iter.ReadInt16()
|
||||
}
|
||||
|
||||
func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteInt16(*((*int16)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*int16)(ptr)) == 0
|
||||
}
|
||||
|
||||
type int32Codec struct {
|
||||
}
|
||||
|
||||
func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*int32)(ptr)) = iter.ReadInt32()
|
||||
}
|
||||
|
||||
func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteInt32(*((*int32)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*int32)(ptr)) == 0
|
||||
}
|
||||
|
||||
type int64Codec struct {
|
||||
}
|
||||
|
||||
func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*int64)(ptr)) = iter.ReadInt64()
|
||||
}
|
||||
|
||||
func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteInt64(*((*int64)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*int64)(ptr)) == 0
|
||||
}
|
||||
|
||||
type uintCodec struct {
|
||||
}
|
||||
|
||||
func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*uint)(ptr)) = iter.ReadUint()
|
||||
}
|
||||
|
||||
func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteUint(*((*uint)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*uint)(ptr)) == 0
|
||||
}
|
||||
|
||||
type uint8Codec struct {
|
||||
}
|
||||
|
||||
func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*uint8)(ptr)) = iter.ReadUint8()
|
||||
}
|
||||
|
||||
func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteUint8(*((*uint8)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*uint8)(ptr)) == 0
|
||||
}
|
||||
|
||||
type uint16Codec struct {
|
||||
}
|
||||
|
||||
func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*uint16)(ptr)) = iter.ReadUint16()
|
||||
}
|
||||
|
||||
func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteUint16(*((*uint16)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*uint16)(ptr)) == 0
|
||||
}
|
||||
|
||||
type uint32Codec struct {
|
||||
}
|
||||
|
||||
func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*uint32)(ptr)) = iter.ReadUint32()
|
||||
}
|
||||
|
||||
func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteUint32(*((*uint32)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*uint32)(ptr)) == 0
|
||||
}
|
||||
|
||||
type uint64Codec struct {
|
||||
}
|
||||
|
||||
func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*uint64)(ptr)) = iter.ReadUint64()
|
||||
}
|
||||
|
||||
func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteUint64(*((*uint64)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*uint64)(ptr)) == 0
|
||||
}
|
||||
|
||||
type float32Codec struct {
|
||||
}
|
||||
|
||||
func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*float32)(ptr)) = iter.ReadFloat32()
|
||||
}
|
||||
|
||||
func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteFloat32(*((*float32)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*float32)(ptr)) == 0
|
||||
}
|
||||
|
||||
type float64Codec struct {
|
||||
}
|
||||
|
||||
func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*float64)(ptr)) = iter.ReadFloat64()
|
||||
}
|
||||
|
||||
func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteFloat64(*((*float64)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return *((*float64)(ptr)) == 0
|
||||
}
|
||||
|
||||
type boolCodec struct {
|
||||
}
|
||||
|
||||
func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*bool)(ptr)) = iter.ReadBool()
|
||||
}
|
||||
|
||||
func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteBool(*((*bool)(ptr)))
|
||||
}
|
||||
|
||||
func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, codec)
|
||||
}
|
||||
|
||||
func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return !(*((*bool)(ptr)))
|
||||
}
|
||||
|
||||
type emptyInterfaceCodec struct {
|
||||
}
|
||||
|
||||
func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*interface{})(ptr)) = iter.Read()
|
||||
}
|
||||
|
||||
func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteVal(*((*interface{})(ptr)))
|
||||
}
|
||||
|
||||
func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
stream.WriteVal(val)
|
||||
}
|
||||
|
||||
func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return ptr == nil
|
||||
}
|
||||
|
||||
type nonEmptyInterfaceCodec struct {
|
||||
}
|
||||
|
||||
func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
nonEmptyInterface := (*nonEmptyInterface)(ptr)
|
||||
if nonEmptyInterface.itab == nil {
|
||||
iter.ReportError("read non-empty interface", "do not know which concrete type to decode to")
|
||||
return
|
||||
}
|
||||
var i interface{}
|
||||
e := (*emptyInterface)(unsafe.Pointer(&i))
|
||||
e.typ = nonEmptyInterface.itab.typ
|
||||
e.word = nonEmptyInterface.word
|
||||
iter.ReadVal(&i)
|
||||
nonEmptyInterface.word = e.word
|
||||
}
|
||||
|
||||
func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
nonEmptyInterface := (*nonEmptyInterface)(ptr)
|
||||
var i interface{}
|
||||
e := (*emptyInterface)(unsafe.Pointer(&i))
|
||||
e.typ = nonEmptyInterface.itab.typ
|
||||
e.word = nonEmptyInterface.word
|
||||
stream.WriteVal(i)
|
||||
}
|
||||
|
||||
func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
stream.WriteVal(val)
|
||||
}
|
||||
|
||||
func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
nonEmptyInterface := (*nonEmptyInterface)(ptr)
|
||||
return nonEmptyInterface.word == nil
|
||||
}
|
||||
|
||||
type anyCodec struct {
|
||||
}
|
||||
|
||||
func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*Any)(ptr)) = iter.ReadAny()
|
||||
}
|
||||
|
||||
func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
(*((*Any)(ptr))).WriteTo(stream)
|
||||
}
|
||||
|
||||
func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
(val.(Any)).WriteTo(stream)
|
||||
}
|
||||
|
||||
func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return (*((*Any)(ptr))).Size() == 0
|
||||
}
|
||||
|
||||
type jsonNumberCodec struct {
|
||||
}
|
||||
|
||||
func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
|
||||
}
|
||||
|
||||
func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteRaw(string(*((*json.Number)(ptr))))
|
||||
}
|
||||
|
||||
func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
stream.WriteRaw(string(val.(json.Number)))
|
||||
}
|
||||
|
||||
func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return len(*((*json.Number)(ptr))) == 0
|
||||
}
|
||||
|
||||
type jsoniterNumberCodec struct {
|
||||
}
|
||||
|
||||
func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
|
||||
}
|
||||
|
||||
func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteRaw(string(*((*Number)(ptr))))
|
||||
}
|
||||
|
||||
func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
stream.WriteRaw(string(val.(Number)))
|
||||
}
|
||||
|
||||
func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return len(*((*Number)(ptr))) == 0
|
||||
}
|
||||
|
||||
type jsonRawMessageCodec struct {
|
||||
}
|
||||
|
||||
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
|
||||
}
|
||||
|
||||
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
|
||||
}
|
||||
|
||||
func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
stream.WriteRaw(string(val.(json.RawMessage)))
|
||||
}
|
||||
|
||||
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return len(*((*json.RawMessage)(ptr))) == 0
|
||||
}
|
||||
|
||||
type jsoniterRawMessageCodec struct {
|
||||
}
|
||||
|
||||
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
|
||||
}
|
||||
|
||||
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteRaw(string(*((*RawMessage)(ptr))))
|
||||
}
|
||||
|
||||
func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
stream.WriteRaw(string(val.(RawMessage)))
|
||||
}
|
||||
|
||||
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return len(*((*RawMessage)(ptr))) == 0
|
||||
}
|
||||
|
||||
type base64Codec struct {
|
||||
sliceDecoder ValDecoder
|
||||
}
|
||||
|
||||
func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if iter.ReadNil() {
|
||||
ptrSlice := (*sliceHeader)(ptr)
|
||||
ptrSlice.Len = 0
|
||||
ptrSlice.Cap = 0
|
||||
ptrSlice.Data = nil
|
||||
return
|
||||
}
|
||||
switch iter.WhatIsNext() {
|
||||
case StringValue:
|
||||
encoding := base64.StdEncoding
|
||||
src := iter.SkipAndReturnBytes()
|
||||
src = src[1 : len(src)-1]
|
||||
decodedLen := encoding.DecodedLen(len(src))
|
||||
dst := make([]byte, decodedLen)
|
||||
len, err := encoding.Decode(dst, src)
|
||||
if err != nil {
|
||||
iter.ReportError("decode base64", err.Error())
|
||||
} else {
|
||||
dst = dst[:len]
|
||||
dstSlice := (*sliceHeader)(unsafe.Pointer(&dst))
|
||||
ptrSlice := (*sliceHeader)(ptr)
|
||||
ptrSlice.Data = dstSlice.Data
|
||||
ptrSlice.Cap = dstSlice.Cap
|
||||
ptrSlice.Len = dstSlice.Len
|
||||
}
|
||||
case ArrayValue:
|
||||
codec.sliceDecoder.Decode(ptr, iter)
|
||||
default:
|
||||
iter.ReportError("base64Codec", "invalid input")
|
||||
}
|
||||
}
|
||||
|
||||
func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
src := *((*[]byte)(ptr))
|
||||
if len(src) == 0 {
|
||||
stream.WriteNil()
|
||||
return
|
||||
}
|
||||
encoding := base64.StdEncoding
|
||||
stream.writeByte('"')
|
||||
toGrow := encoding.EncodedLen(len(src))
|
||||
stream.ensure(toGrow)
|
||||
encoding.Encode(stream.buf[stream.n:], src)
|
||||
stream.n += toGrow
|
||||
stream.writeByte('"')
|
||||
}
|
||||
|
||||
func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) {
|
||||
ptr := extractInterface(val).word
|
||||
src := *((*[]byte)(ptr))
|
||||
if len(src) == 0 {
|
||||
stream.WriteNil()
|
||||
return
|
||||
}
|
||||
encoding := base64.StdEncoding
|
||||
stream.writeByte('"')
|
||||
toGrow := encoding.EncodedLen(len(src))
|
||||
stream.ensure(toGrow)
|
||||
encoding.Encode(stream.buf[stream.n:], src)
|
||||
stream.n += toGrow
|
||||
stream.writeByte('"')
|
||||
}
|
||||
|
||||
func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return len(*((*[]byte)(ptr))) == 0
|
||||
}
|
||||
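An illustrative sketch of the []byte handling above: byte slices are written as base64 strings and can be read back from either a string or a plain JSON array.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	out, _ := jsoniter.Marshal([]byte("hello"))
	fmt.Println(string(out)) // "aGVsbG8="

	var back []byte
	_ = jsoniter.Unmarshal(out, &back)
	fmt.Println(string(back)) // hello
}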
|
||||
type stringModeNumberDecoder struct {
|
||||
elemDecoder ValDecoder
|
||||
}
|
||||
|
||||
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
c := iter.nextToken()
|
||||
if c != '"' {
|
||||
iter.ReportError("stringModeNumberDecoder", `expect "`)
|
||||
return
|
||||
}
|
||||
decoder.elemDecoder.Decode(ptr, iter)
|
||||
if iter.Error != nil {
|
||||
return
|
||||
}
|
||||
c = iter.readByte()
|
||||
if c != '"' {
|
||||
iter.ReportError("stringModeNumberDecoder", `expect "`)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type stringModeStringDecoder struct {
|
||||
elemDecoder ValDecoder
|
||||
cfg *frozenConfig
|
||||
}
|
||||
|
||||
func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
decoder.elemDecoder.Decode(ptr, iter)
|
||||
str := *((*string)(ptr))
|
||||
tempIter := decoder.cfg.BorrowIterator([]byte(str))
|
||||
defer decoder.cfg.ReturnIterator(tempIter)
|
||||
*((*string)(ptr)) = tempIter.ReadString()
|
||||
}
|
||||
|
||||
type stringModeNumberEncoder struct {
|
||||
elemEncoder ValEncoder
|
||||
}
|
||||
|
||||
func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.writeByte('"')
|
||||
encoder.elemEncoder.Encode(ptr, stream)
|
||||
stream.writeByte('"')
|
||||
}
|
||||
|
||||
func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return encoder.elemEncoder.IsEmpty(ptr)
|
||||
}
|
||||
|
||||
type stringModeStringEncoder struct {
|
||||
elemEncoder ValEncoder
|
||||
cfg *frozenConfig
|
||||
}
|
||||
|
||||
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
tempStream := encoder.cfg.BorrowStream(nil)
|
||||
defer encoder.cfg.ReturnStream(tempStream)
|
||||
encoder.elemEncoder.Encode(ptr, tempStream)
|
||||
stream.WriteString(string(tempStream.Buffer()))
|
||||
}
|
||||
|
||||
func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return encoder.elemEncoder.IsEmpty(ptr)
|
||||
}
|
||||
|
||||
type marshalerEncoder struct {
|
||||
templateInterface emptyInterface
|
||||
checkIsEmpty checkIsEmpty
|
||||
}
|
||||
|
||||
func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
templateInterface := encoder.templateInterface
|
||||
templateInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
|
||||
marshaler := (*realInterface).(json.Marshaler)
|
||||
bytes, err := marshaler.MarshalJSON()
|
||||
if err != nil {
|
||||
stream.Error = err
|
||||
} else {
|
||||
stream.Write(bytes)
|
||||
}
|
||||
}
|
||||
func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return encoder.checkIsEmpty.IsEmpty(ptr)
|
||||
}
|
||||
|
||||
type textMarshalerEncoder struct {
|
||||
templateInterface emptyInterface
|
||||
checkIsEmpty checkIsEmpty
|
||||
}
|
||||
|
||||
func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
templateInterface := encoder.templateInterface
|
||||
templateInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
|
||||
marshaler := (*realInterface).(encoding.TextMarshaler)
|
||||
bytes, err := marshaler.MarshalText()
|
||||
if err != nil {
|
||||
stream.Error = err
|
||||
} else {
|
||||
stream.WriteString(string(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return encoder.checkIsEmpty.IsEmpty(ptr)
|
||||
}
|
||||
|
||||
type unmarshalerDecoder struct {
|
||||
templateInterface emptyInterface
|
||||
}
|
||||
|
||||
func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
templateInterface := decoder.templateInterface
|
||||
templateInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
|
||||
unmarshaler := (*realInterface).(json.Unmarshaler)
|
||||
iter.nextToken()
|
||||
iter.unreadByte() // skip spaces
|
||||
bytes := iter.SkipAndReturnBytes()
|
||||
err := unmarshaler.UnmarshalJSON(bytes)
|
||||
if err != nil {
|
||||
iter.ReportError("unmarshalerDecoder", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type textUnmarshalerDecoder struct {
|
||||
templateInterface emptyInterface
|
||||
}
|
||||
|
||||
func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
templateInterface := decoder.templateInterface
|
||||
templateInterface.word = ptr
|
||||
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
|
||||
unmarshaler := (*realInterface).(encoding.TextUnmarshaler)
|
||||
str := iter.ReadString()
|
||||
err := unmarshaler.UnmarshalText([]byte(str))
|
||||
if err != nil {
|
||||
iter.ReportError("textUnmarshalerDecoder", err.Error())
|
||||
}
|
||||
}
196
vendor/github.com/json-iterator/go/feature_reflect_object.go
generated
vendored
Normal file
@@ -0,0 +1,196 @@
package jsoniter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
|
||||
type bindingTo struct {
|
||||
binding *Binding
|
||||
toName string
|
||||
ignored bool
|
||||
}
|
||||
orderedBindings := []*bindingTo{}
|
||||
structDescriptor, err := describeStruct(cfg, typ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, binding := range structDescriptor.Fields {
|
||||
for _, toName := range binding.ToNames {
|
||||
new := &bindingTo{
|
||||
binding: binding,
|
||||
toName: toName,
|
||||
}
|
||||
for _, old := range orderedBindings {
|
||||
if old.toName != toName {
|
||||
continue
|
||||
}
|
||||
old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding)
|
||||
}
|
||||
orderedBindings = append(orderedBindings, new)
|
||||
}
|
||||
}
|
||||
if len(orderedBindings) == 0 {
|
||||
return &emptyStructEncoder{}, nil
|
||||
}
|
||||
finalOrderedFields := []structFieldTo{}
|
||||
for _, bindingTo := range orderedBindings {
|
||||
if !bindingTo.ignored {
|
||||
finalOrderedFields = append(finalOrderedFields, structFieldTo{
|
||||
encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
|
||||
toName: bindingTo.toName,
|
||||
})
|
||||
}
|
||||
}
|
||||
return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil
|
||||
}
|
||||
|
||||
func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
|
||||
newTagged := new.Field.Tag.Get(cfg.getTagKey()) != ""
|
||||
oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != ""
|
||||
if newTagged {
|
||||
if oldTagged {
|
||||
if len(old.levels) > len(new.levels) {
|
||||
return true, false
|
||||
} else if len(new.levels) > len(old.levels) {
|
||||
return false, true
|
||||
} else {
|
||||
return true, true
|
||||
}
|
||||
} else {
|
||||
return true, false
|
||||
}
|
||||
} else {
|
||||
if oldTagged {
|
||||
return true, false
|
||||
}
|
||||
if len(old.levels) > len(new.levels) {
|
||||
return true, false
|
||||
} else if len(new.levels) > len(old.levels) {
|
||||
return false, true
|
||||
} else {
|
||||
return true, true
|
||||
}
|
||||
}
|
||||
}
|
||||
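A hedged sketch of the conflict rule implemented above: when two bindings map to the same JSON name, the shallower, explicitly tagged field wins and the embedded one is ignored; the Base and User types are hypothetical.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type Base struct {
	Name string
}

type User struct {
	Base
	Name string `json:"Name"` // shadows the embedded Base.Name under the same JSON key
}

func main() {
	out, _ := jsoniter.Marshal(User{Base: Base{Name: "embedded"}, Name: "outer"})
	fmt.Println(string(out)) // {"Name":"outer"}
}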
|
||||
func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
|
||||
bindings := map[string]*Binding{}
|
||||
structDescriptor, err := describeStruct(cfg, typ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, binding := range structDescriptor.Fields {
|
||||
for _, fromName := range binding.FromNames {
|
||||
old := bindings[fromName]
|
||||
if old == nil {
|
||||
bindings[fromName] = binding
|
||||
continue
|
||||
}
|
||||
ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding)
|
||||
if ignoreOld {
|
||||
delete(bindings, fromName)
|
||||
}
|
||||
if !ignoreNew {
|
||||
bindings[fromName] = binding
|
||||
}
|
||||
}
|
||||
}
|
||||
fields := map[string]*structFieldDecoder{}
|
||||
for k, binding := range bindings {
|
||||
fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
|
||||
}
|
||||
return createStructDecoder(typ, fields)
|
||||
}
|
||||
|
||||
type structFieldEncoder struct {
|
||||
field *reflect.StructField
|
||||
fieldEncoder ValEncoder
|
||||
omitempty bool
|
||||
}
|
||||
|
||||
func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset)
|
||||
encoder.fieldEncoder.Encode(fieldPtr, stream)
|
||||
if stream.Error != nil && stream.Error != io.EOF {
|
||||
stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset)
|
||||
return encoder.fieldEncoder.IsEmpty(fieldPtr)
|
||||
}
|
||||
|
||||
type structEncoder struct {
|
||||
onePtrEmbedded bool
|
||||
onePtrOptimization bool
|
||||
fields []structFieldTo
|
||||
}
|
||||
|
||||
type structFieldTo struct {
|
||||
encoder *structFieldEncoder
|
||||
toName string
|
||||
}
|
||||
|
||||
func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteObjectStart()
|
||||
isNotFirst := false
|
||||
for _, field := range encoder.fields {
|
||||
if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
|
||||
continue
|
||||
}
|
||||
if isNotFirst {
|
||||
stream.WriteMore()
|
||||
}
|
||||
stream.WriteObjectField(field.toName)
|
||||
field.encoder.Encode(ptr, stream)
|
||||
isNotFirst = true
|
||||
}
|
||||
stream.WriteObjectEnd()
|
||||
}
|
||||
|
||||
func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
e := (*emptyInterface)(unsafe.Pointer(&val))
|
||||
if encoder.onePtrOptimization {
|
||||
if e.word == nil && encoder.onePtrEmbedded {
|
||||
stream.WriteObjectStart()
|
||||
stream.WriteObjectEnd()
|
||||
return
|
||||
}
|
||||
ptr := uintptr(e.word)
|
||||
e.word = unsafe.Pointer(&ptr)
|
||||
}
|
||||
if reflect.TypeOf(val).Kind() == reflect.Ptr {
|
||||
encoder.Encode(unsafe.Pointer(&e.word), stream)
|
||||
} else {
|
||||
encoder.Encode(e.word, stream)
|
||||
}
|
||||
}
|
||||
|
||||
func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type emptyStructEncoder struct {
|
||||
}
|
||||
|
||||
func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
|
||||
stream.WriteEmptyObject()
|
||||
}
|
||||
|
||||
func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) {
|
||||
WriteToStream(val, stream, encoder)
|
||||
}
|
||||
|
||||
func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
|
||||
return false
|
||||
}
149
vendor/github.com/json-iterator/go/feature_reflect_slice.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
package jsoniter

import (
	"fmt"
	"io"
	"reflect"
	"unsafe"
)

func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
	decoder, err := decoderOfType(cfg, typ.Elem())
	if err != nil {
		return nil, err
	}
	return &sliceDecoder{typ, typ.Elem(), decoder}, nil
}

func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
	encoder, err := encoderOfType(cfg, typ.Elem())
	if err != nil {
		return nil, err
	}
	if typ.Elem().Kind() == reflect.Map {
		encoder = &optionalEncoder{encoder}
	}
	return &sliceEncoder{typ, typ.Elem(), encoder}, nil
}

type sliceEncoder struct {
	sliceType   reflect.Type
	elemType    reflect.Type
	elemEncoder ValEncoder
}

func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
	slice := (*sliceHeader)(ptr)
	if slice.Data == nil {
		stream.WriteNil()
		return
	}
	if slice.Len == 0 {
		stream.WriteEmptyArray()
		return
	}
	stream.WriteArrayStart()
	elemPtr := unsafe.Pointer(slice.Data)
	encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
	for i := 1; i < slice.Len; i++ {
		stream.WriteMore()
		elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
		encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
	}
	stream.WriteArrayEnd()
	if stream.Error != nil && stream.Error != io.EOF {
		stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
	}
}

func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) {
	WriteToStream(val, stream, encoder)
}

func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	slice := (*sliceHeader)(ptr)
	return slice.Len == 0
}

type sliceDecoder struct {
	sliceType   reflect.Type
	elemType    reflect.Type
	elemDecoder ValDecoder
}

// sliceHeader is a safe version of SliceHeader used within this package.
type sliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
	decoder.doDecode(ptr, iter)
	if iter.Error != nil && iter.Error != io.EOF {
		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
	}
}

func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
	slice := (*sliceHeader)(ptr)
	if iter.ReadNil() {
		slice.Len = 0
		slice.Cap = 0
		slice.Data = nil
		return
	}
	reuseSlice(slice, decoder.sliceType, 4)
	slice.Len = 0
	offset := uintptr(0)
	iter.ReadArrayCB(func(iter *Iterator) bool {
		growOne(slice, decoder.sliceType, decoder.elemType)
		decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter)
		offset += decoder.elemType.Size()
		return true
	})
}

// growOne grows the slice so that it can hold one more value, allocating
// more capacity if needed.
func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) {
	newLen := slice.Len + 1
	if newLen <= slice.Cap {
		slice.Len = newLen
		return
	}
	newCap := slice.Cap
	if newCap == 0 {
		newCap = 1
	} else {
		for newCap < newLen {
			if slice.Len < 1024 {
				newCap += newCap
			} else {
				newCap += newCap / 4
			}
		}
	}
	newVal := reflect.MakeSlice(sliceType, newLen, newCap)
	dst := unsafe.Pointer(newVal.Pointer())
	// copy old array into new array
	originalBytesCount := uintptr(slice.Len) * elementType.Size()
	srcPtr := (*[1 << 30]byte)(slice.Data)
	dstPtr := (*[1 << 30]byte)(dst)
	for i := uintptr(0); i < originalBytesCount; i++ {
		dstPtr[i] = srcPtr[i]
	}
	slice.Data = dst
	slice.Len = newLen
	slice.Cap = newCap
}

func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) {
	if expectedCap <= slice.Cap {
		return
	}
	newVal := reflect.MakeSlice(sliceType, 0, expectedCap)
	dst := unsafe.Pointer(newVal.Pointer())
	slice.Data = dst
	slice.Cap = expectedCap
}
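A brief sketch of the nil versus empty distinction made by sliceEncoder above: a nil slice encodes as null, while an allocated empty slice encodes as [].

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var nilSlice []int
	empty := []int{}
	a, _ := jsoniter.Marshal(nilSlice)
	b, _ := jsoniter.Marshal(empty)
	fmt.Println(string(a), string(b)) // null []
}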
916
vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go
generated
vendored
Normal file
@@ -0,0 +1,916 @@
package jsoniter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) {
|
||||
knownHash := map[int32]struct{}{
|
||||
0: {},
|
||||
}
|
||||
switch len(fields) {
|
||||
case 0:
|
||||
return &skipObjectDecoder{typ}, nil
|
||||
case 1:
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil
|
||||
}
|
||||
case 2:
|
||||
var fieldHash1 int32
|
||||
var fieldHash2 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldHash1 == 0 {
|
||||
fieldHash1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else {
|
||||
fieldHash2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil
|
||||
case 3:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &threeFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil
|
||||
case 4:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &fourFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4}, nil
|
||||
case 5:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldName5 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
var fieldDecoder5 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else if fieldName4 == 0 {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
} else {
|
||||
fieldName5 = fieldHash
|
||||
fieldDecoder5 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &fiveFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil
|
||||
case 6:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldName5 int32
|
||||
var fieldName6 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
var fieldDecoder5 *structFieldDecoder
|
||||
var fieldDecoder6 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else if fieldName4 == 0 {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
} else if fieldName5 == 0 {
|
||||
fieldName5 = fieldHash
|
||||
fieldDecoder5 = fieldDecoder
|
||||
} else {
|
||||
fieldName6 = fieldHash
|
||||
fieldDecoder6 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &sixFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil
|
||||
case 7:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldName5 int32
|
||||
var fieldName6 int32
|
||||
var fieldName7 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
var fieldDecoder5 *structFieldDecoder
|
||||
var fieldDecoder6 *structFieldDecoder
|
||||
var fieldDecoder7 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else if fieldName4 == 0 {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
} else if fieldName5 == 0 {
|
||||
fieldName5 = fieldHash
|
||||
fieldDecoder5 = fieldDecoder
|
||||
} else if fieldName6 == 0 {
|
||||
fieldName6 = fieldHash
|
||||
fieldDecoder6 = fieldDecoder
|
||||
} else {
|
||||
fieldName7 = fieldHash
|
||||
fieldDecoder7 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &sevenFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
|
||||
fieldName7, fieldDecoder7}, nil
|
||||
case 8:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldName5 int32
|
||||
var fieldName6 int32
|
||||
var fieldName7 int32
|
||||
var fieldName8 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
var fieldDecoder5 *structFieldDecoder
|
||||
var fieldDecoder6 *structFieldDecoder
|
||||
var fieldDecoder7 *structFieldDecoder
|
||||
var fieldDecoder8 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else if fieldName4 == 0 {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
} else if fieldName5 == 0 {
|
||||
fieldName5 = fieldHash
|
||||
fieldDecoder5 = fieldDecoder
|
||||
} else if fieldName6 == 0 {
|
||||
fieldName6 = fieldHash
|
||||
fieldDecoder6 = fieldDecoder
|
||||
} else if fieldName7 == 0 {
|
||||
fieldName7 = fieldHash
|
||||
fieldDecoder7 = fieldDecoder
|
||||
} else {
|
||||
fieldName8 = fieldHash
|
||||
fieldDecoder8 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &eightFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
|
||||
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil
|
||||
case 9:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldName5 int32
|
||||
var fieldName6 int32
|
||||
var fieldName7 int32
|
||||
var fieldName8 int32
|
||||
var fieldName9 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
var fieldDecoder5 *structFieldDecoder
|
||||
var fieldDecoder6 *structFieldDecoder
|
||||
var fieldDecoder7 *structFieldDecoder
|
||||
var fieldDecoder8 *structFieldDecoder
|
||||
var fieldDecoder9 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else if fieldName4 == 0 {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
} else if fieldName5 == 0 {
|
||||
fieldName5 = fieldHash
|
||||
fieldDecoder5 = fieldDecoder
|
||||
} else if fieldName6 == 0 {
|
||||
fieldName6 = fieldHash
|
||||
fieldDecoder6 = fieldDecoder
|
||||
} else if fieldName7 == 0 {
|
||||
fieldName7 = fieldHash
|
||||
fieldDecoder7 = fieldDecoder
|
||||
} else if fieldName8 == 0 {
|
||||
fieldName8 = fieldHash
|
||||
fieldDecoder8 = fieldDecoder
|
||||
} else {
|
||||
fieldName9 = fieldHash
|
||||
fieldDecoder9 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &nineFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
|
||||
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil
|
||||
case 10:
|
||||
var fieldName1 int32
|
||||
var fieldName2 int32
|
||||
var fieldName3 int32
|
||||
var fieldName4 int32
|
||||
var fieldName5 int32
|
||||
var fieldName6 int32
|
||||
var fieldName7 int32
|
||||
var fieldName8 int32
|
||||
var fieldName9 int32
|
||||
var fieldName10 int32
|
||||
var fieldDecoder1 *structFieldDecoder
|
||||
var fieldDecoder2 *structFieldDecoder
|
||||
var fieldDecoder3 *structFieldDecoder
|
||||
var fieldDecoder4 *structFieldDecoder
|
||||
var fieldDecoder5 *structFieldDecoder
|
||||
var fieldDecoder6 *structFieldDecoder
|
||||
var fieldDecoder7 *structFieldDecoder
|
||||
var fieldDecoder8 *structFieldDecoder
|
||||
var fieldDecoder9 *structFieldDecoder
|
||||
var fieldDecoder10 *structFieldDecoder
|
||||
for fieldName, fieldDecoder := range fields {
|
||||
fieldHash := calcHash(fieldName)
|
||||
_, known := knownHash[fieldHash]
|
||||
if known {
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
knownHash[fieldHash] = struct{}{}
|
||||
if fieldName1 == 0 {
|
||||
fieldName1 = fieldHash
|
||||
fieldDecoder1 = fieldDecoder
|
||||
} else if fieldName2 == 0 {
|
||||
fieldName2 = fieldHash
|
||||
fieldDecoder2 = fieldDecoder
|
||||
} else if fieldName3 == 0 {
|
||||
fieldName3 = fieldHash
|
||||
fieldDecoder3 = fieldDecoder
|
||||
} else if fieldName4 == 0 {
|
||||
fieldName4 = fieldHash
|
||||
fieldDecoder4 = fieldDecoder
|
||||
} else if fieldName5 == 0 {
|
||||
fieldName5 = fieldHash
|
||||
fieldDecoder5 = fieldDecoder
|
||||
} else if fieldName6 == 0 {
|
||||
fieldName6 = fieldHash
|
||||
fieldDecoder6 = fieldDecoder
|
||||
} else if fieldName7 == 0 {
|
||||
fieldName7 = fieldHash
|
||||
fieldDecoder7 = fieldDecoder
|
||||
} else if fieldName8 == 0 {
|
||||
fieldName8 = fieldHash
|
||||
fieldDecoder8 = fieldDecoder
|
||||
} else if fieldName9 == 0 {
|
||||
fieldName9 = fieldHash
|
||||
fieldDecoder9 = fieldDecoder
|
||||
} else {
|
||||
fieldName10 = fieldHash
|
||||
fieldDecoder10 = fieldDecoder
|
||||
}
|
||||
}
|
||||
return &tenFieldsStructDecoder{typ,
|
||||
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
|
||||
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
|
||||
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9,
|
||||
fieldName10, fieldDecoder10}, nil
|
||||
}
|
||||
return &generalStructDecoder{typ, fields}, nil
|
||||
}
|
||||
|
||||
type generalStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fields map[string]*structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
fieldBytes := iter.readObjectFieldAsBytes()
|
||||
field := *(*string)(unsafe.Pointer(&fieldBytes))
|
||||
fieldDecoder := decoder.fields[strings.ToLower(field)]
|
||||
if fieldDecoder == nil {
|
||||
iter.Skip()
|
||||
} else {
|
||||
fieldDecoder.Decode(ptr, iter)
|
||||
}
|
||||
for iter.nextToken() == ',' {
|
||||
fieldBytes = iter.readObjectFieldAsBytes()
|
||||
field = *(*string)(unsafe.Pointer(&fieldBytes))
|
||||
fieldDecoder = decoder.fields[strings.ToLower(field)]
|
||||
if fieldDecoder == nil {
|
||||
iter.Skip()
|
||||
} else {
|
||||
fieldDecoder.Decode(ptr, iter)
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type skipObjectDecoder struct {
|
||||
typ reflect.Type
|
||||
}
|
||||
|
||||
func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
valueType := iter.WhatIsNext()
|
||||
if valueType != ObjectValue && valueType != NilValue {
|
||||
iter.ReportError("skipObjectDecoder", "expect object or null")
|
||||
return
|
||||
}
|
||||
iter.Skip()
|
||||
}
|
||||
|
||||
type oneFieldStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash int32
|
||||
fieldDecoder *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
if iter.readFieldHash() == decoder.fieldHash {
|
||||
decoder.fieldDecoder.Decode(ptr, iter)
|
||||
} else {
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type twoFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type threeFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type fourFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type fiveFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
fieldHash5 int32
|
||||
fieldDecoder5 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
case decoder.fieldHash5:
|
||||
decoder.fieldDecoder5.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type sixFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
fieldHash5 int32
|
||||
fieldDecoder5 *structFieldDecoder
|
||||
fieldHash6 int32
|
||||
fieldDecoder6 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
case decoder.fieldHash5:
|
||||
decoder.fieldDecoder5.Decode(ptr, iter)
|
||||
case decoder.fieldHash6:
|
||||
decoder.fieldDecoder6.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type sevenFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
fieldHash5 int32
|
||||
fieldDecoder5 *structFieldDecoder
|
||||
fieldHash6 int32
|
||||
fieldDecoder6 *structFieldDecoder
|
||||
fieldHash7 int32
|
||||
fieldDecoder7 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
case decoder.fieldHash5:
|
||||
decoder.fieldDecoder5.Decode(ptr, iter)
|
||||
case decoder.fieldHash6:
|
||||
decoder.fieldDecoder6.Decode(ptr, iter)
|
||||
case decoder.fieldHash7:
|
||||
decoder.fieldDecoder7.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type eightFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
fieldHash5 int32
|
||||
fieldDecoder5 *structFieldDecoder
|
||||
fieldHash6 int32
|
||||
fieldDecoder6 *structFieldDecoder
|
||||
fieldHash7 int32
|
||||
fieldDecoder7 *structFieldDecoder
|
||||
fieldHash8 int32
|
||||
fieldDecoder8 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
case decoder.fieldHash5:
|
||||
decoder.fieldDecoder5.Decode(ptr, iter)
|
||||
case decoder.fieldHash6:
|
||||
decoder.fieldDecoder6.Decode(ptr, iter)
|
||||
case decoder.fieldHash7:
|
||||
decoder.fieldDecoder7.Decode(ptr, iter)
|
||||
case decoder.fieldHash8:
|
||||
decoder.fieldDecoder8.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type nineFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
fieldHash5 int32
|
||||
fieldDecoder5 *structFieldDecoder
|
||||
fieldHash6 int32
|
||||
fieldDecoder6 *structFieldDecoder
|
||||
fieldHash7 int32
|
||||
fieldDecoder7 *structFieldDecoder
|
||||
fieldHash8 int32
|
||||
fieldDecoder8 *structFieldDecoder
|
||||
fieldHash9 int32
|
||||
fieldDecoder9 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
case decoder.fieldHash5:
|
||||
decoder.fieldDecoder5.Decode(ptr, iter)
|
||||
case decoder.fieldHash6:
|
||||
decoder.fieldDecoder6.Decode(ptr, iter)
|
||||
case decoder.fieldHash7:
|
||||
decoder.fieldDecoder7.Decode(ptr, iter)
|
||||
case decoder.fieldHash8:
|
||||
decoder.fieldDecoder8.Decode(ptr, iter)
|
||||
case decoder.fieldHash9:
|
||||
decoder.fieldDecoder9.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type tenFieldsStructDecoder struct {
|
||||
typ reflect.Type
|
||||
fieldHash1 int32
|
||||
fieldDecoder1 *structFieldDecoder
|
||||
fieldHash2 int32
|
||||
fieldDecoder2 *structFieldDecoder
|
||||
fieldHash3 int32
|
||||
fieldDecoder3 *structFieldDecoder
|
||||
fieldHash4 int32
|
||||
fieldDecoder4 *structFieldDecoder
|
||||
fieldHash5 int32
|
||||
fieldDecoder5 *structFieldDecoder
|
||||
fieldHash6 int32
|
||||
fieldDecoder6 *structFieldDecoder
|
||||
fieldHash7 int32
|
||||
fieldDecoder7 *structFieldDecoder
|
||||
fieldHash8 int32
|
||||
fieldDecoder8 *structFieldDecoder
|
||||
fieldHash9 int32
|
||||
fieldDecoder9 *structFieldDecoder
|
||||
fieldHash10 int32
|
||||
fieldDecoder10 *structFieldDecoder
|
||||
}
|
||||
|
||||
func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
if !iter.readObjectStart() {
|
||||
return
|
||||
}
|
||||
for {
|
||||
switch iter.readFieldHash() {
|
||||
case decoder.fieldHash1:
|
||||
decoder.fieldDecoder1.Decode(ptr, iter)
|
||||
case decoder.fieldHash2:
|
||||
decoder.fieldDecoder2.Decode(ptr, iter)
|
||||
case decoder.fieldHash3:
|
||||
decoder.fieldDecoder3.Decode(ptr, iter)
|
||||
case decoder.fieldHash4:
|
||||
decoder.fieldDecoder4.Decode(ptr, iter)
|
||||
case decoder.fieldHash5:
|
||||
decoder.fieldDecoder5.Decode(ptr, iter)
|
||||
case decoder.fieldHash6:
|
||||
decoder.fieldDecoder6.Decode(ptr, iter)
|
||||
case decoder.fieldHash7:
|
||||
decoder.fieldDecoder7.Decode(ptr, iter)
|
||||
case decoder.fieldHash8:
|
||||
decoder.fieldDecoder8.Decode(ptr, iter)
|
||||
case decoder.fieldHash9:
|
||||
decoder.fieldDecoder9.Decode(ptr, iter)
|
||||
case decoder.fieldHash10:
|
||||
decoder.fieldDecoder10.Decode(ptr, iter)
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
if iter.isObjectEnd() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
|
||||
}
|
||||
}
|
||||
|
||||
type structFieldDecoder struct {
|
||||
field *reflect.StructField
|
||||
fieldDecoder ValDecoder
|
||||
}
|
||||
|
||||
func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
|
||||
fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset)
|
||||
decoder.fieldDecoder.Decode(fieldPtr, iter)
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error())
|
||||
}
|
||||
}
305
vendor/github.com/json-iterator/go/feature_stream.go
generated
vendored
Normal file
@@ -0,0 +1,305 @@
package jsoniter
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Stream is a io.Writer like object, with JSON specific write functions.
|
||||
// Error is not returned as return value, but stored as Error member on this stream instance.
|
||||
type Stream struct {
|
||||
cfg *frozenConfig
|
||||
out io.Writer
|
||||
buf []byte
|
||||
n int
|
||||
Error error
|
||||
indention int
|
||||
}
|
||||
|
||||
// NewStream create new stream instance.
|
||||
// cfg can be jsoniter.ConfigDefault.
|
||||
// out can be nil if write to internal buffer.
|
||||
// bufSize is the initial size for the internal buffer in bytes.
|
||||
func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
|
||||
return &Stream{
|
||||
cfg: cfg.(*frozenConfig),
|
||||
out: out,
|
||||
buf: make([]byte, bufSize),
|
||||
n: 0,
|
||||
Error: nil,
|
||||
indention: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Pool returns a pool can provide more stream with same configuration
|
||||
func (stream *Stream) Pool() StreamPool {
|
||||
return stream.cfg
|
||||
}
|
||||
|
||||
// Reset reuse this stream instance by assign a new writer
|
||||
func (stream *Stream) Reset(out io.Writer) {
|
||||
stream.out = out
|
||||
stream.n = 0
|
||||
}
|
||||
|
||||
// Available returns how many bytes are unused in the buffer.
|
||||
func (stream *Stream) Available() int {
|
||||
return len(stream.buf) - stream.n
|
||||
}
|
||||
|
||||
// Buffered returns the number of bytes that have been written into the current buffer.
|
||||
func (stream *Stream) Buffered() int {
|
||||
return stream.n
|
||||
}
|
||||
|
||||
// Buffer if writer is nil, use this method to take the result
|
||||
func (stream *Stream) Buffer() []byte {
|
||||
return stream.buf[:stream.n]
|
||||
}
|
||||
|
||||
// Write writes the contents of p into the buffer.
|
||||
// It returns the number of bytes written.
|
||||
// If nn < len(p), it also returns an error explaining
|
||||
// why the write is short.
|
||||
func (stream *Stream) Write(p []byte) (nn int, err error) {
|
||||
for len(p) > stream.Available() && stream.Error == nil {
|
||||
if stream.out == nil {
|
||||
stream.growAtLeast(len(p))
|
||||
} else {
|
||||
var n int
|
||||
if stream.Buffered() == 0 {
|
||||
// Large write, empty buffer.
|
||||
// Write directly from p to avoid copy.
|
||||
n, stream.Error = stream.out.Write(p)
|
||||
} else {
|
||||
n = copy(stream.buf[stream.n:], p)
|
||||
stream.n += n
|
||||
stream.Flush()
|
||||
}
|
||||
nn += n
|
||||
p = p[n:]
|
||||
}
|
||||
}
|
||||
if stream.Error != nil {
|
||||
return nn, stream.Error
|
||||
}
|
||||
n := copy(stream.buf[stream.n:], p)
|
||||
stream.n += n
|
||||
nn += n
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
// WriteByte writes a single byte.
|
||||
func (stream *Stream) writeByte(c byte) {
|
||||
if stream.Error != nil {
|
||||
return
|
||||
}
|
||||
if stream.Available() < 1 {
|
||||
stream.growAtLeast(1)
|
||||
}
|
||||
stream.buf[stream.n] = c
|
||||
stream.n++
|
||||
}
|
||||
|
||||
func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
|
||||
if stream.Error != nil {
|
||||
return
|
||||
}
|
||||
if stream.Available() < 2 {
|
||||
stream.growAtLeast(2)
|
||||
}
|
||||
stream.buf[stream.n] = c1
|
||||
stream.buf[stream.n+1] = c2
|
||||
stream.n += 2
|
||||
}
|
||||
|
||||
func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
|
||||
if stream.Error != nil {
|
||||
return
|
||||
}
|
||||
if stream.Available() < 3 {
|
||||
stream.growAtLeast(3)
|
||||
}
|
||||
stream.buf[stream.n] = c1
|
||||
stream.buf[stream.n+1] = c2
|
||||
stream.buf[stream.n+2] = c3
|
||||
stream.n += 3
|
||||
}
|
||||
|
||||
func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
|
||||
if stream.Error != nil {
|
||||
return
|
||||
}
|
||||
if stream.Available() < 4 {
|
||||
stream.growAtLeast(4)
|
||||
}
|
||||
stream.buf[stream.n] = c1
|
||||
stream.buf[stream.n+1] = c2
|
||||
stream.buf[stream.n+2] = c3
|
||||
stream.buf[stream.n+3] = c4
|
||||
stream.n += 4
|
||||
}
|
||||
|
||||
func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
|
||||
if stream.Error != nil {
|
||||
return
|
||||
}
|
||||
if stream.Available() < 5 {
|
||||
stream.growAtLeast(5)
|
||||
}
|
||||
stream.buf[stream.n] = c1
|
||||
stream.buf[stream.n+1] = c2
|
||||
stream.buf[stream.n+2] = c3
|
||||
stream.buf[stream.n+3] = c4
|
||||
stream.buf[stream.n+4] = c5
|
||||
stream.n += 5
|
||||
}
|
||||
|
||||
// Flush writes any buffered data to the underlying io.Writer.
|
||||
func (stream *Stream) Flush() error {
|
||||
if stream.out == nil {
|
||||
return nil
|
||||
}
|
||||
if stream.Error != nil {
|
||||
return stream.Error
|
||||
}
|
||||
if stream.n == 0 {
|
||||
return nil
|
||||
}
|
||||
n, err := stream.out.Write(stream.buf[0:stream.n])
|
||||
if n < stream.n && err == nil {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
if n > 0 && n < stream.n {
|
||||
copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n])
|
||||
}
|
||||
stream.n -= n
|
||||
stream.Error = err
|
||||
return err
|
||||
}
|
||||
stream.n = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func (stream *Stream) ensure(minimal int) {
|
||||
available := stream.Available()
|
||||
if available < minimal {
|
||||
stream.growAtLeast(minimal)
|
||||
}
|
||||
}
|
||||
|
||||
func (stream *Stream) growAtLeast(minimal int) {
|
||||
if stream.out != nil {
|
||||
stream.Flush()
|
||||
}
|
||||
toGrow := len(stream.buf)
|
||||
if toGrow < minimal {
|
||||
toGrow = minimal
|
||||
}
|
||||
newBuf := make([]byte, len(stream.buf)+toGrow)
|
||||
copy(newBuf, stream.Buffer())
|
||||
stream.buf = newBuf
|
||||
}
|
||||
|
||||
// WriteRaw write string out without quotes, just like []byte
|
||||
func (stream *Stream) WriteRaw(s string) {
|
||||
stream.ensure(len(s))
|
||||
if stream.Error != nil {
|
||||
return
|
||||
}
|
||||
n := copy(stream.buf[stream.n:], s)
|
||||
stream.n += n
|
||||
}
|
||||
|
||||
// WriteNil write null to stream
|
||||
func (stream *Stream) WriteNil() {
|
||||
stream.writeFourBytes('n', 'u', 'l', 'l')
|
||||
}
|
||||
|
||||
// WriteTrue write true to stream
|
||||
func (stream *Stream) WriteTrue() {
|
||||
stream.writeFourBytes('t', 'r', 'u', 'e')
|
||||
}
|
||||
|
||||
// WriteFalse write false to stream
|
||||
func (stream *Stream) WriteFalse() {
|
||||
stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
|
||||
}
|
||||
|
||||
// WriteBool write true or false into stream
|
||||
func (stream *Stream) WriteBool(val bool) {
|
||||
if val {
|
||||
stream.WriteTrue()
|
||||
} else {
|
||||
stream.WriteFalse()
|
||||
}
|
||||
}
|
||||
|
||||
// WriteObjectStart write { with possible indention
|
||||
func (stream *Stream) WriteObjectStart() {
|
||||
stream.indention += stream.cfg.indentionStep
|
||||
stream.writeByte('{')
|
||||
stream.writeIndention(0)
|
||||
}
|
||||
|
||||
// WriteObjectField write "field": with possible indention
|
||||
func (stream *Stream) WriteObjectField(field string) {
|
||||
stream.WriteString(field)
|
||||
if stream.indention > 0 {
|
||||
stream.writeTwoBytes(':', ' ')
|
||||
} else {
|
||||
stream.writeByte(':')
|
||||
}
|
||||
}
|
||||
|
||||
// WriteObjectEnd write } with possible indention
|
||||
func (stream *Stream) WriteObjectEnd() {
|
||||
stream.writeIndention(stream.cfg.indentionStep)
|
||||
stream.indention -= stream.cfg.indentionStep
|
||||
stream.writeByte('}')
|
||||
}
|
||||
|
||||
// WriteEmptyObject write {}
|
||||
func (stream *Stream) WriteEmptyObject() {
|
||||
stream.writeByte('{')
|
||||
stream.writeByte('}')
|
||||
}
|
||||
|
||||
// WriteMore write , with possible indention
|
||||
func (stream *Stream) WriteMore() {
|
||||
stream.writeByte(',')
|
||||
stream.writeIndention(0)
|
||||
}
|
||||
|
||||
// WriteArrayStart write [ with possible indention
|
||||
func (stream *Stream) WriteArrayStart() {
|
||||
stream.indention += stream.cfg.indentionStep
|
||||
stream.writeByte('[')
|
||||
stream.writeIndention(0)
|
||||
}
|
||||
|
||||
// WriteEmptyArray write []
|
||||
func (stream *Stream) WriteEmptyArray() {
|
||||
stream.writeByte('[')
|
||||
stream.writeByte(']')
|
||||
}
|
||||
|
||||
// WriteArrayEnd write ] with possible indention
|
||||
func (stream *Stream) WriteArrayEnd() {
|
||||
stream.writeIndention(stream.cfg.indentionStep)
|
||||
stream.indention -= stream.cfg.indentionStep
|
||||
stream.writeByte(']')
|
||||
}
|
||||
|
||||
func (stream *Stream) writeIndention(delta int) {
|
||||
if stream.indention == 0 {
|
||||
return
|
||||
}
|
||||
stream.writeByte('\n')
|
||||
toWrite := stream.indention - delta
|
||||
stream.ensure(toWrite)
|
||||
for i := 0; i < toWrite && stream.n < len(stream.buf); i++ {
|
||||
stream.buf[stream.n] = ' '
|
||||
stream.n++
|
||||
}
|
||||
}
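For orientation, here is a minimal usage sketch of the Stream API defined above, written as if it lived in this package (exampleStreamUsage is a hypothetical name; ConfigDefault is the package-level default configuration mentioned in the NewStream comment). With a nil io.Writer the output stays in the internal buffer, and Buffer() hands it back.

// Sketch only (hypothetical helper name); exercises the Stream writers with no io.Writer,
// so the JSON stays in the internal buffer and is read back via Buffer().
func exampleStreamUsage() string {
	stream := NewStream(ConfigDefault, nil, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("age")
	stream.WriteInt(30)
	stream.WriteObjectEnd()
	if stream.Error != nil {
		return "" // errors accumulate on stream.Error instead of being returned per call
	}
	return string(stream.Buffer()) // {"age":30} with the default zero indention step
}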
96
vendor/github.com/json-iterator/go/feature_stream_float.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
package jsoniter

import (
	"math"
	"strconv"
)

var pow10 []uint64

func init() {
	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
}

// WriteFloat32 write float32 to stream
func (stream *Stream) WriteFloat32(val float32) {
	abs := math.Abs(float64(val))
	fmt := byte('f')
	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
	if abs != 0 {
		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
			fmt = 'e'
		}
	}
	stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32))
}

// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat32Lossy(val float32) {
	if val < 0 {
		stream.writeByte('-')
		val = -val
	}
	if val > 0x4ffffff {
		stream.WriteFloat32(val)
		return
	}
	precision := 6
	exp := uint64(1000000) // 6
	lval := uint64(float64(val)*float64(exp) + 0.5)
	stream.WriteUint64(lval / exp)
	fval := lval % exp
	if fval == 0 {
		return
	}
	stream.writeByte('.')
	stream.ensure(10)
	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
		stream.writeByte('0')
	}
	stream.WriteUint64(fval)
	for stream.buf[stream.n-1] == '0' {
		stream.n--
	}
}

// WriteFloat64 write float64 to stream
func (stream *Stream) WriteFloat64(val float64) {
	abs := math.Abs(val)
	fmt := byte('f')
	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
	if abs != 0 {
		if abs < 1e-6 || abs >= 1e21 {
			fmt = 'e'
		}
	}
	stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64))
}

// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat64Lossy(val float64) {
	if val < 0 {
		stream.writeByte('-')
		val = -val
	}
	if val > 0x4ffffff {
		stream.WriteFloat64(val)
		return
	}
	precision := 6
	exp := uint64(1000000) // 6
	lval := uint64(val*float64(exp) + 0.5)
	stream.WriteUint64(lval / exp)
	fval := lval % exp
	if fval == 0 {
		return
	}
	stream.writeByte('.')
	stream.ensure(10)
	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
		stream.writeByte('0')
	}
	stream.WriteUint64(fval)
	for stream.buf[stream.n-1] == '0' {
		stream.n--
	}
}
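The two Lossy writers above trade strconv for fixed-point arithmetic with six decimal digits. A self-contained sketch of that split is below; splitLossy is a hypothetical name, using the same 10^6 scale as the code above.

// Sketch only: mirrors the fixed-point split used by WriteFloat64Lossy.
// 12.3456789 -> integer part 12, fractional digits 345679 (rounded), printed as "12.345679".
func splitLossy(val float64) (intPart, fracPart uint64) {
	const exp = 1000000           // 10^6, six digits of precision
	lval := uint64(val*exp + 0.5) // round half up in fixed point
	return lval / exp, lval % exp
}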
320
vendor/github.com/json-iterator/go/feature_stream_int.go
generated
vendored
Normal file
@@ -0,0 +1,320 @@
package jsoniter
|
||||
|
||||
var digits []uint32
|
||||
|
||||
func init() {
|
||||
digits = make([]uint32, 1000)
|
||||
for i := uint32(0); i < 1000; i++ {
|
||||
digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
|
||||
if i < 10 {
|
||||
digits[i] += 2 << 24
|
||||
} else if i < 100 {
|
||||
digits[i] += 1 << 24
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func writeFirstBuf(buf []byte, v uint32, n int) int {
|
||||
start := v >> 24
|
||||
if start == 0 {
|
||||
buf[n] = byte(v >> 16)
|
||||
n++
|
||||
buf[n] = byte(v >> 8)
|
||||
n++
|
||||
} else if start == 1 {
|
||||
buf[n] = byte(v >> 8)
|
||||
n++
|
||||
}
|
||||
buf[n] = byte(v)
|
||||
n++
|
||||
return n
|
||||
}
|
||||
|
||||
func writeBuf(buf []byte, v uint32, n int) {
|
||||
buf[n] = byte(v >> 16)
|
||||
buf[n+1] = byte(v >> 8)
|
||||
buf[n+2] = byte(v)
|
||||
}
|
||||
|
||||
// WriteUint8 write uint8 to stream
|
||||
func (stream *Stream) WriteUint8(val uint8) {
|
||||
stream.ensure(3)
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
|
||||
}
|
||||
|
||||
// WriteInt8 write int8 to stream
|
||||
func (stream *Stream) WriteInt8(nval int8) {
|
||||
stream.ensure(4)
|
||||
n := stream.n
|
||||
var val uint8
|
||||
if nval < 0 {
|
||||
val = uint8(-nval)
|
||||
stream.buf[n] = '-'
|
||||
n++
|
||||
} else {
|
||||
val = uint8(nval)
|
||||
}
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], n)
|
||||
}
|
||||
|
||||
// WriteUint16 write uint16 to stream
|
||||
func (stream *Stream) WriteUint16(val uint16) {
|
||||
stream.ensure(5)
|
||||
q1 := val / 1000
|
||||
if q1 == 0 {
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
|
||||
return
|
||||
}
|
||||
r1 := val - q1*1000
|
||||
n := writeFirstBuf(stream.buf, digits[q1], stream.n)
|
||||
writeBuf(stream.buf, digits[r1], n)
|
||||
stream.n = n + 3
|
||||
return
|
||||
}
|
||||
|
||||
// WriteInt16 write int16 to stream
|
||||
func (stream *Stream) WriteInt16(nval int16) {
|
||||
stream.ensure(6)
|
||||
n := stream.n
|
||||
var val uint16
|
||||
if nval < 0 {
|
||||
val = uint16(-nval)
|
||||
stream.buf[n] = '-'
|
||||
n++
|
||||
} else {
|
||||
val = uint16(nval)
|
||||
}
|
||||
q1 := val / 1000
|
||||
if q1 == 0 {
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], n)
|
||||
return
|
||||
}
|
||||
r1 := val - q1*1000
|
||||
n = writeFirstBuf(stream.buf, digits[q1], n)
|
||||
writeBuf(stream.buf, digits[r1], n)
|
||||
stream.n = n + 3
|
||||
return
|
||||
}
|
||||
|
||||
// WriteUint32 write uint32 to stream
|
||||
func (stream *Stream) WriteUint32(val uint32) {
|
||||
stream.ensure(10)
|
||||
n := stream.n
|
||||
q1 := val / 1000
|
||||
if q1 == 0 {
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], n)
|
||||
return
|
||||
}
|
||||
r1 := val - q1*1000
|
||||
q2 := q1 / 1000
|
||||
if q2 == 0 {
|
||||
n := writeFirstBuf(stream.buf, digits[q1], n)
|
||||
writeBuf(stream.buf, digits[r1], n)
|
||||
stream.n = n + 3
|
||||
return
|
||||
}
|
||||
r2 := q1 - q2*1000
|
||||
q3 := q2 / 1000
|
||||
if q3 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q2], n)
|
||||
} else {
|
||||
r3 := q2 - q3*1000
|
||||
stream.buf[n] = byte(q3 + '0')
|
||||
n++
|
||||
writeBuf(stream.buf, digits[r3], n)
|
||||
n += 3
|
||||
}
|
||||
writeBuf(stream.buf, digits[r2], n)
|
||||
writeBuf(stream.buf, digits[r1], n+3)
|
||||
stream.n = n + 6
|
||||
}
|
||||
|
||||
// WriteInt32 write int32 to stream
|
||||
func (stream *Stream) WriteInt32(nval int32) {
|
||||
stream.ensure(11)
|
||||
n := stream.n
|
||||
var val uint32
|
||||
if nval < 0 {
|
||||
val = uint32(-nval)
|
||||
stream.buf[n] = '-'
|
||||
n++
|
||||
} else {
|
||||
val = uint32(nval)
|
||||
}
|
||||
q1 := val / 1000
|
||||
if q1 == 0 {
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], n)
|
||||
return
|
||||
}
|
||||
r1 := val - q1*1000
|
||||
q2 := q1 / 1000
|
||||
if q2 == 0 {
|
||||
n := writeFirstBuf(stream.buf, digits[q1], n)
|
||||
writeBuf(stream.buf, digits[r1], n)
|
||||
stream.n = n + 3
|
||||
return
|
||||
}
|
||||
r2 := q1 - q2*1000
|
||||
q3 := q2 / 1000
|
||||
if q3 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q2], n)
|
||||
} else {
|
||||
r3 := q2 - q3*1000
|
||||
stream.buf[n] = byte(q3 + '0')
|
||||
n++
|
||||
writeBuf(stream.buf, digits[r3], n)
|
||||
n += 3
|
||||
}
|
||||
writeBuf(stream.buf, digits[r2], n)
|
||||
writeBuf(stream.buf, digits[r1], n+3)
|
||||
stream.n = n + 6
|
||||
}
|
||||
|
||||
// WriteUint64 write uint64 to stream
|
||||
func (stream *Stream) WriteUint64(val uint64) {
|
||||
stream.ensure(20)
|
||||
n := stream.n
|
||||
q1 := val / 1000
|
||||
if q1 == 0 {
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], n)
|
||||
return
|
||||
}
|
||||
r1 := val - q1*1000
|
||||
q2 := q1 / 1000
|
||||
if q2 == 0 {
|
||||
n := writeFirstBuf(stream.buf, digits[q1], n)
|
||||
writeBuf(stream.buf, digits[r1], n)
|
||||
stream.n = n + 3
|
||||
return
|
||||
}
|
||||
r2 := q1 - q2*1000
|
||||
q3 := q2 / 1000
|
||||
if q3 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q2], n)
|
||||
writeBuf(stream.buf, digits[r2], n)
|
||||
writeBuf(stream.buf, digits[r1], n+3)
|
||||
stream.n = n + 6
|
||||
return
|
||||
}
|
||||
r3 := q2 - q3*1000
|
||||
q4 := q3 / 1000
|
||||
if q4 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q3], n)
|
||||
writeBuf(stream.buf, digits[r3], n)
|
||||
writeBuf(stream.buf, digits[r2], n+3)
|
||||
writeBuf(stream.buf, digits[r1], n+6)
|
||||
stream.n = n + 9
|
||||
return
|
||||
}
|
||||
r4 := q3 - q4*1000
|
||||
q5 := q4 / 1000
|
||||
if q5 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q4], n)
|
||||
writeBuf(stream.buf, digits[r4], n)
|
||||
writeBuf(stream.buf, digits[r3], n+3)
|
||||
writeBuf(stream.buf, digits[r2], n+6)
|
||||
writeBuf(stream.buf, digits[r1], n+9)
|
||||
stream.n = n + 12
|
||||
return
|
||||
}
|
||||
r5 := q4 - q5*1000
|
||||
q6 := q5 / 1000
|
||||
if q6 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q5], n)
|
||||
} else {
|
||||
n = writeFirstBuf(stream.buf, digits[q6], n)
|
||||
r6 := q5 - q6*1000
|
||||
writeBuf(stream.buf, digits[r6], n)
|
||||
n += 3
|
||||
}
|
||||
writeBuf(stream.buf, digits[r5], n)
|
||||
writeBuf(stream.buf, digits[r4], n+3)
|
||||
writeBuf(stream.buf, digits[r3], n+6)
|
||||
writeBuf(stream.buf, digits[r2], n+9)
|
||||
writeBuf(stream.buf, digits[r1], n+12)
|
||||
stream.n = n + 15
|
||||
}
|
||||
|
||||
// WriteInt64 write int64 to stream
|
||||
func (stream *Stream) WriteInt64(nval int64) {
|
||||
stream.ensure(20)
|
||||
n := stream.n
|
||||
var val uint64
|
||||
if nval < 0 {
|
||||
val = uint64(-nval)
|
||||
stream.buf[n] = '-'
|
||||
n++
|
||||
} else {
|
||||
val = uint64(nval)
|
||||
}
|
||||
q1 := val / 1000
|
||||
if q1 == 0 {
|
||||
stream.n = writeFirstBuf(stream.buf, digits[val], n)
|
||||
return
|
||||
}
|
||||
r1 := val - q1*1000
|
||||
q2 := q1 / 1000
|
||||
if q2 == 0 {
|
||||
n := writeFirstBuf(stream.buf, digits[q1], n)
|
||||
writeBuf(stream.buf, digits[r1], n)
|
||||
stream.n = n + 3
|
||||
return
|
||||
}
|
||||
r2 := q1 - q2*1000
|
||||
q3 := q2 / 1000
|
||||
if q3 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q2], n)
|
||||
writeBuf(stream.buf, digits[r2], n)
|
||||
writeBuf(stream.buf, digits[r1], n+3)
|
||||
stream.n = n + 6
|
||||
return
|
||||
}
|
||||
r3 := q2 - q3*1000
|
||||
q4 := q3 / 1000
|
||||
if q4 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q3], n)
|
||||
writeBuf(stream.buf, digits[r3], n)
|
||||
writeBuf(stream.buf, digits[r2], n+3)
|
||||
writeBuf(stream.buf, digits[r1], n+6)
|
||||
stream.n = n + 9
|
||||
return
|
||||
}
|
||||
r4 := q3 - q4*1000
|
||||
q5 := q4 / 1000
|
||||
if q5 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q4], n)
|
||||
writeBuf(stream.buf, digits[r4], n)
|
||||
writeBuf(stream.buf, digits[r3], n+3)
|
||||
writeBuf(stream.buf, digits[r2], n+6)
|
||||
writeBuf(stream.buf, digits[r1], n+9)
|
||||
stream.n = n + 12
|
||||
return
|
||||
}
|
||||
r5 := q4 - q5*1000
|
||||
q6 := q5 / 1000
|
||||
if q6 == 0 {
|
||||
n = writeFirstBuf(stream.buf, digits[q5], n)
|
||||
} else {
|
||||
stream.buf[n] = byte(q6 + '0')
|
||||
n++
|
||||
r6 := q5 - q6*1000
|
||||
writeBuf(stream.buf, digits[r6], n)
|
||||
n += 3
|
||||
}
|
||||
writeBuf(stream.buf, digits[r5], n)
|
||||
writeBuf(stream.buf, digits[r4], n+3)
|
||||
writeBuf(stream.buf, digits[r3], n+6)
|
||||
writeBuf(stream.buf, digits[r2], n+9)
|
||||
writeBuf(stream.buf, digits[r1], n+12)
|
||||
stream.n = n + 15
|
||||
}
|
||||
|
||||
// WriteInt write int to stream
|
||||
func (stream *Stream) WriteInt(val int) {
|
||||
stream.WriteInt64(int64(val))
|
||||
}
|
||||
|
||||
// WriteUint write uint to stream
|
||||
func (stream *Stream) WriteUint(val uint) {
|
||||
stream.WriteUint64(uint64(val))
|
||||
}
396
vendor/github.com/json-iterator/go/feature_stream_string.go
generated
vendored
Normal file
@@ -0,0 +1,396 @@
package jsoniter
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// htmlSafeSet holds the value true if the ASCII character with the given
|
||||
// array position can be safely represented inside a JSON string, embedded
|
||||
// inside of HTML <script> tags, without any additional escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), the backslash character ("\"), HTML opening and closing
|
||||
// tags ("<" and ">"), and the ampersand ("&").
|
||||
var htmlSafeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': false,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': false,
|
||||
'=': true,
|
||||
'>': false,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
||||
|
||||
// safeSet holds the value true if the ASCII character with the given array
|
||||
// position can be represented inside a JSON string without any further
|
||||
// escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), and the backslash character ("\").
|
||||
var safeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': true,
|
||||
'=': true,
|
||||
'>': true,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
||||
|
||||
var hex = "0123456789abcdef"
|
||||
|
||||
// WriteStringWithHTMLEscaped write string to stream with html special characters escaped
|
||||
func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
|
||||
stream.ensure(32)
|
||||
valLen := len(s)
|
||||
toWriteLen := valLen
|
||||
bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes
|
||||
if stream.n+toWriteLen > bufLengthMinusTwo {
|
||||
toWriteLen = bufLengthMinusTwo - stream.n
|
||||
}
|
||||
n := stream.n
|
||||
stream.buf[n] = '"'
|
||||
n++
|
||||
// write string, the fast path, without utf8 and escape support
|
||||
i := 0
|
||||
	for ; i < toWriteLen; i++ {
		c := s[i]
		if c < utf8.RuneSelf && htmlSafeSet[c] {
			stream.buf[n] = c
			n++
		} else {
			break
		}
	}
	if i == valLen {
		stream.buf[n] = '"'
		n++
		stream.n = n
		return
	}
	stream.n = n
	writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
}

func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
	start := i
	// for the remaining parts, we process them char by char
	for i < valLen {
		if b := s[i]; b < utf8.RuneSelf {
			if htmlSafeSet[b] {
				i++
				continue
			}
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			switch b {
			case '\\', '"':
				stream.writeTwoBytes('\\', b)
			case '\n':
				stream.writeTwoBytes('\\', 'n')
			case '\r':
				stream.writeTwoBytes('\\', 'r')
			case '\t':
				stream.writeTwoBytes('\\', 't')
			default:
				// This encodes bytes < 0x20 except for \t, \n and \r.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				stream.WriteRaw(`\u00`)
				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			stream.WriteRaw(`\ufffd`)
			i++
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			stream.WriteRaw(`\u202`)
			stream.writeByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		stream.WriteRaw(s[start:])
	}
	stream.writeByte('"')
}

// WriteString write string to stream without html escape
func (stream *Stream) WriteString(s string) {
	stream.ensure(32)
	valLen := len(s)
	toWriteLen := valLen
	bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes
	if stream.n+toWriteLen > bufLengthMinusTwo {
		toWriteLen = bufLengthMinusTwo - stream.n
	}
	n := stream.n
	stream.buf[n] = '"'
	n++
	// write string, the fast path, without utf8 and escape support
	i := 0
	for ; i < toWriteLen; i++ {
		c := s[i]
		if c > 31 && c != '"' && c != '\\' {
			stream.buf[n] = c
			n++
		} else {
			break
		}
	}
	if i == valLen {
		stream.buf[n] = '"'
		n++
		stream.n = n
		return
	}
	stream.n = n
	writeStringSlowPath(stream, i, s, valLen)
}

func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
	start := i
	// for the remaining parts, we process them char by char
	for i < valLen {
		if b := s[i]; b < utf8.RuneSelf {
			if safeSet[b] {
				i++
				continue
			}
			if start < i {
				stream.WriteRaw(s[start:i])
			}
			switch b {
			case '\\', '"':
				stream.writeTwoBytes('\\', b)
			case '\n':
				stream.writeTwoBytes('\\', 'n')
			case '\r':
				stream.writeTwoBytes('\\', 'r')
			case '\t':
				stream.writeTwoBytes('\\', 't')
			default:
				// This encodes bytes < 0x20 except for \t, \n and \r.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				stream.WriteRaw(`\u00`)
				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		i++
		continue
	}
	if start < len(s) {
		stream.WriteRaw(s[start:])
	}
	stream.writeByte('"')
}
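The two variants above share the same fast-path/slow-path split: bytes that are safe for the active escaping mode are copied straight into the stream buffer, and the first unsafe byte hands the remainder of the string to the slow path. A minimal usage sketch follows; it assumes jsoniter's exported Config presets (ConfigCompatibleWithStandardLibrary with HTML escaping on, ConfigFastest with it off) and the MarshalToString helper, none of which are part of this hunk.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// With HTML escaping enabled, '<', '>' and '&' fall off the fast path
	// above and are emitted as \u003c, \u003e and \u0026.
	escaped, _ := jsoniter.ConfigCompatibleWithStandardLibrary.MarshalToString("<a>&</a>")
	// With escaping disabled, the whole string stays on the fast path.
	plain, _ := jsoniter.ConfigFastest.MarshalToString("<a>&</a>")
	fmt.Println(escaped)
	fmt.Println(plain)
}
```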
7
vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
generated
vendored
Normal file
7
vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
generated
vendored
Normal file
@ -0,0 +1,7 @@
| json type \ dest type | bool | int | uint | float | string |
| --- | --- | --- | --- | --- | --- |
| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as origin |
| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | 13.2 => 13 <br/> -1.1 => 0 | 12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 | same as origin |
| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
| object | true | 0 | 0 | 0 | original json |
| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
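The conversions in the table apply when jsoniter's fuzzy decoders are active. A hedged sketch, assuming the RegisterFuzzyDecoders helper from the library's extra sub-package (that package is not part of this commit):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
	"github.com/json-iterator/go/extra"
)

func main() {
	// Registering the fuzzy decoders enables the table's conversions,
	// e.g. decoding the JSON string "123.32" into an int field.
	extra.RegisterFuzzyDecoders()

	var v struct {
		Count int `json:"count"`
	}
	if err := jsoniter.Unmarshal([]byte(`{"count": "123.32"}`), &v); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(v.Count) // 123, per the string-to-int row of the table
}
```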
18
vendor/github.com/json-iterator/go/jsoniter.go
generated
vendored
Normal file
18
vendor/github.com/json-iterator/go/jsoniter.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
// Package jsoniter implements encoding and decoding of JSON as defined in
// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json.
// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter
// and variable type declarations (if any).
// jsoniter interfaces gives 100% compatibility with code using standard lib.
//
// "JSON and Go"
// (https://golang.org/doc/articles/json_and_go.html)
// gives a description of how Marshal/Unmarshal operate
// between arbitrary or predefined json objects and bytes,
// and it applies to jsoniter.Marshal/Unmarshal as well.
//
// Besides, jsoniter.Iterator provides a different set of interfaces
// iterating given bytes/string/reader
// and yielding parsed elements one by one.
// This set of interfaces reads input as required and gives
// better performance.
package jsoniter
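A short drop-in sketch of the Marshal/Unmarshal compatibility the package comment describes; the struct and values are illustrative only:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type Point struct {
	X, Y int
}

func main() {
	// jsoniter.Marshal/Unmarshal mirror encoding/json's signatures.
	data, err := jsoniter.Marshal(Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"X":1,"Y":2}

	var p Point
	if err := jsoniter.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.X, p.Y) // 1 2
}
```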
12
vendor/github.com/json-iterator/go/test.sh
generated
vendored
Executable file
12
vendor/github.com/json-iterator/go/test.sh
generated
vendored
Executable file
@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    go test -coverprofile=profile.out $d
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
Normal file
12
vendor/github.com/opencontainers/go-digest/.pullapprove.yml
generated
vendored
Normal file
@ -0,0 +1,12 @@
approve_by_comment: true
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
reject_regex: ^Rejected
reset_on_push: true
author_approval: ignored
signed_off_by:
  required: true
reviewers:
  teams:
    - go-digest-maintainers
  name: default
  required: 2
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
Normal file
4
vendor/github.com/opencontainers/go-digest/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,4 @@
language: go
go:
  - 1.7
  - master
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
Normal file
72
vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,72 @@
# Contributing to Docker open source projects

Want to hack on this project? Awesome! Here are instructions to get you started.

This project is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.

Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).

For an in-depth description of our contribution process, visit the
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)

### Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
191
vendor/github.com/opencontainers/go-digest/LICENSE.code
generated
vendored
Normal file
191
vendor/github.com/opencontainers/go-digest/LICENSE.code
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2016 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
425
vendor/github.com/opencontainers/go-digest/LICENSE.docs
generated
vendored
Normal file
425
vendor/github.com/opencontainers/go-digest/LICENSE.docs
generated
vendored
Normal file
|
@ -0,0 +1,425 @@
|
|||
Attribution-ShareAlike 4.0 International
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons Corporation ("Creative Commons") is not a law firm and
|
||||
does not provide legal services or legal advice. Distribution of
|
||||
Creative Commons public licenses does not create a lawyer-client or
|
||||
other relationship. Creative Commons makes its licenses and related
|
||||
information available on an "as-is" basis. Creative Commons gives no
|
||||
warranties regarding its licenses, any material licensed under their
|
||||
terms and conditions, or any related information. Creative Commons
|
||||
disclaims all liability for damages resulting from their use to the
|
||||
fullest extent possible.
|
||||
|
||||
Using Creative Commons Public Licenses
|
||||
|
||||
Creative Commons public licenses provide a standard set of terms and
|
||||
conditions that creators and other rights holders may use to share
|
||||
original works of authorship and other material subject to copyright
|
||||
and certain other rights specified in the public license below. The
|
||||
following considerations are for informational purposes only, are not
|
||||
exhaustive, and do not form part of our licenses.
|
||||
|
||||
Considerations for licensors: Our public licenses are
|
||||
intended for use by those authorized to give the public
|
||||
permission to use material in ways otherwise restricted by
|
||||
copyright and certain other rights. Our licenses are
|
||||
irrevocable. Licensors should read and understand the terms
|
||||
and conditions of the license they choose before applying it.
|
||||
Licensors should also secure all rights necessary before
|
||||
applying our licenses so that the public can reuse the
|
||||
material as expected. Licensors should clearly mark any
|
||||
material not subject to the license. This includes other CC-
|
||||
licensed material, or material used under an exception or
|
||||
limitation to copyright. More considerations for licensors:
|
||||
wiki.creativecommons.org/Considerations_for_licensors
|
||||
|
||||
Considerations for the public: By using one of our public
|
||||
licenses, a licensor grants the public permission to use the
|
||||
licensed material under specified terms and conditions. If
|
||||
the licensor's permission is not necessary for any reason--for
|
||||
example, because of any applicable exception or limitation to
|
||||
copyright--then that use is not regulated by the license. Our
|
||||
licenses grant only permissions under copyright and certain
|
||||
other rights that a licensor has authority to grant. Use of
|
||||
the licensed material may still be restricted for other
|
||||
reasons, including because others have copyright or other
|
||||
rights in the material. A licensor may make special requests,
|
||||
such as asking that all changes be marked or described.
|
||||
Although not required by our licenses, you are encouraged to
|
||||
respect those requests where reasonable. More_considerations
|
||||
for the public:
|
||||
wiki.creativecommons.org/Considerations_for_licensees
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons Attribution-ShareAlike 4.0 International Public
|
||||
License
|
||||
|
||||
By exercising the Licensed Rights (defined below), You accept and agree
|
||||
to be bound by the terms and conditions of this Creative Commons
|
||||
Attribution-ShareAlike 4.0 International Public License ("Public
|
||||
License"). To the extent this Public License may be interpreted as a
|
||||
contract, You are granted the Licensed Rights in consideration of Your
|
||||
acceptance of these terms and conditions, and the Licensor grants You
|
||||
such rights in consideration of benefits the Licensor receives from
|
||||
making the Licensed Material available under these terms and
|
||||
conditions.
|
||||
|
||||
|
||||
Section 1 -- Definitions.
|
||||
|
||||
a. Adapted Material means material subject to Copyright and Similar
|
||||
Rights that is derived from or based upon the Licensed Material
|
||||
and in which the Licensed Material is translated, altered,
|
||||
arranged, transformed, or otherwise modified in a manner requiring
|
||||
permission under the Copyright and Similar Rights held by the
|
||||
Licensor. For purposes of this Public License, where the Licensed
|
||||
Material is a musical work, performance, or sound recording,
|
||||
Adapted Material is always produced where the Licensed Material is
|
||||
synched in timed relation with a moving image.
|
||||
|
||||
b. Adapter's License means the license You apply to Your Copyright
|
||||
and Similar Rights in Your contributions to Adapted Material in
|
||||
accordance with the terms and conditions of this Public License.
|
||||
|
||||
c. BY-SA Compatible License means a license listed at
|
||||
creativecommons.org/compatiblelicenses, approved by Creative
|
||||
Commons as essentially the equivalent of this Public License.
|
||||
|
||||
d. Copyright and Similar Rights means copyright and/or similar rights
|
||||
closely related to copyright including, without limitation,
|
||||
performance, broadcast, sound recording, and Sui Generis Database
|
||||
Rights, without regard to how the rights are labeled or
|
||||
categorized. For purposes of this Public License, the rights
|
||||
specified in Section 2(b)(1)-(2) are not Copyright and Similar
|
||||
Rights.
|
||||
|
||||
e. Effective Technological Measures means those measures that, in the
|
||||
absence of proper authority, may not be circumvented under laws
|
||||
fulfilling obligations under Article 11 of the WIPO Copyright
|
||||
Treaty adopted on December 20, 1996, and/or similar international
|
||||
agreements.
|
||||
|
||||
f. Exceptions and Limitations means fair use, fair dealing, and/or
|
||||
any other exception or limitation to Copyright and Similar Rights
|
||||
that applies to Your use of the Licensed Material.
|
||||
|
||||
g. License Elements means the license attributes listed in the name
|
||||
of a Creative Commons Public License. The License Elements of this
|
||||
Public License are Attribution and ShareAlike.
|
||||
|
||||
h. Licensed Material means the artistic or literary work, database,
|
||||
or other material to which the Licensor applied this Public
|
||||
License.
|
||||
|
||||
i. Licensed Rights means the rights granted to You subject to the
|
||||
terms and conditions of this Public License, which are limited to
|
||||
all Copyright and Similar Rights that apply to Your use of the
|
||||
Licensed Material and that the Licensor has authority to license.
|
||||
|
||||
j. Licensor means the individual(s) or entity(ies) granting rights
|
||||
under this Public License.
|
||||
|
||||
k. Share means to provide material to the public by any means or
|
||||
process that requires permission under the Licensed Rights, such
|
||||
as reproduction, public display, public performance, distribution,
|
||||
dissemination, communication, or importation, and to make material
|
||||
available to the public including in ways that members of the
|
||||
public may access the material from a place and at a time
|
||||
individually chosen by them.
|
||||
|
||||
l. Sui Generis Database Rights means rights other than copyright
|
||||
resulting from Directive 96/9/EC of the European Parliament and of
|
||||
the Council of 11 March 1996 on the legal protection of databases,
|
||||
as amended and/or succeeded, as well as other essentially
|
||||
equivalent rights anywhere in the world.
|
||||
|
||||
m. You means the individual or entity exercising the Licensed Rights
|
||||
under this Public License. Your has a corresponding meaning.
|
||||
|
||||
|
||||
Section 2 -- Scope.
|
||||
|
||||
a. License grant.
|
||||
|
||||
1. Subject to the terms and conditions of this Public License,
|
||||
the Licensor hereby grants You a worldwide, royalty-free,
|
||||
non-sublicensable, non-exclusive, irrevocable license to
|
||||
exercise the Licensed Rights in the Licensed Material to:
|
||||
|
||||
a. reproduce and Share the Licensed Material, in whole or
|
||||
in part; and
|
||||
|
||||
b. produce, reproduce, and Share Adapted Material.
|
||||
|
||||
2. Exceptions and Limitations. For the avoidance of doubt, where
|
||||
Exceptions and Limitations apply to Your use, this Public
|
||||
License does not apply, and You do not need to comply with
|
||||
its terms and conditions.
|
||||
|
||||
3. Term. The term of this Public License is specified in Section
|
||||
6(a).
|
||||
|
||||
4. Media and formats; technical modifications allowed. The
|
||||
Licensor authorizes You to exercise the Licensed Rights in
|
||||
all media and formats whether now known or hereafter created,
|
||||
and to make technical modifications necessary to do so. The
|
||||
Licensor waives and/or agrees not to assert any right or
|
||||
authority to forbid You from making technical modifications
|
||||
necessary to exercise the Licensed Rights, including
|
||||
technical modifications necessary to circumvent Effective
|
||||
Technological Measures. For purposes of this Public License,
|
||||
simply making modifications authorized by this Section 2(a)
|
||||
(4) never produces Adapted Material.
|
||||
|
||||
5. Downstream recipients.
|
||||
|
||||
a. Offer from the Licensor -- Licensed Material. Every
|
||||
recipient of the Licensed Material automatically
|
||||
receives an offer from the Licensor to exercise the
|
||||
Licensed Rights under the terms and conditions of this
|
||||
Public License.
|
||||
|
||||
b. Additional offer from the Licensor -- Adapted Material.
|
||||
Every recipient of Adapted Material from You
|
||||
automatically receives an offer from the Licensor to
|
||||
exercise the Licensed Rights in the Adapted Material
|
||||
under the conditions of the Adapter's License You apply.
|
||||
|
||||
c. No downstream restrictions. You may not offer or impose
|
||||
any additional or different terms or conditions on, or
|
||||
apply any Effective Technological Measures to, the
|
||||
Licensed Material if doing so restricts exercise of the
|
||||
Licensed Rights by any recipient of the Licensed
|
||||
Material.
|
||||
|
||||
6. No endorsement. Nothing in this Public License constitutes or
|
||||
may be construed as permission to assert or imply that You
|
||||
are, or that Your use of the Licensed Material is, connected
|
||||
with, or sponsored, endorsed, or granted official status by,
|
||||
the Licensor or others designated to receive attribution as
|
||||
provided in Section 3(a)(1)(A)(i).
|
||||
|
||||
b. Other rights.
|
||||
|
||||
1. Moral rights, such as the right of integrity, are not
|
||||
licensed under this Public License, nor are publicity,
|
||||
privacy, and/or other similar personality rights; however, to
|
||||
the extent possible, the Licensor waives and/or agrees not to
|
||||
assert any such rights held by the Licensor to the limited
|
||||
extent necessary to allow You to exercise the Licensed
|
||||
Rights, but not otherwise.
|
||||
|
||||
2. Patent and trademark rights are not licensed under this
|
||||
Public License.
|
||||
|
||||
3. To the extent possible, the Licensor waives any right to
|
||||
collect royalties from You for the exercise of the Licensed
|
||||
Rights, whether directly or through a collecting society
|
||||
under any voluntary or waivable statutory or compulsory
|
||||
licensing scheme. In all other cases the Licensor expressly
|
||||
reserves any right to collect such royalties.
|
||||
|
||||
|
||||
Section 3 -- License Conditions.
|
||||
|
||||
Your exercise of the Licensed Rights is expressly made subject to the
|
||||
following conditions.
|
||||
|
||||
a. Attribution.
|
||||
|
||||
1. If You Share the Licensed Material (including in modified
|
||||
form), You must:
|
||||
|
||||
a. retain the following if it is supplied by the Licensor
|
||||
with the Licensed Material:
|
||||
|
||||
i. identification of the creator(s) of the Licensed
|
||||
Material and any others designated to receive
|
||||
attribution, in any reasonable manner requested by
|
||||
the Licensor (including by pseudonym if
|
||||
designated);
|
||||
|
||||
ii. a copyright notice;
|
||||
|
||||
iii. a notice that refers to this Public License;
|
||||
|
||||
iv. a notice that refers to the disclaimer of
|
||||
warranties;
|
||||
|
||||
v. a URI or hyperlink to the Licensed Material to the
|
||||
extent reasonably practicable;
|
||||
|
||||
b. indicate if You modified the Licensed Material and
|
||||
retain an indication of any previous modifications; and
|
||||
|
||||
c. indicate the Licensed Material is licensed under this
|
||||
Public License, and include the text of, or the URI or
|
||||
hyperlink to, this Public License.
|
||||
|
||||
2. You may satisfy the conditions in Section 3(a)(1) in any
|
||||
reasonable manner based on the medium, means, and context in
|
||||
which You Share the Licensed Material. For example, it may be
|
||||
reasonable to satisfy the conditions by providing a URI or
|
||||
hyperlink to a resource that includes the required
|
||||
information.
|
||||
|
||||
3. If requested by the Licensor, You must remove any of the
|
||||
information required by Section 3(a)(1)(A) to the extent
|
||||
reasonably practicable.
|
||||
|
||||
b. ShareAlike.
|
||||
|
||||
In addition to the conditions in Section 3(a), if You Share
|
||||
Adapted Material You produce, the following conditions also apply.
|
||||
|
||||
1. The Adapter's License You apply must be a Creative Commons
|
||||
license with the same License Elements, this version or
|
||||
later, or a BY-SA Compatible License.
|
||||
|
||||
2. You must include the text of, or the URI or hyperlink to, the
|
||||
Adapter's License You apply. You may satisfy this condition
|
||||
in any reasonable manner based on the medium, means, and
|
||||
context in which You Share Adapted Material.
|
||||
|
||||
3. You may not offer or impose any additional or different terms
|
||||
or conditions on, or apply any Effective Technological
|
||||
Measures to, Adapted Material that restrict exercise of the
|
||||
rights granted under the Adapter's License You apply.
|
||||
|
||||
|
||||
Section 4 -- Sui Generis Database Rights.
|
||||
|
||||
Where the Licensed Rights include Sui Generis Database Rights that
|
||||
apply to Your use of the Licensed Material:
|
||||
|
||||
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
|
||||
to extract, reuse, reproduce, and Share all or a substantial
|
||||
portion of the contents of the database;
|
||||
|
||||
b. if You include all or a substantial portion of the database
|
||||
contents in a database in which You have Sui Generis Database
|
||||
Rights, then the database in which You have Sui Generis Database
|
||||
Rights (but not its individual contents) is Adapted Material,
|
||||
|
||||
including for purposes of Section 3(b); and
|
||||
c. You must comply with the conditions in Section 3(a) if You Share
|
||||
all or a substantial portion of the contents of the database.
|
||||
|
||||
For the avoidance of doubt, this Section 4 supplements and does not
|
||||
replace Your obligations under this Public License where the Licensed
|
||||
Rights include other Copyright and Similar Rights.
|
||||
|
||||
|
||||
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
|
||||
|
||||
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
|
||||
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
|
||||
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
|
||||
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
|
||||
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
|
||||
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
|
||||
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
|
||||
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
|
||||
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
|
||||
|
||||
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
|
||||
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
|
||||
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
|
||||
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
|
||||
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
|
||||
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
|
||||
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
|
||||
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
|
||||
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
|
||||
|
||||
c. The disclaimer of warranties and limitation of liability provided
|
||||
above shall be interpreted in a manner that, to the extent
|
||||
possible, most closely approximates an absolute disclaimer and
|
||||
waiver of all liability.
|
||||
|
||||
|
||||
Section 6 -- Term and Termination.
|
||||
|
||||
a. This Public License applies for the term of the Copyright and
|
||||
Similar Rights licensed here. However, if You fail to comply with
|
||||
this Public License, then Your rights under this Public License
|
||||
terminate automatically.
|
||||
|
||||
b. Where Your right to use the Licensed Material has terminated under
|
||||
Section 6(a), it reinstates:
|
||||
|
||||
1. automatically as of the date the violation is cured, provided
|
||||
it is cured within 30 days of Your discovery of the
|
||||
violation; or
|
||||
|
||||
2. upon express reinstatement by the Licensor.
|
||||
|
||||
For the avoidance of doubt, this Section 6(b) does not affect any
|
||||
right the Licensor may have to seek remedies for Your violations
|
||||
of this Public License.
|
||||
|
||||
c. For the avoidance of doubt, the Licensor may also offer the
|
||||
Licensed Material under separate terms or conditions or stop
|
||||
distributing the Licensed Material at any time; however, doing so
|
||||
will not terminate this Public License.
|
||||
|
||||
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
|
||||
License.
|
||||
|
||||
|
||||
Section 7 -- Other Terms and Conditions.
|
||||
|
||||
a. The Licensor shall not be bound by any additional or different
|
||||
terms or conditions communicated by You unless expressly agreed.
|
||||
|
||||
b. Any arrangements, understandings, or agreements regarding the
|
||||
Licensed Material not stated herein are separate from and
|
||||
independent of the terms and conditions of this Public License.
|
||||
|
||||
|
||||
Section 8 -- Interpretation.
|
||||
|
||||
a. For the avoidance of doubt, this Public License does not, and
|
||||
shall not be interpreted to, reduce, limit, restrict, or impose
|
||||
conditions on any use of the Licensed Material that could lawfully
|
||||
be made without permission under this Public License.
|
||||
|
||||
b. To the extent possible, if any provision of this Public License is
|
||||
deemed unenforceable, it shall be automatically reformed to the
|
||||
minimum extent necessary to make it enforceable. If the provision
|
||||
cannot be reformed, it shall be severed from this Public License
|
||||
without affecting the enforceability of the remaining terms and
|
||||
conditions.
|
||||
|
||||
c. No term or condition of this Public License will be waived and no
|
||||
failure to comply consented to unless expressly agreed to by the
|
||||
Licensor.
|
||||
|
||||
d. Nothing in this Public License constitutes or may be interpreted
|
||||
as a limitation upon, or waiver of, any privileges and immunities
|
||||
that apply to the Licensor or You, including from the legal
|
||||
processes of any jurisdiction or authority.
|
||||
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons is not a party to its public licenses.
|
||||
Notwithstanding, Creative Commons may elect to apply one of its public
|
||||
licenses to material it publishes and in those instances will be
|
||||
considered the "Licensor." Except for the limited purpose of indicating
|
||||
that material is shared under a Creative Commons public license or as
|
||||
otherwise permitted by the Creative Commons policies published at
|
||||
creativecommons.org/policies, Creative Commons does not authorize the
|
||||
use of the trademark "Creative Commons" or any other trademark or logo
|
||||
of Creative Commons without its prior written consent including,
|
||||
without limitation, in connection with any unauthorized modifications
|
||||
to any of its public licenses or any other arrangements,
|
||||
understandings, or agreements concerning use of licensed material. For
|
||||
the avoidance of doubt, this paragraph does not form part of the public
|
||||
licenses.
|
||||
|
||||
Creative Commons may be contacted at creativecommons.org.
|
7
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
Normal file
7
vendor/github.com/opencontainers/go-digest/MAINTAINERS
generated
vendored
Normal file
@ -0,0 +1,7 @@
Brandon Philips <brandon.philips@coreos.com> (@philips)
Brendan Burns <bburns@microsoft.com> (@brendandburns)
Jason Bouzane <jbouzane@google.com> (@jbouzane)
John Starks <jostarks@microsoft.com> (@jstarks)
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
Stephen Day <stephen.day@docker.com> (@stevvooe)
Vincent Batts <vbatts@redhat.com> (@vbatts)
104
vendor/github.com/opencontainers/go-digest/README.md
generated
vendored
Normal file
104
vendor/github.com/opencontainers/go-digest/README.md
generated
vendored
Normal file
@ -0,0 +1,104 @@
# go-digest

[](https://godoc.org/github.com/docker/go-digest) [](https://goreportcard.com/report/github.com/docker/go-digest) [](https://travis-ci.org/docker/go-digest)

Common digest package used across the container ecosystem.

Please see the [godoc](https://godoc.org/github.com/docker/go-digest) for more information.

# What is a digest?

A digest is just a hash.

The most common use case for a digest is to create a content
identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
systems:

```go
id := digest.FromBytes([]byte("my content"))
```

In the example above, the id can be used to uniquely identify
the byte slice "my content". This allows two disparate applications
to agree on a verifiable identifier without having to trust one
another.

An identifying digest can be verified, as follows:

```go
if id != digest.FromBytes([]byte("my content")) {
	return errors.New("the content has changed!")
}
```

A `Verifier` type can be used to handle cases where an `io.Reader`
makes more sense:

```go
rd := getContent()
verifier := id.Verifier()
io.Copy(verifier, rd)

if !verifier.Verified() {
	return errors.New("the content has changed!")
}
```

Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
can power a rich, safe, content distribution system.

# Usage

While the [godoc](https://godoc.org/github.com/docker/go-digest) is
considered the best resource, a few important items need to be called
out when using this package.

1. Make sure to import the hash implementations into your application
   or the package will panic. You should have something like the
   following in the main (or other entrypoint) of your application:

   ```go
   import (
       _ "crypto/sha256"
       _ "crypto/sha512"
   )
   ```
   This may seem inconvenient but it allows you replace the hash
   implementations with others, such as https://github.com/stevvooe/resumable.

2. Even though `digest.Digest` may be assemable as a string, _always_
   verify your input with `digest.Parse` or use `Digest.Validate`
   when accepting untrusted input. While there are measures to
   avoid common problems, this will ensure you have valid digests
   in the rest of your application.

# Stability

The Go API, at this stage, is considered stable, unless otherwise noted.

As always, before using a package export, read the [godoc](https://godoc.org/github.com/docker/go-digest).

# Contributing

This package is considered fairly complete. It has been in production
in thousands (millions?) of deployments and is fairly battle-hardened.
New additions will be met with skepticism. If you think there is a
missing feature, please file a bug clearly describing the problem and
the alternatives you tried before submitting a PR.

# Reporting security issues

The maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please DO NOT file a public issue, instead send your report privately
to security@docker.com.

Security reports are greatly appreciated and we will publicly thank you
for it. We also like to send gifts—if you're into Docker schwag, make
sure to let us know. We currently do not offer a paid security bounty
program, but are not ruling it out in the future.

# Copyright and license

Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
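A short sketch of the validation advice in the Usage section, using the Parse, Algorithm, and Hex helpers shown in the hunks that follow; the sample value is the well-known SHA-256 of empty input and is only illustrative:

```go
package main

import (
	_ "crypto/sha256" // register the hash so sha256 digests are "available"
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Validate an untrusted digest string before using it.
	untrusted := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	d, err := digest.Parse(untrusted)
	if err != nil {
		fmt.Println("invalid digest:", err)
		return
	}
	fmt.Println(d.Algorithm(), d.Hex())
}
```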
@ -39,7 +39,7 @@ var (
)

// Available returns true if the digest type is available for use. If this
// returns false, New and Hash will return nil.
// returns false, Digester and Hash will return nil.
func (a Algorithm) Available() bool {
	h, ok := algorithms[a]
	if !ok {
@ -72,13 +72,17 @@ func (a *Algorithm) Set(value string) error {
		*a = Algorithm(value)
	}

	if !a.Available() {
		return ErrDigestUnsupported
	}

	return nil
}

// New returns a new digester for the specified algorithm. If the algorithm
// Digester returns a new digester for the specified algorithm. If the algorithm
// does not have a digester implementation, nil will be returned. This can be
// checked by calling Available before calling New.
func (a Algorithm) New() Digester {
// checked by calling Available before calling Digester.
func (a Algorithm) Digester() Digester {
	return &digester{
		alg:  a,
		hash: a.Hash(),
@ -89,6 +93,11 @@ func (a Algorithm) New() Digester {
// method will panic. Check Algorithm.Available() before calling.
func (a Algorithm) Hash() hash.Hash {
	if !a.Available() {
		// Empty algorithm string is invalid
		if a == "" {
			panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
		}

		// NOTE(stevvooe): A missing hash is usually a programming error that
		// must be resolved at compile time. We don't import in the digest
		// package to allow users to choose their hash implementation (such as
@ -104,7 +113,7 @@ func (a Algorithm) Hash() hash.Hash {

// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
	digester := a.New()
	digester := a.Digester()

	if _, err := io.Copy(digester.Hash(), rd); err != nil {
		return "", err
@ -115,7 +124,7 @@ func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {

// FromBytes digests the input and returns a Digest.
func (a Algorithm) FromBytes(p []byte) Digest {
	digester := a.New()
	digester := a.Digester()

	if _, err := digester.Hash().Write(p); err != nil {
		// Writes to a Hash should never fail. None of the existing
@ -129,27 +138,7 @@ func (a Algorithm) FromBytes(p []byte) Digest {
	return digester.Digest()
}

// TODO(stevvooe): Allow resolution of verifiers using the digest type and
// this registration system.

// Digester calculates the digest of written data. Writes should go directly
// to the return value of Hash, while calling Digest will return the current
// value of the digest.
type Digester interface {
	Hash() hash.Hash // provides direct access to underlying hash instance.
	Digest() Digest
}

// digester provides a simple digester definition that embeds a hasher.
type digester struct {
	alg  Algorithm
	hash hash.Hash
}

func (d *digester) Hash() hash.Hash {
	return d.hash
}

func (d *digester) Digest() Digest {
	return NewDigest(d.alg, d.hash)
// FromString digests the string input and returns a Digest.
func (a Algorithm) FromString(s string) Digest {
	return a.FromBytes([]byte(s))
}
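A hedged sketch of the renamed API above: callers that previously wrote a.New() now call a.Digester(), feed the underlying hash, and read back the Digest. The caller code is assumed, not part of this diff:

```go
package main

import (
	_ "crypto/sha256" // register the hash backing digest.SHA256
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	digester := digest.SHA256.Digester() // was digest.SHA256.New() before this change
	if _, err := io.Copy(digester.Hash(), strings.NewReader("my content")); err != nil {
		fmt.Println("hashing failed:", err)
		return
	}
	fmt.Println(digester.Digest()) // sha256:...
}
```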
@ -8,11 +8,6 @@ import (
	"strings"
)

const (
	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)

// Digest allows simple protection of hex formatted digest strings, prefixed
// by their algorithm. Strings of type Digest have some guarantee of being in
// the correct format and it provides quick access to the components of a
@ -61,16 +56,14 @@ var (
	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)

// ParseDigest parses s and returns the validated digest object. An error will
// Parse parses s and returns the validated digest object. An error will
// be returned if the format is invalid.
func ParseDigest(s string) (Digest, error) {
func Parse(s string) (Digest, error) {
	d := Digest(s)

	return d, d.Validate()
}

// FromReader returns the most valid digest for the underlying content using
// the canonical digest algorithm.
// FromReader consumes the content of rd until io.EOF, returning canonical digest.
func FromReader(rd io.Reader) (Digest, error) {
	return Canonical.FromReader(rd)
}
@ -80,35 +73,34 @@ func FromBytes(p []byte) Digest {
	return Canonical.FromBytes(p)
}

// FromString digests the input and returns a Digest.
func FromString(s string) Digest {
	return Canonical.FromString(s)
}

// Validate checks that the contents of d is a valid digest, returning an
// error if not.
func (d Digest) Validate() error {
	s := string(d)

	if !DigestRegexpAnchored.MatchString(s) {
		return ErrDigestInvalidFormat
	}

	i := strings.Index(s, ":")
	if i < 0 {

	// validate i then run through regexp
	if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
		return ErrDigestInvalidFormat
	}

	// case: "sha256:" with no hex.
	if i+1 == len(s) {
		return ErrDigestInvalidFormat
	}

	switch algorithm := Algorithm(s[:i]); algorithm {
	case SHA256, SHA384, SHA512:
		if algorithm.Size()*2 != len(s[i+1:]) {
			return ErrDigestInvalidLength
		}
		break
	default:
	algorithm := Algorithm(s[:i])
	if !algorithm.Available() {
		return ErrDigestUnsupported
	}

	// Digests much always be hex-encoded, ensuring that their hex portion will
	// always be size*2
	if algorithm.Size()*2 != len(s[i+1:]) {
		return ErrDigestInvalidLength
	}

	return nil
}

@ -118,6 +110,15 @@ func (d Digest) Algorithm() Algorithm {
	return Algorithm(d[:d.sepIndex()])
}

// Verifier returns a writer object that can be used to verify a stream of
// content against the digest. If the digest is invalid, the method will panic.
func (d Digest) Verifier() Verifier {
	return hashVerifier{
		hash:   d.Algorithm().Hash(),
		digest: d,
	}
}

// Hex returns the hex digest portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Hex() string {
@ -132,7 +133,7 @@ func (d Digest) sepIndex() int {
	i := strings.Index(string(d), ":")

	if i < 0 {
		panic("could not find ':' in digest: " + d)
		panic(fmt.Sprintf("no ':' separator in digest %q", d))
	}

	return i
25
vendor/github.com/opencontainers/go-digest/digester.go
generated
vendored
Normal file
25
vendor/github.com/opencontainers/go-digest/digester.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
package digest

import "hash"

// Digester calculates the digest of written data. Writes should go directly
// to the return value of Hash, while calling Digest will return the current
// value of the digest.
type Digester interface {
	Hash() hash.Hash // provides direct access to underlying hash instance.
	Digest() Digest
}

// digester provides a simple digester definition that embeds a hasher.
type digester struct {
	alg  Algorithm
	hash hash.Hash
}

func (d *digester) Hash() hash.Hash {
	return d.hash
}

func (d *digester) Digest() Digest {
	return NewDigest(d.alg, d.hash)
}
@ -17,19 +17,6 @@ type Verifier interface {
|
|||
Verified() bool
|
||||
}
|
||||
|
||||
// NewDigestVerifier returns a verifier that compares the written bytes
|
||||
// against a passed in digest.
|
||||
func NewDigestVerifier(d Digest) (Verifier, error) {
|
||||
if err := d.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return hashVerifier{
|
||||
hash: d.Algorithm().Hash(),
|
||||
digest: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type hashVerifier struct {
|
||||
digest Digest
|
||||
hash hash.Hash
|
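This hunk removes NewDigestVerifier in favor of the Digest.Verifier method added above. A minimal migration sketch for caller code (assumed, not part of this diff):

```go
package main

import (
	_ "crypto/sha256"
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	id := digest.FromString("my content")

	// Before this update: verifier, err := digest.NewDigestVerifier(id)
	// After this update:
	verifier := id.Verifier()
	if _, err := io.Copy(verifier, strings.NewReader("my content")); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(verifier.Verified()) // true
}
```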
13
vendor/github.com/ugorji/go/LICENSE → vendor/github.com/peterbourgon/diskv/LICENSE
generated
vendored
13
vendor/github.com/ugorji/go/LICENSE → vendor/github.com/peterbourgon/diskv/LICENSE
generated
vendored
|
@ -1,7 +1,4 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2015 Ugorji Nwoke.
|
||||
All rights reserved.
|
||||
Copyright (c) 2011-2012 Peter Bourgon
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
@ -10,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
141
vendor/github.com/peterbourgon/diskv/README.md
generated
vendored
Normal file
141
vendor/github.com/peterbourgon/diskv/README.md
generated
vendored
Normal file
@ -0,0 +1,141 @@
# What is diskv?

Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.

[![Build Status][1]][2]

[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest


# Installing

Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,

```bash
$ go get github.com/peterbourgon/diskv
```

[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install


# Usage

```go
package main

import (
	"fmt"
	"github.com/peterbourgon/diskv"
)

func main() {
	// Simplest transform function: put all the data files into the base dir.
	flatTransform := func(s string) []string { return []string{} }

	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
	})

	// Write three bytes to the key "alpha".
	key := "alpha"
	d.Write(key, []byte{'1', '2', '3'})

	// Read the value back out of the store.
	value, _ := d.Read(key)
	fmt.Printf("%v\n", value)

	// Erase the key+value from the store (and the disk).
	d.Erase(key)
}
```

More complex examples can be found in the "examples" subdirectory.
|
||||
|
||||
# Theory
|
||||
|
||||
## Basic idea
|
||||
|
||||
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
|
||||
The data is written to a single file on disk, with the same name as the key.
|
||||
The key determines where that file will be stored, via a user-provided
|
||||
`TransformFunc`, which takes a key and returns a slice (`[]string`)
|
||||
corresponding to a path list where the key file will be stored. The simplest
|
||||
TransformFunc,
|
||||
|
||||
```go
|
||||
func SimpleTransform (key string) []string {
|
||||
return []string{}
|
||||
}
|
||||
```
|
||||
|
||||
will place all keys in the same, base directory. The design is inspired by
|
||||
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
|
||||
behavior is available in the content-addressable-storage example.
|
||||
|
||||
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
|
||||
|
||||
**Note** that your TransformFunc should ensure that one valid key doesn't
|
||||
transform to a subset of another valid key. That is, it shouldn't be possible
|
||||
to construct valid keys that resolve to directory names. As a concrete example,
|
||||
if your TransformFunc splits on every 3 characters, then
|
||||
|
||||
```go
|
||||
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
|
||||
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory
|
||||
```
|
||||
|
||||
This will be addressed in an upcoming version of diskv.
|
||||
|
||||
Probably the most important design principle behind diskv is that your data is
|
||||
always flatly available on the disk. diskv will never do anything that would
|
||||
prevent you from accessing, copying, backing up, or otherwise interacting with
|
||||
your data via common UNIX commandline tools.
|
||||
|
||||
## Adding a cache
|
||||
|
||||
An in-memory caching layer is provided by combining the BasicStore
|
||||
functionality with a simple map structure, and keeping it up-to-date as
|
||||
appropriate. Since the map structure in Go is not threadsafe, it's combined
|
||||
with a RWMutex to provide safe concurrent access.
|
||||
|
||||
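As a rough illustration of that arrangement (a sketch, not the store's actual internals):

```go
// cache is an RWMutex-guarded map, in the spirit described above.
type cache struct {
    mu sync.RWMutex
    m  map[string][]byte
}

func (c *cache) get(key string) ([]byte, bool) {
    c.mu.RLock() // many concurrent readers are fine
    defer c.mu.RUnlock()
    v, ok := c.m[key]
    return v, ok
}

func (c *cache) put(key string, val []byte) {
    c.mu.Lock() // writers take the exclusive lock
    defer c.mu.Unlock()
    c.m[key] = val
}
```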
## Adding order

diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.

[7]: https://github.com/google/btree
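For example, a sketch using the Options fields and BTreeIndex defined in diskv.go and index.go below:

```go
d := diskv.New(diskv.Options{
    BasePath:  "my-data-dir",
    Index:     &diskv.BTreeIndex{},
    IndexLess: func(a, b string) bool { return a < b }, // plain lexicographic order
})
keys := d.Index.Keys("", 10) // first ten keys, in order
```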
## Adding compression

Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.
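For example, to filter all reads and writes through gzip, using the constructors in compression.go below:

```go
d := diskv.New(diskv.Options{
    BasePath:    "my-data-dir",
    Compression: diskv.NewGzipCompression(), // or NewZlibCompression()
})
d.Write("key", []byte("stored compressed on disk; cached compressed too"))
```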
## Streaming

diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.
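A sketch of the streaming calls (signatures per diskv.go below; `d` is a store as in Usage, file names are illustrative):

```go
src, _ := os.Open("large-input-file")
defer src.Close()
d.WriteStream("big-key", src, false) // sync=false: rely on the filesystem to flush

rc, _ := d.ReadStream("big-key", true) // direct=true: bypass and bust the cache
defer rc.Close()
io.Copy(os.Stdout, rc)
```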
# Future plans

* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of
64
vendor/github.com/peterbourgon/diskv/compression.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
package diskv

import (
    "compress/flate"
    "compress/gzip"
    "compress/zlib"
    "io"
)

// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
    Writer(dst io.Writer) (io.WriteCloser, error)
    Reader(src io.Reader) (io.ReadCloser, error)
}

// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
    return NewGzipCompressionLevel(flate.DefaultCompression)
}

// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
    return &genericCompression{
        wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
        rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
    }
}

// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
    return NewZlibCompressionLevel(flate.DefaultCompression)
}

// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
    return NewZlibCompressionLevelDict(level, nil)
}

// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
    return &genericCompression{
        func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
        func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
    }
}

type genericCompression struct {
    wf func(w io.Writer) (io.WriteCloser, error)
    rf func(r io.Reader) (io.ReadCloser, error)
}

func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
    return g.wf(dst)
}

func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
    return g.rf(src)
}
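Since the Compression interface above is just a Writer/Reader pair, a custom implementation is straightforward; a hedged sketch wrapping compress/flate directly (not part of the vendored package):

```go
// flateCompression is a hypothetical Compression using compress/flate.
type flateCompression struct{ level int }

func (f flateCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
    return flate.NewWriter(dst, f.level)
}

func (f flateCompression) Reader(src io.Reader) (io.ReadCloser, error) {
    return flate.NewReader(src), nil // flate.NewReader already returns an io.ReadCloser
}
```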
624
vendor/github.com/peterbourgon/diskv/diskv.go
generated
vendored
Normal file
@@ -0,0 +1,624 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.

package diskv

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
    "sync"
    "syscall"
)

const (
    defaultBasePath             = "diskv"
    defaultFilePerm os.FileMode = 0666
    defaultPathPerm os.FileMode = 0777
)

var (
    defaultTransform   = func(s string) []string { return []string{} }
    errCanceled        = errors.New("canceled")
    errEmptyKey        = errors.New("empty key")
    errBadKey          = errors.New("bad key")
    errImportDirectory = errors.New("can't import a directory")
)

// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string

// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
    BasePath     string
    Transform    TransformFunction
    CacheSizeMax uint64 // bytes
    PathPerm     os.FileMode
    FilePerm     os.FileMode
    // If TempDir is set, it will enable filesystem atomic writes by
    // writing temporary files to that location before being moved
    // to BasePath.
    // Note that TempDir MUST be on the same device/partition as
    // BasePath.
    TempDir string

    Index     Index
    IndexLess LessFunction

    Compression Compression
}

// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
    Options
    mu        sync.RWMutex
    cache     map[string][]byte
    cacheSize uint64
}

// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
    if o.BasePath == "" {
        o.BasePath = defaultBasePath
    }
    if o.Transform == nil {
        o.Transform = defaultTransform
    }
    if o.PathPerm == 0 {
        o.PathPerm = defaultPathPerm
    }
    if o.FilePerm == 0 {
        o.FilePerm = defaultFilePerm
    }

    d := &Diskv{
        Options:   o,
        cache:     map[string][]byte{},
        cacheSize: 0,
    }

    if d.Index != nil && d.IndexLess != nil {
        d.Index.Initialize(d.IndexLess, d.Keys(nil))
    }

    return d
}

// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
    return d.WriteStream(key, bytes.NewBuffer(val), false)
}

// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
    if len(key) <= 0 {
        return errEmptyKey
    }

    d.mu.Lock()
    defer d.mu.Unlock()

    return d.writeStreamWithLock(key, r, sync)
}

// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
    if d.TempDir != "" {
        if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
            return nil, fmt.Errorf("temp mkdir: %s", err)
        }
        f, err := ioutil.TempFile(d.TempDir, "")
        if err != nil {
            return nil, fmt.Errorf("temp file: %s", err)
        }

        if err := f.Chmod(d.FilePerm); err != nil {
            f.Close()           // error deliberately ignored
            os.Remove(f.Name()) // error deliberately ignored
            return nil, fmt.Errorf("chmod: %s", err)
        }
        return f, nil
    }

    mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
    f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
    if err != nil {
        return nil, fmt.Errorf("open file: %s", err)
    }
    return f, nil
}

// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
    if err := d.ensurePathWithLock(key); err != nil {
        return fmt.Errorf("ensure path: %s", err)
    }

    f, err := d.createKeyFileWithLock(key)
    if err != nil {
        return fmt.Errorf("create key file: %s", err)
    }

    wc := io.WriteCloser(&nopWriteCloser{f})
    if d.Compression != nil {
        wc, err = d.Compression.Writer(f)
        if err != nil {
            f.Close()           // error deliberately ignored
            os.Remove(f.Name()) // error deliberately ignored
            return fmt.Errorf("compression writer: %s", err)
        }
    }

    if _, err := io.Copy(wc, r); err != nil {
        f.Close()           // error deliberately ignored
        os.Remove(f.Name()) // error deliberately ignored
        return fmt.Errorf("i/o copy: %s", err)
    }

    if err := wc.Close(); err != nil {
        f.Close()           // error deliberately ignored
        os.Remove(f.Name()) // error deliberately ignored
        return fmt.Errorf("compression close: %s", err)
    }

    if sync {
        if err := f.Sync(); err != nil {
            f.Close()           // error deliberately ignored
            os.Remove(f.Name()) // error deliberately ignored
            return fmt.Errorf("file sync: %s", err)
        }
    }

    if err := f.Close(); err != nil {
        return fmt.Errorf("file close: %s", err)
    }

    if f.Name() != d.completeFilename(key) {
        if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
            os.Remove(f.Name()) // error deliberately ignored
            return fmt.Errorf("rename: %s", err)
        }
    }

    if d.Index != nil {
        d.Index.Insert(key)
    }

    d.bustCacheWithLock(key) // cache only on read

    return nil
}

// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
    if dstKey == "" {
        return errEmptyKey
    }

    if fi, err := os.Stat(srcFilename); err != nil {
        return err
    } else if fi.IsDir() {
        return errImportDirectory
    }

    d.mu.Lock()
    defer d.mu.Unlock()

    if err := d.ensurePathWithLock(dstKey); err != nil {
        return fmt.Errorf("ensure path: %s", err)
    }

    if move {
        if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
            d.bustCacheWithLock(dstKey)
            return nil
        } else if err != syscall.EXDEV {
            // If it failed due to being on a different device, fall back to copying
            return err
        }
    }

    f, err := os.Open(srcFilename)
    if err != nil {
        return err
    }
    defer f.Close()
    err = d.writeStreamWithLock(dstKey, f, false)
    if err == nil && move {
        err = os.Remove(srcFilename)
    }
    return err
}

// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
    rc, err := d.ReadStream(key, false)
    if err != nil {
        return []byte{}, err
    }
    defer rc.Close()
    return ioutil.ReadAll(rc)
}

// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
    d.mu.RLock()
    defer d.mu.RUnlock()

    if val, ok := d.cache[key]; ok {
        if !direct {
            buf := bytes.NewBuffer(val)
            if d.Compression != nil {
                return d.Compression.Reader(buf)
            }
            return ioutil.NopCloser(buf), nil
        }

        go func() {
            d.mu.Lock()
            defer d.mu.Unlock()
            d.uncacheWithLock(key, uint64(len(val)))
        }()
    }

    return d.readWithRLock(key)
}

// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
    filename := d.completeFilename(key)

    fi, err := os.Stat(filename)
    if err != nil {
        return nil, err
    }
    if fi.IsDir() {
        return nil, os.ErrNotExist
    }

    f, err := os.Open(filename)
    if err != nil {
        return nil, err
    }

    var r io.Reader
    if d.CacheSizeMax > 0 {
        r = newSiphon(f, d, key)
    } else {
        r = &closingReader{f}
    }

    var rc = io.ReadCloser(ioutil.NopCloser(r))
    if d.Compression != nil {
        rc, err = d.Compression.Reader(r)
        if err != nil {
            return nil, err
        }
    }

    return rc, nil
}

// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
    rc io.ReadCloser
}

func (cr closingReader) Read(p []byte) (int, error) {
    n, err := cr.rc.Read(p)
    if err == io.EOF {
        if closeErr := cr.rc.Close(); closeErr != nil {
            return n, closeErr // close must succeed for Read to succeed
        }
    }
    return n, err
}

// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
    f   *os.File
    d   *Diskv
    key string
    buf *bytes.Buffer
}

// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
    return &siphon{
        f:   f,
        d:   d,
        key: key,
        buf: &bytes.Buffer{},
    }
}

// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
    n, err := s.f.Read(p)

    if err == nil {
        return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
    }

    if err == io.EOF {
        s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
        if closeErr := s.f.Close(); closeErr != nil {
            return n, closeErr // close must succeed for Read to succeed
        }
        return n, err
    }

    return n, err
}

// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
    d.mu.Lock()
    defer d.mu.Unlock()

    d.bustCacheWithLock(key)

    // erase from index
    if d.Index != nil {
        d.Index.Delete(key)
    }

    // erase from disk
    filename := d.completeFilename(key)
    if s, err := os.Stat(filename); err == nil {
        if s.IsDir() {
            return errBadKey
        }
        if err = os.Remove(filename); err != nil {
            return err
        }
    } else {
        // Return err as-is so caller can do os.IsNotExist(err).
        return err
    }

    // clean up and return
    d.pruneDirsWithLock(key)
    return nil
}

// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
    d.mu.Lock()
    defer d.mu.Unlock()
    d.cache = make(map[string][]byte)
    d.cacheSize = 0
    if d.TempDir != "" {
        os.RemoveAll(d.TempDir) // errors ignored
    }
    return os.RemoveAll(d.BasePath)
}

// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
    d.mu.Lock()
    defer d.mu.Unlock()

    if _, ok := d.cache[key]; ok {
        return true
    }

    filename := d.completeFilename(key)
    s, err := os.Stat(filename)
    if err != nil {
        return false
    }
    if s.IsDir() {
        return false
    }

    return true
}

// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
    return d.KeysPrefix("", cancel)
}

// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
    var prepath string
    if prefix == "" {
        prepath = d.BasePath
    } else {
        prepath = d.pathFor(prefix)
    }
    c := make(chan string)
    go func() {
        filepath.Walk(prepath, walker(c, prefix, cancel))
        close(c)
    }()
    return c
}

// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
    return func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
            return nil // "pass"
        }

        select {
        case c <- info.Name():
        case <-cancel:
            return errCanceled
        }

        return nil
    }
}

// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
    return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}

// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
    return os.MkdirAll(d.pathFor(key), d.PathPerm)
}

// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
    return filepath.Join(d.pathFor(key), key)
}

// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
    valueSize := uint64(len(val))
    if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
        return fmt.Errorf("%s; not caching", err)
    }

    // be very strict about memory guarantees
    if (d.cacheSize + valueSize) > d.CacheSizeMax {
        panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
    }

    d.cache[key] = val
    d.cacheSize += valueSize
    return nil
}

// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
    d.mu.Lock()
    defer d.mu.Unlock()
    return d.cacheWithLock(key, val)
}

func (d *Diskv) bustCacheWithLock(key string) {
    if val, ok := d.cache[key]; ok {
        d.uncacheWithLock(key, uint64(len(val)))
    }
}

func (d *Diskv) uncacheWithLock(key string, sz uint64) {
    d.cacheSize -= sz
    delete(d.cache, key)
}

// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
    pathlist := d.Transform(key)
    for i := range pathlist {
        dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

        // thanks to Steven Blenkinsop for this snippet
        switch fi, err := os.Stat(dir); true {
        case err != nil:
            return err
        case !fi.IsDir():
            panic(fmt.Sprintf("corrupt dirstate at %s", dir))
        }

        nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
        if err != nil {
            return err
        } else if len(nlinks) > 0 {
            return nil // has subdirs -- do not prune
        }
        if err = os.Remove(dir); err != nil {
            return err
        }
    }

    return nil
}

// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
    if valueSize > d.CacheSizeMax {
        return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
    }

    safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

    for key, val := range d.cache {
        if safe() {
            break
        }

        d.uncacheWithLock(key, uint64(len(val)))
    }

    if !safe() {
        panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
    }

    return nil
}

// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
    io.Writer
}

func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error                { return nil }
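A short sketch of consuming Keys with early cancellation, per the doc comments above (the limit of 100 is illustrative):

```go
cancel := make(chan struct{})
n := 0
for key := range d.Keys(cancel) {
    fmt.Println(key)
    if n++; n >= 100 { // stop after 100 keys
        close(cancel) // unblocks the walker, which then closes the channel
        break
    }
}
```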
115
vendor/github.com/peterbourgon/diskv/index.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
package diskv

import (
    "sync"

    "github.com/google/btree"
)

// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
    Initialize(less LessFunction, keys <-chan string)
    Insert(key string)
    Delete(key string)
    Keys(from string, n int) []string
}

// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool

// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
    s string
    l LessFunction
}

// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
    return s.l(s.s, i.(btreeString).s)
}

// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
    sync.RWMutex
    LessFunction
    *btree.BTree
}

// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
    i.Lock()
    defer i.Unlock()
    i.LessFunction = less
    i.BTree = rebuild(less, keys)
}

// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
    i.Lock()
    defer i.Unlock()
    if i.BTree == nil || i.LessFunction == nil {
        panic("uninitialized index")
    }
    i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}

// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
    i.Lock()
    defer i.Unlock()
    if i.BTree == nil || i.LessFunction == nil {
        panic("uninitialized index")
    }
    i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}

// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
    i.RLock()
    defer i.RUnlock()

    if i.BTree == nil || i.LessFunction == nil {
        panic("uninitialized index")
    }

    if i.BTree.Len() <= 0 {
        return []string{}
    }

    btreeFrom := btreeString{s: from, l: i.LessFunction}
    skipFirst := true
    if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
        // no such key, so fabricate an always-smallest item
        btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
        skipFirst = false
    }

    keys := []string{}
    iterator := func(i btree.Item) bool {
        keys = append(keys, i.(btreeString).s)
        return len(keys) < n
    }
    i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)

    if skipFirst && len(keys) > 0 {
        keys = keys[1:]
    }

    return keys
}

// rebuildIndex does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
    tree := btree.New(2)
    for key := range keys {
        tree.ReplaceOrInsert(btreeString{s: key, l: less})
    }
    return tree
}
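The 'from' semantics of Keys above make page-by-page iteration natural; a sketch, assuming an initialized BTreeIndex `idx` and a hypothetical `process` consumer:

```go
from := "" // empty: start at the first key
for {
    page := idx.Keys(from, 100) // up to 100 keys following 'from'
    if len(page) == 0 {
        break
    }
    process(page)            // hypothetical consumer
    from = page[len(page)-1] // resume immediately after the last key seen
}
```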
199
vendor/github.com/ugorji/go/codec/0doc.go
generated
vendored
@@ -1,199 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

/*
High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.

Supported Serialization formats are:

  - msgpack: https://github.com/msgpack/msgpack
  - binc: http://github.com/ugorji/binc
  - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
  - json: http://json.org http://tools.ietf.org/html/rfc7159
  - simple:

To install:

    go get github.com/ugorji/go/codec

This package understands the 'unsafe' tag, to allow using unsafe semantics:

  - When decoding into a struct, you need to read the field name as a string
    so you can find the struct field it is mapped to.
    Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.

To install using unsafe, pass the 'unsafe' tag:

    go get -tags=unsafe github.com/ugorji/go/codec

For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .

The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).

Rich Feature Set includes:

  - Simple but extremely powerful and feature-rich API
  - Very High Performance.
    Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
  - Multiple conversions:
    Package coerces types where appropriate
    e.g. decode an int in the stream into a float, etc.
  - Corner Cases:
    Overflows, nil maps/slices, nil values in streams are handled correctly
  - Standard field renaming via tags
  - Support for omitting empty fields during an encoding
  - Encoding from any value and decoding into pointer to any value
    (struct, slice, map, primitives, pointers, interface{}, etc)
  - Extensions to support efficient encoding/decoding of any named types
  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
  - Decoding without a schema (into a interface{}).
    Includes Options to configure what specific map or slice type to use
    when decoding an encoded list or map into a nil interface{}
  - Encode a struct as an array, and decode struct from an array in the data stream
  - Comprehensive support for anonymous fields
  - Fast (no-reflection) encoding/decoding of common maps and slices
  - Code-generation for faster performance.
  - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
  - Support indefinite-length formats to enable true streaming
    (for formats which support it e.g. json, cbor)
  - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
    This mostly applies to maps, where iteration order is non-deterministic.
  - NIL in data stream decoded as zero value
  - Never silently skip data when decoding.
    User decides whether to return an error or silently skip data when keys or indexes
    in the data stream do not map to fields in the struct.
  - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
  - Encode/Decode from/to chan types (for iterative streaming support)
  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
  - Provides a RPC Server and Client Codec for net/rpc communication protocol.
  - Handle unique idiosyncrasies of codecs e.g.
    - For messagepack, configure how ambiguities in handling raw bytes are resolved
    - For messagepack, provide rpc server/client codec to support
      msgpack-rpc protocol defined at:
      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md

Extension Support

Users can register a function to handle the encoding or decoding of
their custom types.

There are no restrictions on what the custom type can be. Some examples:

    type BisSet   []int
    type BitSet64 uint64
    type UUID     string
    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
    type GifImage struct { ... }

As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.

RPC

RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.

Usage

The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.

The Encoder and Decoder are NOT safe for concurrent use.

Consequently, the usage model is basically:

  - Create and initialize the Handle before any use.
    Once created, DO NOT modify it.
  - Multiple Encoders or Decoders can now use the Handle concurrently.
    They only read information off the Handle (never write).
  - However, each Encoder or Decoder MUST not be used concurrently
  - To re-use an Encoder/Decoder, call Reset(...) on it first.
    This allows you use state maintained on the Encoder/Decoder.

Sample usage model:

    // create and configure Handle
    var (
      bh codec.BincHandle
      mh codec.MsgpackHandle
      ch codec.CborHandle
    )

    mh.MapType = reflect.TypeOf(map[string]interface{}(nil))

    // configure extensions
    // e.g. for msgpack, define functions and enable Time support for tag 1
    // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)

    // create and use decoder/encoder
    var (
      r io.Reader
      w io.Writer
      b []byte
      h = &bh // or mh to use msgpack
    )

    dec = codec.NewDecoder(r, h)
    dec = codec.NewDecoderBytes(b, h)
    err = dec.Decode(&v)

    enc = codec.NewEncoder(w, h)
    enc = codec.NewEncoderBytes(&b, h)
    err = enc.Encode(v)

    //RPC Server
    go func() {
      for {
        conn, err := listener.Accept()
        rpcCodec := codec.GoRpc.ServerCodec(conn, h)
        //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
        rpc.ServeCodec(rpcCodec)
      }
    }()

    //RPC Communication (client side)
    conn, err = net.Dial("tcp", "localhost:5555")
    rpcCodec := codec.GoRpc.ClientCodec(conn, h)
    //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
    client := rpc.NewClientWithCodec(rpcCodec)

*/
package codec

// Benefits of go-codec:
//
//   - encoding/json always reads whole file into memory first.
//     This makes it unsuitable for parsing very large files.
//   - encoding/xml cannot parse into a map[string]interface{}
//     I found this out on reading https://github.com/clbanning/mxj

// TODO:
//
//   - optimization for codecgen:
//     if len of entity is <= 3 words, then support a value receiver for encode.
//   - (En|De)coder should store an error when it occurs.
//     Until reset, subsequent calls return that error that was stored.
//     This means that free panics must go away.
//     All errors must be raised through errorf method.
//   - Decoding using a chan is good, but incurs concurrency costs.
//     This is because there's no fast way to use a channel without it
//     having to switch goroutines constantly.
//     Callback pattern is still the best. Maybe consider supporting something like:
//        type X struct {
//          Name string
//          Ys []Y
//          Ys chan <- Y
//          Ys func(Y) -> call this function for each entry
//        }
//   - Consider adding a isZeroer interface { isZero() bool }
//     It is used within isEmpty, for omitEmpty support.
//   - Consider making Handle used AS-IS within the encoding/decoding session.
//     This means that we don't cache Handle information within the (En|De)coder,
//     except we really need it at Reset(...)
//   - Consider adding math/big support
//   - Consider reducing the size of the generated functions:
//     Maybe use one loop, and put the conditionals in the loop.
//     for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }
148
vendor/github.com/ugorji/go/codec/README.md
generated
vendored
@@ -1,148 +0,0 @@
# Codec

High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.

Supported Serialization formats are:

  - msgpack: https://github.com/msgpack/msgpack
  - binc: http://github.com/ugorji/binc
  - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
  - json: http://json.org http://tools.ietf.org/html/rfc7159
  - simple:

To install:

    go get github.com/ugorji/go/codec

This package understands the `unsafe` tag, to allow using unsafe semantics:

  - When decoding into a struct, you need to read the field name as a string
    so you can find the struct field it is mapped to.
    Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion.

To use it, you must pass the `unsafe` tag during install:

```
go install -tags=unsafe github.com/ugorji/go/codec
```

Online documentation: http://godoc.org/github.com/ugorji/go/codec
Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer

The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).

Rich Feature Set includes:

  - Simple but extremely powerful and feature-rich API
  - Very High Performance.
    Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
  - Multiple conversions:
    Package coerces types where appropriate
    e.g. decode an int in the stream into a float, etc.
  - Corner Cases:
    Overflows, nil maps/slices, nil values in streams are handled correctly
  - Standard field renaming via tags
  - Support for omitting empty fields during an encoding
  - Encoding from any value and decoding into pointer to any value
    (struct, slice, map, primitives, pointers, interface{}, etc)
  - Extensions to support efficient encoding/decoding of any named types
  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
  - Decoding without a schema (into a interface{}).
    Includes Options to configure what specific map or slice type to use
    when decoding an encoded list or map into a nil interface{}
  - Encode a struct as an array, and decode struct from an array in the data stream
  - Comprehensive support for anonymous fields
  - Fast (no-reflection) encoding/decoding of common maps and slices
  - Code-generation for faster performance.
  - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
  - Support indefinite-length formats to enable true streaming
    (for formats which support it e.g. json, cbor)
  - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
    This mostly applies to maps, where iteration order is non-deterministic.
  - NIL in data stream decoded as zero value
  - Never silently skip data when decoding.
    User decides whether to return an error or silently skip data when keys or indexes
    in the data stream do not map to fields in the struct.
  - Encode/Decode from/to chan types (for iterative streaming support)
  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
  - Provides a RPC Server and Client Codec for net/rpc communication protocol.
  - Handle unique idiosyncrasies of codecs e.g.
    - For messagepack, configure how ambiguities in handling raw bytes are resolved
    - For messagepack, provide rpc server/client codec to support
      msgpack-rpc protocol defined at:
      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md

## Extension Support

Users can register a function to handle the encoding or decoding of
their custom types.

There are no restrictions on what the custom type can be. Some examples:

    type BisSet   []int
    type BitSet64 uint64
    type UUID     string
    type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
    type GifImage struct { ... }

As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.

## RPC

RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.

## Usage

Typical usage model:

    // create and configure Handle
    var (
      bh codec.BincHandle
      mh codec.MsgpackHandle
      ch codec.CborHandle
    )

    mh.MapType = reflect.TypeOf(map[string]interface{}(nil))

    // configure extensions
    // e.g. for msgpack, define functions and enable Time support for tag 1
    // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)

    // create and use decoder/encoder
    var (
      r io.Reader
      w io.Writer
      b []byte
      h = &bh // or mh to use msgpack
    )

    dec = codec.NewDecoder(r, h)
    dec = codec.NewDecoderBytes(b, h)
    err = dec.Decode(&v)

    enc = codec.NewEncoder(w, h)
    enc = codec.NewEncoderBytes(&b, h)
    err = enc.Encode(v)

    //RPC Server
    go func() {
      for {
        conn, err := listener.Accept()
        rpcCodec := codec.GoRpc.ServerCodec(conn, h)
        //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
        rpc.ServeCodec(rpcCodec)
      }
    }()

    //RPC Communication (client side)
    conn, err = net.Dial("tcp", "localhost:5555")
    rpcCodec := codec.GoRpc.ClientCodec(conn, h)
    //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
    client := rpc.NewClientWithCodec(rpcCodec)
929
vendor/github.com/ugorji/go/codec/binc.go
generated
vendored
@@ -1,929 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import (
    "math"
    "reflect"
    "time"
)

const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.

// vd as low 4 bits (there are 16 slots)
const (
    bincVdSpecial byte = iota
    bincVdPosInt
    bincVdNegInt
    bincVdFloat

    bincVdString
    bincVdByteArray
    bincVdArray
    bincVdMap

    bincVdTimestamp
    bincVdSmallInt
    bincVdUnicodeOther
    bincVdSymbol

    bincVdDecimal
    _ // open slot
    _ // open slot
    bincVdCustomExt = 0x0f
)

const (
    bincSpNil byte = iota
    bincSpFalse
    bincSpTrue
    bincSpNan
    bincSpPosInf
    bincSpNegInf
    bincSpZeroFloat
    bincSpZero
    bincSpNegOne
)

const (
    bincFlBin16 byte = iota
    bincFlBin32
    _ // bincFlBin32e
    bincFlBin64
    _ // bincFlBin64e
    // others not currently supported
)

type bincEncDriver struct {
    e *Encoder
    w encWriter
    m map[string]uint16 // symbols
    b [scratchByteArrayLen]byte
    s uint16 // symbols sequencer
    encNoSeparator
}

func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
    return rt == timeTypId
}

func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
    if rt == timeTypId {
        var bs []byte
        switch x := v.(type) {
        case time.Time:
            bs = encodeTime(x)
        case *time.Time:
            bs = encodeTime(*x)
        default:
            e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
        }
        e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
        e.w.writeb(bs)
    }
}

func (e *bincEncDriver) EncodeNil() {
    e.w.writen1(bincVdSpecial<<4 | bincSpNil)
}

func (e *bincEncDriver) EncodeBool(b bool) {
    if b {
        e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
    } else {
        e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
    }
}

func (e *bincEncDriver) EncodeFloat32(f float32) {
    if f == 0 {
        e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
        return
    }
    e.w.writen1(bincVdFloat<<4 | bincFlBin32)
    bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}

func (e *bincEncDriver) EncodeFloat64(f float64) {
    if f == 0 {
        e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
        return
    }
    bigen.PutUint64(e.b[:8], math.Float64bits(f))
    if bincDoPrune {
        i := 7
        for ; i >= 0 && (e.b[i] == 0); i-- {
        }
        i++
        if i <= 6 {
            e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
            e.w.writen1(byte(i))
            e.w.writeb(e.b[:i])
            return
        }
    }
    e.w.writen1(bincVdFloat<<4 | bincFlBin64)
    e.w.writeb(e.b[:8])
}

func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
    if lim == 4 {
        bigen.PutUint32(e.b[:lim], uint32(v))
    } else {
        bigen.PutUint64(e.b[:lim], v)
    }
    if bincDoPrune {
        i := pruneSignExt(e.b[:lim], pos)
        e.w.writen1(bd | lim - 1 - byte(i))
        e.w.writeb(e.b[i:lim])
    } else {
        e.w.writen1(bd | lim - 1)
        e.w.writeb(e.b[:lim])
    }
}

func (e *bincEncDriver) EncodeInt(v int64) {
    const nbd byte = bincVdNegInt << 4
    if v >= 0 {
        e.encUint(bincVdPosInt<<4, true, uint64(v))
    } else if v == -1 {
        e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
    } else {
        e.encUint(bincVdNegInt<<4, false, uint64(-v))
    }
}

func (e *bincEncDriver) EncodeUint(v uint64) {
    e.encUint(bincVdPosInt<<4, true, v)
}

func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
    if v == 0 {
        e.w.writen1(bincVdSpecial<<4 | bincSpZero)
    } else if pos && v >= 1 && v <= 16 {
        e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
    } else if v <= math.MaxUint8 {
        e.w.writen2(bd|0x0, byte(v))
    } else if v <= math.MaxUint16 {
        e.w.writen1(bd | 0x01)
        bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
    } else if v <= math.MaxUint32 {
        e.encIntegerPrune(bd, pos, v, 4)
    } else {
        e.encIntegerPrune(bd, pos, v, 8)
    }
}

func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
    bs := ext.WriteExt(rv)
    if bs == nil {
        e.EncodeNil()
        return
    }
    e.encodeExtPreamble(uint8(xtag), len(bs))
    e.w.writeb(bs)
}

func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
    e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
    e.w.writeb(re.Data)
}

func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
    e.encLen(bincVdCustomExt<<4, uint64(length))
    e.w.writen1(xtag)
}

func (e *bincEncDriver) EncodeArrayStart(length int) {
    e.encLen(bincVdArray<<4, uint64(length))
}

func (e *bincEncDriver) EncodeMapStart(length int) {
    e.encLen(bincVdMap<<4, uint64(length))
}

func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
    l := uint64(len(v))
    e.encBytesLen(c, l)
    if l > 0 {
        e.w.writestr(v)
    }
}

func (e *bincEncDriver) EncodeSymbol(v string) {
    // if WriteSymbolsNoRefs {
    //   e.encodeString(c_UTF8, v)
    //   return
    // }

    //symbols only offer benefit when string length > 1.
    //This is because strings with length 1 take only 2 bytes to store
    //(bd with embedded length, and single byte for string val).

    l := len(v)
    if l == 0 {
        e.encBytesLen(c_UTF8, 0)
        return
    } else if l == 1 {
        e.encBytesLen(c_UTF8, 1)
        e.w.writen1(v[0])
        return
    }
    if e.m == nil {
        e.m = make(map[string]uint16, 16)
    }
    ui, ok := e.m[v]
    if ok {
        if ui <= math.MaxUint8 {
            e.w.writen2(bincVdSymbol<<4, byte(ui))
        } else {
            e.w.writen1(bincVdSymbol<<4 | 0x8)
            bigenHelper{e.b[:2], e.w}.writeUint16(ui)
        }
    } else {
        e.s++
        ui = e.s
        //ui = uint16(atomic.AddUint32(&e.s, 1))
        e.m[v] = ui
        var lenprec uint8
        if l <= math.MaxUint8 {
            // lenprec = 0
        } else if l <= math.MaxUint16 {
            lenprec = 1
        } else if int64(l) <= math.MaxUint32 {
            lenprec = 2
        } else {
            lenprec = 3
        }
        if ui <= math.MaxUint8 {
            e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
        } else {
            e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
            bigenHelper{e.b[:2], e.w}.writeUint16(ui)
        }
        if lenprec == 0 {
            e.w.writen1(byte(l))
        } else if lenprec == 1 {
            bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
        } else if lenprec == 2 {
            bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
        } else {
            bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
        }
        e.w.writestr(v)
    }
}

func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
    l := uint64(len(v))
    e.encBytesLen(c, l)
    if l > 0 {
        e.w.writeb(v)
    }
}

func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
    //TODO: support bincUnicodeOther (for now, just use string or bytearray)
    if c == c_RAW {
        e.encLen(bincVdByteArray<<4, length)
    } else {
        e.encLen(bincVdString<<4, length)
    }
}

func (e *bincEncDriver) encLen(bd byte, l uint64) {
    if l < 12 {
        e.w.writen1(bd | uint8(l+4))
    } else {
        e.encLenNumber(bd, l)
    }
}

func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
    if v <= math.MaxUint8 {
        e.w.writen2(bd, byte(v))
    } else if v <= math.MaxUint16 {
        e.w.writen1(bd | 0x01)
        bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
    } else if v <= math.MaxUint32 {
        e.w.writen1(bd | 0x02)
        bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
    } else {
        e.w.writen1(bd | 0x03)
        bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
    }
}

//------------------------------------

type bincDecSymbol struct {
    s string
    b []byte
    i uint16
}

type bincDecDriver struct {
    d      *Decoder
    h      *BincHandle
    r      decReader
    br     bool // bytes reader
    bdRead bool
    bd     byte
    vd     byte
    vs     byte
    noStreamingCodec
    decNoSeparator
    b [scratchByteArrayLen]byte

    // linear searching on this slice is ok,
    // because we typically expect < 32 symbols in each stream.
    s []bincDecSymbol
}

func (d *bincDecDriver) readNextBd() {
    d.bd = d.r.readn1()
    d.vd = d.bd >> 4
    d.vs = d.bd & 0x0f
    d.bdRead = true
}

func (d *bincDecDriver) uncacheRead() {
    if d.bdRead {
        d.r.unreadn1()
        d.bdRead = false
    }
}

func (d *bincDecDriver) ContainerType() (vt valueType) {
    if d.vd == bincVdSpecial && d.vs == bincSpNil {
        return valueTypeNil
    } else if d.vd == bincVdByteArray {
        return valueTypeBytes
    } else if d.vd == bincVdString {
        return valueTypeString
    } else if d.vd == bincVdArray {
        return valueTypeArray
    } else if d.vd == bincVdMap {
        return valueTypeMap
    } else {
        // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
    }
    return valueTypeUnset
}

func (d *bincDecDriver) TryDecodeAsNil() bool {
    if !d.bdRead {
        d.readNextBd()
    }
    if d.bd == bincVdSpecial<<4|bincSpNil {
        d.bdRead = false
        return true
    }
    return false
}

func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
    return rt == timeTypId
}

func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
    if !d.bdRead {
        d.readNextBd()
    }
    if rt == timeTypId {
        if d.vd != bincVdTimestamp {
            d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
            return
        }
        tt, err := decodeTime(d.r.readx(int(d.vs)))
        if err != nil {
            panic(err)
        }
        var vt *time.Time = v.(*time.Time)
        *vt = tt
        d.bdRead = false
    }
}

func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
    if vs&0x8 == 0 {
        d.r.readb(d.b[0:defaultLen])
    } else {
        l := d.r.readn1()
        if l > 8 {
            d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
            return
        }
        for i := l; i < 8; i++ {
            d.b[i] = 0
        }
        d.r.readb(d.b[0:l])
    }
}

func (d *bincDecDriver) decFloat() (f float64) {
    //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
    if x := d.vs & 0x7; x == bincFlBin32 {
        d.decFloatPre(d.vs, 4)
        f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
    } else if x == bincFlBin64 {
        d.decFloatPre(d.vs, 8)
        f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
    } else {
        d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
        return
    }
    return
}

func (d *bincDecDriver) decUint() (v uint64) {
    // need to inline the code (interface conversion and type assertion expensive)
    switch d.vs {
    case 0:
        v = uint64(d.r.readn1())
    case 1:
        d.r.readb(d.b[6:8])
        v = uint64(bigen.Uint16(d.b[6:8]))
    case 2:
        d.b[4] = 0
        d.r.readb(d.b[5:8])
        v = uint64(bigen.Uint32(d.b[4:8]))
    case 3:
        d.r.readb(d.b[4:8])
        v = uint64(bigen.Uint32(d.b[4:8]))
    case 4, 5, 6:
        lim := int(7 - d.vs)
        d.r.readb(d.b[lim:8])
        for i := 0; i < lim; i++ {
            d.b[i] = 0
        }
        v = uint64(bigen.Uint64(d.b[:8]))
    case 7:
        d.r.readb(d.b[:8])
        v = uint64(bigen.Uint64(d.b[:8]))
    default:
        d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
        return
    }
    return
}

func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
vd, vs := d.vd, d.vs
|
||||
if vd == bincVdPosInt {
|
||||
ui = d.decUint()
|
||||
} else if vd == bincVdNegInt {
|
||||
ui = d.decUint()
|
||||
neg = true
|
||||
} else if vd == bincVdSmallInt {
|
||||
ui = uint64(d.vs) + 1
|
||||
} else if vd == bincVdSpecial {
|
||||
if vs == bincSpZero {
|
||||
//i = 0
|
||||
} else if vs == bincSpNegOne {
|
||||
neg = true
|
||||
ui = 1
|
||||
} else {
|
||||
d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
|
||||
ui, neg := d.decCheckInteger()
|
||||
i, overflow := chkOvf.SignedInt(ui)
|
||||
if overflow {
|
||||
d.d.errorf("simple: overflow converting %v to signed integer", ui)
|
||||
return
|
||||
}
|
||||
if neg {
|
||||
i = -i
|
||||
}
|
||||
if chkOvf.Int(i, bitsize) {
|
||||
d.d.errorf("binc: overflow integer: %v", i)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
|
||||
ui, neg := d.decCheckInteger()
|
||||
if neg {
|
||||
d.d.errorf("Assigning negative signed value to unsigned type")
|
||||
return
|
||||
}
|
||||
if chkOvf.Uint(ui, bitsize) {
|
||||
d.d.errorf("binc: overflow integer: %v", ui)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
vd, vs := d.vd, d.vs
|
||||
if vd == bincVdSpecial {
|
||||
d.bdRead = false
|
||||
if vs == bincSpNan {
|
||||
return math.NaN()
|
||||
} else if vs == bincSpPosInf {
|
||||
return math.Inf(1)
|
||||
} else if vs == bincSpZeroFloat || vs == bincSpZero {
|
||||
return
|
||||
} else if vs == bincSpNegInf {
|
||||
return math.Inf(-1)
|
||||
} else {
|
||||
d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
|
||||
return
|
||||
}
|
||||
} else if vd == bincVdFloat {
|
||||
f = d.decFloat()
|
||||
} else {
|
||||
f = float64(d.DecodeInt(64))
|
||||
}
|
||||
if chkOverflow32 && chkOvf.Float32(f) {
|
||||
d.d.errorf("binc: float32 overflow: %v", f)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
// bool can be decoded from bool only (single byte).
|
||||
func (d *bincDecDriver) DecodeBool() (b bool) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
|
||||
// b = false
|
||||
} else if bd == (bincVdSpecial | bincSpTrue) {
|
||||
b = true
|
||||
} else {
|
||||
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) ReadMapStart() (length int) {
|
||||
if d.vd != bincVdMap {
|
||||
d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
|
||||
return
|
||||
}
|
||||
length = d.decLen()
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) ReadArrayStart() (length int) {
|
||||
if d.vd != bincVdArray {
|
||||
d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
|
||||
return
|
||||
}
|
||||
length = d.decLen()
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) decLen() int {
|
||||
if d.vs > 3 {
|
||||
return int(d.vs - 4)
|
||||
}
|
||||
return int(d.decLenNumber())
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) decLenNumber() (v uint64) {
|
||||
if x := d.vs; x == 0 {
|
||||
v = uint64(d.r.readn1())
|
||||
} else if x == 1 {
|
||||
d.r.readb(d.b[6:8])
|
||||
v = uint64(bigen.Uint16(d.b[6:8]))
|
||||
} else if x == 2 {
|
||||
d.r.readb(d.b[4:8])
|
||||
v = uint64(bigen.Uint32(d.b[4:8]))
|
||||
} else {
|
||||
d.r.readb(d.b[:8])
|
||||
v = bigen.Uint64(d.b[:8])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.bd == bincVdSpecial<<4|bincSpNil {
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
var slen int = -1
|
||||
// var ok bool
|
||||
switch d.vd {
|
||||
case bincVdString, bincVdByteArray:
|
||||
slen = d.decLen()
|
||||
if zerocopy {
|
||||
if d.br {
|
||||
bs2 = d.r.readx(slen)
|
||||
} else if len(bs) == 0 {
|
||||
bs2 = decByteSlice(d.r, slen, d.b[:])
|
||||
} else {
|
||||
bs2 = decByteSlice(d.r, slen, bs)
|
||||
}
|
||||
} else {
|
||||
bs2 = decByteSlice(d.r, slen, bs)
|
||||
}
|
||||
if withString {
|
||||
s = string(bs2)
|
||||
}
|
||||
case bincVdSymbol:
|
||||
// zerocopy doesn't apply for symbols,
|
||||
// as the values must be stored in a table for later use.
|
||||
//
|
||||
//from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
|
||||
//extract symbol
|
||||
//if containsStringVal, read it and put in map
|
||||
//else look in map for string value
|
||||
var symbol uint16
|
||||
vs := d.vs
|
||||
if vs&0x8 == 0 {
|
||||
symbol = uint16(d.r.readn1())
|
||||
} else {
|
||||
symbol = uint16(bigen.Uint16(d.r.readx(2)))
|
||||
}
|
||||
if d.s == nil {
|
||||
d.s = make([]bincDecSymbol, 0, 16)
|
||||
}
|
||||
|
||||
if vs&0x4 == 0 {
|
||||
for i := range d.s {
|
||||
j := &d.s[i]
|
||||
if j.i == symbol {
|
||||
bs2 = j.b
|
||||
if withString {
|
||||
if j.s == "" && bs2 != nil {
|
||||
j.s = string(bs2)
|
||||
}
|
||||
s = j.s
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch vs & 0x3 {
|
||||
case 0:
|
||||
slen = int(d.r.readn1())
|
||||
case 1:
|
||||
slen = int(bigen.Uint16(d.r.readx(2)))
|
||||
case 2:
|
||||
slen = int(bigen.Uint32(d.r.readx(4)))
|
||||
case 3:
|
||||
slen = int(bigen.Uint64(d.r.readx(8)))
|
||||
}
|
||||
// since using symbols, do not store any part of
|
||||
// the parameter bs in the map, as it might be a shared buffer.
|
||||
// bs2 = decByteSlice(d.r, slen, bs)
|
||||
bs2 = decByteSlice(d.r, slen, nil)
|
||||
if withString {
|
||||
s = string(bs2)
|
||||
}
|
||||
d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
|
||||
}
|
||||
default:
|
||||
d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
|
||||
bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeString() (s string) {
|
||||
// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
|
||||
// Use decStringAndBytes directly.
|
||||
// return string(d.DecodeBytes(d.b[:], true, true))
|
||||
_, s = d.decStringAndBytes(d.b[:], true, true)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
|
||||
if isstring {
|
||||
bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
|
||||
return
|
||||
}
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.bd == bincVdSpecial<<4|bincSpNil {
|
||||
d.bdRead = false
|
||||
return nil
|
||||
}
|
||||
var clen int
|
||||
if d.vd == bincVdString || d.vd == bincVdByteArray {
|
||||
clen = d.decLen()
|
||||
} else {
|
||||
d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
|
||||
bincVdString, bincVdByteArray, d.vd)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
if zerocopy {
|
||||
if d.br {
|
||||
return d.r.readx(clen)
|
||||
} else if len(bs) == 0 {
|
||||
bs = d.b[:]
|
||||
}
|
||||
}
|
||||
return decByteSlice(d.r, clen, bs)
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
|
||||
if xtag > 0xff {
|
||||
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
|
||||
return
|
||||
}
|
||||
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
|
||||
realxtag = uint64(realxtag1)
|
||||
if ext == nil {
|
||||
re := rv.(*RawExt)
|
||||
re.Tag = realxtag
|
||||
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
|
||||
} else {
|
||||
ext.ReadExt(rv, xbs)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.vd == bincVdCustomExt {
|
||||
l := d.decLen()
|
||||
xtag = d.r.readn1()
|
||||
if verifyTag && xtag != tag {
|
||||
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
|
||||
return
|
||||
}
|
||||
xbs = d.r.readx(l)
|
||||
} else if d.vd == bincVdByteArray {
|
||||
xbs = d.DecodeBytes(nil, false, true)
|
||||
} else {
|
||||
d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
|
||||
return
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeNaked() {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
|
||||
n := &d.d.n
|
||||
var decodeFurther bool
|
||||
|
||||
switch d.vd {
|
||||
case bincVdSpecial:
|
||||
switch d.vs {
|
||||
case bincSpNil:
|
||||
n.v = valueTypeNil
|
||||
case bincSpFalse:
|
||||
n.v = valueTypeBool
|
||||
n.b = false
|
||||
case bincSpTrue:
|
||||
n.v = valueTypeBool
|
||||
n.b = true
|
||||
case bincSpNan:
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.NaN()
|
||||
case bincSpPosInf:
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.Inf(1)
|
||||
case bincSpNegInf:
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.Inf(-1)
|
||||
case bincSpZeroFloat:
|
||||
n.v = valueTypeFloat
|
||||
n.f = float64(0)
|
||||
case bincSpZero:
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(0) // int8(0)
|
||||
case bincSpNegOne:
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(-1) // int8(-1)
|
||||
default:
|
||||
d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
|
||||
}
|
||||
case bincVdSmallInt:
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
|
||||
case bincVdPosInt:
|
||||
n.v = valueTypeUint
|
||||
n.u = d.decUint()
|
||||
case bincVdNegInt:
|
||||
n.v = valueTypeInt
|
||||
n.i = -(int64(d.decUint()))
|
||||
case bincVdFloat:
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.decFloat()
|
||||
case bincVdSymbol:
|
||||
n.v = valueTypeSymbol
|
||||
n.s = d.DecodeString()
|
||||
case bincVdString:
|
||||
n.v = valueTypeString
|
||||
n.s = d.DecodeString()
|
||||
case bincVdByteArray:
|
||||
n.v = valueTypeBytes
|
||||
n.l = d.DecodeBytes(nil, false, false)
|
||||
case bincVdTimestamp:
|
||||
n.v = valueTypeTimestamp
|
||||
tt, err := decodeTime(d.r.readx(int(d.vs)))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
n.t = tt
|
||||
case bincVdCustomExt:
|
||||
n.v = valueTypeExt
|
||||
l := d.decLen()
|
||||
n.u = uint64(d.r.readn1())
|
||||
n.l = d.r.readx(l)
|
||||
case bincVdArray:
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case bincVdMap:
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
default:
|
||||
d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
|
||||
}
|
||||
|
||||
if !decodeFurther {
|
||||
d.bdRead = false
|
||||
}
|
||||
if n.v == valueTypeUint && d.h.SignedInteger {
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(n.u)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//------------------------------------
|
||||
|
||||
//BincHandle is a Handle for the Binc Schema-Free Encoding Format
|
||||
//defined at https://github.com/ugorji/binc .
|
||||
//
|
||||
//BincHandle currently supports all Binc features with the following EXCEPTIONS:
|
||||
// - only integers up to 64 bits of precision are supported.
|
||||
// big integers are unsupported.
|
||||
// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
|
||||
// extended precision and decimal IEEE 754 floats are unsupported.
|
||||
// - Only UTF-8 strings supported.
|
||||
// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
|
||||
//
|
||||
//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
|
||||
type BincHandle struct {
|
||||
BasicHandle
|
||||
binaryEncodingType
|
||||
}
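A minimal usage sketch, not part of the original file: it assumes only the package's public constructors (NewEncoderBytes, NewDecoderBytes) and shows a BincHandle round-trip; the helper name exampleBincRoundTrip is illustrative.

// exampleBincRoundTrip is a hypothetical sketch (in package codec) of encoding
// and decoding a value with the Binc handle defined above.
func exampleBincRoundTrip() error {
	var h BincHandle
	in := map[string]int{"a": 1, "b": 2}
	var buf []byte
	// Encode in into buf using the Binc format.
	if err := NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		return err
	}
	var out map[string]int
	// Decode buf back; out should mirror in on success.
	return NewDecoderBytes(buf, &h).Decode(&out)
}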

func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
	return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}

func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
	return &bincEncDriver{e: e, w: e.w}
}

func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
	return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}

func (e *bincEncDriver) reset() {
	e.w = e.e.w
	e.s = 0
	e.m = nil
}

func (d *bincDecDriver) reset() {
	d.r = d.d.r
	d.s = nil
	d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
}

var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil)
592
vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
592
vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
@@ -1,592 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import (
	"math"
	"reflect"
)

const (
	cborMajorUint byte = iota
	cborMajorNegInt
	cborMajorBytes
	cborMajorText
	cborMajorArray
	cborMajorMap
	cborMajorTag
	cborMajorOther
)

const (
	cborBdFalse byte = 0xf4 + iota
	cborBdTrue
	cborBdNil
	cborBdUndefined
	cborBdExt
	cborBdFloat16
	cborBdFloat32
	cborBdFloat64
)

const (
	cborBdIndefiniteBytes byte = 0x5f
	cborBdIndefiniteString = 0x7f
	cborBdIndefiniteArray = 0x9f
	cborBdIndefiniteMap = 0xbf
	cborBdBreak = 0xff
)

const (
	CborStreamBytes byte = 0x5f
	CborStreamString = 0x7f
	CborStreamArray = 0x9f
	CborStreamMap = 0xbf
	CborStreamBreak = 0xff
)

const (
	cborBaseUint byte = 0x00
	cborBaseNegInt = 0x20
	cborBaseBytes = 0x40
	cborBaseString = 0x60
	cborBaseArray = 0x80
	cborBaseMap = 0xa0
	cborBaseTag = 0xc0
	cborBaseSimple = 0xe0
)

// -------------------

type cborEncDriver struct {
	noBuiltInTypes
	encNoSeparator
	e *Encoder
	w encWriter
	h *CborHandle
	x [8]byte
}

func (e *cborEncDriver) EncodeNil() {
	e.w.writen1(cborBdNil)
}

func (e *cborEncDriver) EncodeBool(b bool) {
	if b {
		e.w.writen1(cborBdTrue)
	} else {
		e.w.writen1(cborBdFalse)
	}
}

func (e *cborEncDriver) EncodeFloat32(f float32) {
	e.w.writen1(cborBdFloat32)
	bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}

func (e *cborEncDriver) EncodeFloat64(f float64) {
	e.w.writen1(cborBdFloat64)
	bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}

func (e *cborEncDriver) encUint(v uint64, bd byte) {
	if v <= 0x17 {
		e.w.writen1(byte(v) + bd)
	} else if v <= math.MaxUint8 {
		e.w.writen2(bd+0x18, uint8(v))
	} else if v <= math.MaxUint16 {
		e.w.writen1(bd + 0x19)
		bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
	} else if v <= math.MaxUint32 {
		e.w.writen1(bd + 0x1a)
		bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
	} else { // if v <= math.MaxUint64 {
		e.w.writen1(bd + 0x1b)
		bigenHelper{e.x[:8], e.w}.writeUint64(v)
	}
}

func (e *cborEncDriver) EncodeInt(v int64) {
	if v < 0 {
		e.encUint(uint64(-1-v), cborBaseNegInt)
	} else {
		e.encUint(uint64(v), cborBaseUint)
	}
}

func (e *cborEncDriver) EncodeUint(v uint64) {
	e.encUint(v, cborBaseUint)
}

func (e *cborEncDriver) encLen(bd byte, length int) {
	e.encUint(uint64(length), bd)
}

func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
	e.encUint(uint64(xtag), cborBaseTag)
	if v := ext.ConvertExt(rv); v == nil {
		e.EncodeNil()
	} else {
		en.encode(v)
	}
}

func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
	e.encUint(uint64(re.Tag), cborBaseTag)
	if re.Data != nil {
		en.encode(re.Data)
	} else if re.Value == nil {
		e.EncodeNil()
	} else {
		en.encode(re.Value)
	}
}

func (e *cborEncDriver) EncodeArrayStart(length int) {
	e.encLen(cborBaseArray, length)
}

func (e *cborEncDriver) EncodeMapStart(length int) {
	e.encLen(cborBaseMap, length)
}

func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
	e.encLen(cborBaseString, len(v))
	e.w.writestr(v)
}

func (e *cborEncDriver) EncodeSymbol(v string) {
	e.EncodeString(c_UTF8, v)
}

func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
	if c == c_RAW {
		e.encLen(cborBaseBytes, len(v))
	} else {
		e.encLen(cborBaseString, len(v))
	}
	e.w.writeb(v)
}

// ----------------------

type cborDecDriver struct {
	d *Decoder
	h *CborHandle
	r decReader
	b [scratchByteArrayLen]byte
	br bool // bytes reader
	bdRead bool
	bd byte
	noBuiltInTypes
	decNoSeparator
}

func (d *cborDecDriver) readNextBd() {
	d.bd = d.r.readn1()
	d.bdRead = true
}

func (d *cborDecDriver) uncacheRead() {
	if d.bdRead {
		d.r.unreadn1()
		d.bdRead = false
	}
}

func (d *cborDecDriver) ContainerType() (vt valueType) {
	if d.bd == cborBdNil {
		return valueTypeNil
	} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
		return valueTypeBytes
	} else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
		return valueTypeString
	} else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
		return valueTypeArray
	} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
		return valueTypeMap
	} else {
		// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
	}
	return valueTypeUnset
}

func (d *cborDecDriver) TryDecodeAsNil() bool {
	if !d.bdRead {
		d.readNextBd()
	}
	// treat Nil and Undefined as nil values
	if d.bd == cborBdNil || d.bd == cborBdUndefined {
		d.bdRead = false
		return true
	}
	return false
}

func (d *cborDecDriver) CheckBreak() bool {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.bd == cborBdBreak {
		d.bdRead = false
		return true
	}
	return false
}

func (d *cborDecDriver) decUint() (ui uint64) {
	v := d.bd & 0x1f
	if v <= 0x17 {
		ui = uint64(v)
	} else {
		if v == 0x18 {
			ui = uint64(d.r.readn1())
		} else if v == 0x19 {
			ui = uint64(bigen.Uint16(d.r.readx(2)))
		} else if v == 0x1a {
			ui = uint64(bigen.Uint32(d.r.readx(4)))
		} else if v == 0x1b {
			ui = uint64(bigen.Uint64(d.r.readx(8)))
		} else {
			d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
			return
		}
	}
	return
}

func (d *cborDecDriver) decCheckInteger() (neg bool) {
	if !d.bdRead {
		d.readNextBd()
	}
	major := d.bd >> 5
	if major == cborMajorUint {
	} else if major == cborMajorNegInt {
		neg = true
	} else {
		d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
		return
	}
	return
}

func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
	neg := d.decCheckInteger()
	ui := d.decUint()
	// check if this number can be converted to an int without overflow
	var overflow bool
	if neg {
		if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
			d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
			return
		}
		i = -i
	} else {
		if i, overflow = chkOvf.SignedInt(ui); overflow {
			d.d.errorf("cbor: overflow converting %v to signed integer", ui)
			return
		}
	}
	if chkOvf.Int(i, bitsize) {
		d.d.errorf("cbor: overflow integer: %v", i)
		return
	}
	d.bdRead = false
	return
}

func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
	if d.decCheckInteger() {
		d.d.errorf("Assigning negative signed value to unsigned type")
		return
	}
	ui = d.decUint()
	if chkOvf.Uint(ui, bitsize) {
		d.d.errorf("cbor: overflow integer: %v", ui)
		return
	}
	d.bdRead = false
	return
}

func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
	if !d.bdRead {
		d.readNextBd()
	}
	if bd := d.bd; bd == cborBdFloat16 {
		f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
	} else if bd == cborBdFloat32 {
		f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
	} else if bd == cborBdFloat64 {
		f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
	} else if bd >= cborBaseUint && bd < cborBaseBytes {
		f = float64(d.DecodeInt(64))
	} else {
		d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
		return
	}
	if chkOverflow32 && chkOvf.Float32(f) {
		d.d.errorf("cbor: float32 overflow: %v", f)
		return
	}
	d.bdRead = false
	return
}

// bool can be decoded from bool only (single byte).
func (d *cborDecDriver) DecodeBool() (b bool) {
	if !d.bdRead {
		d.readNextBd()
	}
	if bd := d.bd; bd == cborBdTrue {
		b = true
	} else if bd == cborBdFalse {
	} else {
		d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
		return
	}
	d.bdRead = false
	return
}

func (d *cborDecDriver) ReadMapStart() (length int) {
	d.bdRead = false
	if d.bd == cborBdIndefiniteMap {
		return -1
	}
	return d.decLen()
}

func (d *cborDecDriver) ReadArrayStart() (length int) {
	d.bdRead = false
	if d.bd == cborBdIndefiniteArray {
		return -1
	}
	return d.decLen()
}

func (d *cborDecDriver) decLen() int {
	return int(d.decUint())
}

func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
	d.bdRead = false
	for {
		if d.CheckBreak() {
			break
		}
		if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
			d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
			return nil
		}
		n := d.decLen()
		oldLen := len(bs)
		newLen := oldLen + n
		if newLen > cap(bs) {
			bs2 := make([]byte, newLen, 2*cap(bs)+n)
			copy(bs2, bs)
			bs = bs2
		} else {
			bs = bs[:newLen]
		}
		d.r.readb(bs[oldLen:newLen])
		// bs = append(bs, d.r.readn()...)
		d.bdRead = false
	}
	d.bdRead = false
	return bs
}

func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.bd == cborBdNil || d.bd == cborBdUndefined {
		d.bdRead = false
		return nil
	}
	if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
		if bs == nil {
			return d.decAppendIndefiniteBytes(nil)
		}
		return d.decAppendIndefiniteBytes(bs[:0])
	}
	clen := d.decLen()
	d.bdRead = false
	if zerocopy {
		if d.br {
			return d.r.readx(clen)
		} else if len(bs) == 0 {
			bs = d.b[:]
		}
	}
	return decByteSlice(d.r, clen, bs)
}

func (d *cborDecDriver) DecodeString() (s string) {
	return string(d.DecodeBytes(d.b[:], true, true))
}

func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
	if !d.bdRead {
		d.readNextBd()
	}
	u := d.decUint()
	d.bdRead = false
	realxtag = u
	if ext == nil {
		re := rv.(*RawExt)
		re.Tag = realxtag
		d.d.decode(&re.Value)
	} else if xtag != realxtag {
		d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
		return
	} else {
		var v interface{}
		d.d.decode(&v)
		ext.UpdateExt(rv, v)
	}
	d.bdRead = false
	return
}

func (d *cborDecDriver) DecodeNaked() {
	if !d.bdRead {
		d.readNextBd()
	}

	n := &d.d.n
	var decodeFurther bool

	switch d.bd {
	case cborBdNil:
		n.v = valueTypeNil
	case cborBdFalse:
		n.v = valueTypeBool
		n.b = false
	case cborBdTrue:
		n.v = valueTypeBool
		n.b = true
	case cborBdFloat16, cborBdFloat32:
		n.v = valueTypeFloat
		n.f = d.DecodeFloat(true)
	case cborBdFloat64:
		n.v = valueTypeFloat
		n.f = d.DecodeFloat(false)
	case cborBdIndefiniteBytes:
		n.v = valueTypeBytes
		n.l = d.DecodeBytes(nil, false, false)
	case cborBdIndefiniteString:
		n.v = valueTypeString
		n.s = d.DecodeString()
	case cborBdIndefiniteArray:
		n.v = valueTypeArray
		decodeFurther = true
	case cborBdIndefiniteMap:
		n.v = valueTypeMap
		decodeFurther = true
	default:
		switch {
		case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
			if d.h.SignedInteger {
				n.v = valueTypeInt
				n.i = d.DecodeInt(64)
			} else {
				n.v = valueTypeUint
				n.u = d.DecodeUint(64)
			}
		case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
			n.v = valueTypeInt
			n.i = d.DecodeInt(64)
		case d.bd >= cborBaseBytes && d.bd < cborBaseString:
			n.v = valueTypeBytes
			n.l = d.DecodeBytes(nil, false, false)
		case d.bd >= cborBaseString && d.bd < cborBaseArray:
			n.v = valueTypeString
			n.s = d.DecodeString()
		case d.bd >= cborBaseArray && d.bd < cborBaseMap:
			n.v = valueTypeArray
			decodeFurther = true
		case d.bd >= cborBaseMap && d.bd < cborBaseTag:
			n.v = valueTypeMap
			decodeFurther = true
		case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
			n.v = valueTypeExt
			n.u = d.decUint()
			n.l = nil
			// d.bdRead = false
			// d.d.decode(&re.Value) // handled by decode itself.
			// decodeFurther = true
		default:
			d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
			return
		}
	}

	if !decodeFurther {
		d.bdRead = false
	}
	return
}

// -------------------------

// CborHandle is a Handle for the CBOR encoding format,
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
//
// CBOR is comprehensively supported, including support for:
//   - indefinite-length arrays/maps/bytes/strings
//   - (extension) tags in range 0..0xffff (0 .. 65535)
//   - half, single and double-precision floats
//   - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
//   - nil, true, false, ...
//   - arrays and maps, bytes and text strings
//
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones:
//   - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
//
// To encode with indefinite lengths (streaming), users will use
// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
//
// For example, to encode "one-byte" as an indefinite length string:
//     var buf bytes.Buffer
//     e := NewEncoder(&buf, new(CborHandle))
//     buf.WriteByte(CborStreamString)
//     e.MustEncode("one-")
//     e.MustEncode("byte")
//     buf.WriteByte(CborStreamBreak)
//     encodedBytes := buf.Bytes()
//     var vv interface{}
//     NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
//     // Now, vv contains the same string "one-byte"
//
type CborHandle struct {
	binaryEncodingType
	BasicHandle
}
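To complement the streaming example in the doc comment above, here is a minimal sketch of ordinary definite-length use; it assumes only the package's NewEncoderBytes/NewDecoderBytes constructors, and the helper name exampleCborRoundTrip is illustrative.

// exampleCborRoundTrip is a hypothetical sketch (in package codec) of a
// definite-length encode/decode cycle with CborHandle.
func exampleCborRoundTrip() error {
	var h CborHandle
	in := []interface{}{"hello", uint64(7), true}
	var buf []byte
	// Encode in into buf using CBOR.
	if err := NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		return err
	}
	var out interface{}
	// Decode buf back into a generic value.
	return NewDecoderBytes(buf, &h).Decode(&out)
}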

func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
	return h.SetExt(rt, tag, &setExtWrapper{i: ext})
}

func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
	return &cborEncDriver{e: e, w: e.w, h: h}
}

func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
	return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}

func (e *cborEncDriver) reset() {
	e.w = e.e.w
}

func (d *cborDecDriver) reset() {
	d.r = d.d.r
	d.bd, d.bdRead = 0, false
}

var _ decDriver = (*cborDecDriver)(nil)
var _ encDriver = (*cborEncDriver)(nil)
2053
vendor/github.com/ugorji/go/codec/decode.go
generated
vendored
2053
vendor/github.com/ugorji/go/codec/decode.go
generated
vendored
File diff suppressed because it is too large
Load diff
Some files were not shown because too many files have changed in this diff