diff --git a/go.mod b/go.mod index b3ac8d61e..f7ba0754f 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ require ( github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945 // indirect github.com/spf13/cobra v0.0.4 github.com/spf13/pflag v1.0.3 + github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 diff --git a/go.sum b/go.sum index c7757b2c8..dcf530673 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA= github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -49,6 +50,7 @@ github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4 github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1/go.mod h1:XfZS1iyC28CnllR54Ou2Ero6qs4Rmn7GpVumNSj1DZo= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= @@ -117,6 +119,8 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4 h1:vELsocEzlhM4lk2nhxolEaQrMp25u7/i9IX8s9uLads= +github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4/go.mod h1:gmFANS06wAVmF0B9yi65QKsRmPQ97tze7FRLswua+OY= github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -182,6 +186,8 @@ github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9 github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mmarkdown/mmark 
v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= +github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= @@ -269,6 +275,8 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813 h1:1Q3NGZNH8/oSGhSe0J8WjkFCeIOb0JSy7M4RpqY64XI= +github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813/go.mod h1:BjDk9nfX4091pXLHhvf6Ejr4/r//9NslWmweWb2Hkbs= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000..0cd380037 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 000000000..8b8afc4f0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 000000000..6efcfd0ce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000..01b574320 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 000000000..3600848d3 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000..7c1b37ecc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
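Before the vendored sources themselves, a quick orientation to the API they provide: `toml.Decode` returns a `MetaData` value describing which keys appeared in the document, and `toml.NewEncoder` reverses the mapping. The following is a minimal sketch of that round trip, not part of the vendored code; the `config` type and the TOML snippet are invented for illustration.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// config is a hypothetical type, used here only to exercise the API.
type config struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	const blob = `
host = "localhost"
port = 8080
timeout = "30s" # no matching struct field
`
	var cfg config
	md, err := toml.Decode(blob, &cfg)
	if err != nil {
		log.Fatal(err)
	}

	// MetaData reports which keys were present and which were not decoded.
	fmt.Println(md.IsDefined("host")) // true
	fmt.Println(md.Undecoded())       // [timeout]

	// Encoding reverses the mapping.
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// host = "localhost"
	// port = 8080
}
```

`Undecoded` is handy for flagging misspelled configuration keys, since the decoder's loose mapping otherwise ignores them silently.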
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000..b0fd51d5b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. 
The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. 
But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 000000000..b9914a679 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchically. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000..b371f396e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000..d905c21a2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we use that. + // Basically, this prevents the encoder from handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side.
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slice). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays.
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 000000000..d36e1dd60 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 000000000..e8d503d04 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000..e0a742a88 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) 
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
+func lexBareKey(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareKey
+ case isWhitespace(r):
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ case r == keySep:
+ lx.backup()
+ lx.emit(itemText)
+ return lexKeyEnd
+ default:
+ return lx.errorf("bare keys cannot contain %q", r)
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case r == keySep:
+ return lexSkip(lx, lexValue)
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ default:
+ return lx.errorf("expected key separator %q, but got %q instead",
+ keySep, r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT newlines.
+ // In array syntax, the array states are responsible for ignoring newlines.
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ }
+ switch r {
+ case arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case inlineTableStart:
+ lx.ignore()
+ lx.emit(itemInlineTableStart)
+ return lexInlineTableValue
+ case stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+ lx.ignore() // Ignore '''
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case '+', '-':
+ return lexNumberStart
+ case '.': // special error case, be kind to users
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ if unicode.IsLetter(r) {
+ // Be permissive here; lexBool will give a nice error if the
+ // user wrote something like
+ // x = foo
+ // (i.e. not 'true' or 'false' but is something else word-like.)
+ lx.backup()
+ return lexBool
+ }
+ return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == arrayEnd:
+ // NOTE(caleb): The spec isn't clear about whether you can have
+ // a trailing comma or not, so we'll allow it.
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
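
The push/pop stack above is what lets shared states like lexValue hand control back to whichever context pushed a return state (a key, an array element, or an inline-table value). As a rough sketch of how the machine is driven end to end, assuming it sits inside this package (lex, item, and the item* constants are unexported) and that fmt is imported; dumpTokens is a hypothetical helper, not part of the vendored file:

    func dumpTokens(input string) {
        lx := lex(input)
        for {
            // nextItem runs state functions until one of them emits an item.
            it := lx.nextItem()
            fmt.Printf("line %d: %s %q\n", it.line, it.typ, it.val)
            if it.typ == itemEOF || it.typ == itemError {
                return
            }
        }
    }

+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.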
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign
+// has already been read, but that *no* digits have been consumed.
+// lexNumberStart will move to the appropriate integer or float states.
+func lexNumberStart(lx *lexer) stateFn {
+ // We MUST see a digit. Even floats have to start with a digit.
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+ return lx.errorf("expected a digit but got %q", r)
+ }
+ return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumber
+ }
+ switch r {
+ case '_':
+ return lexNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+ switch r {
+ case '_', '.', '-', '+', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+ var rs []rune
+ for {
+ r := lx.next()
+ if !unicode.IsLetter(r) {
+ lx.backup()
+ break
+ }
+ rs = append(rs, r)
+ }
+ s := string(rs)
+ switch s {
+ case "true", "false":
+ lx.emit(itemBool)
+ return lx.pop()
+ }
+ return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
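
The classifier helpers that follow pin down, among other things, exactly which characters a bare (unquoted) key may contain. A small illustrative wrapper over isBareKeyChar; isValidBareKey is a hypothetical helper, not part of the vendored file:

    // isValidBareKey reports whether s could be written as a bare TOML key.
    func isValidBareKey(s string) bool {
        if s == "" {
            return false
        }
        for _, r := range s {
            // bare keys allow only A-Z, a-z, 0-9, '_' and '-'
            if !isBareKeyChar(r) {
                return false
            }
        }
        return true
    }

For example, isValidBareKey("server-1") is true, while isValidBareKey("server.1") is false because '.' separates keys rather than naming one.

+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.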
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000..50869ef92 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// keyString gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
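
Before the per-item translation below, a sketch of how this front end fits together when driven from inside the package (parse is unexported; the inspect helper and the fmt usage are assumptions for illustration only):

    func inspect() {
        p, err := parse("x = 42\ny = \"hi\"\n")
        if err != nil {
            // internal panics of type parseError surface here as err
            fmt.Println("parse error:", err)
            return
        }
        fmt.Printf("%v %T\n", p.mapping["x"], p.mapping["x"]) // 42 int64
        fmt.Printf("%v %T\n", p.mapping["y"], p.mapping["y"]) // hi string
    }

+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.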
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, and will
+// account for implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is a table of hashes. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+ // But we have to make sure to stop marking it as implicit (so that
+ // another redefinition provokes an error).
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
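
The interplay between establishContext, setValue, and the implicit-key bookkeeping is easiest to see on a concrete document. A hedged illustration of the rules the code above enforces (the input is TOML, shown here as Go comments):

    // [a.b.c]   # tables 'a' and 'a.b' are created implicitly
    // answer = 42
    //
    // [a.b]     # fine: 'a.b' existed only implicitly, so one explicit
    // x = 1     # definition is still allowed
    //
    // [a.b.c]   # error: 'a.b.c' was already defined explicitly above

+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).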
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 000000000..562164be0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 000000000..c73f8afc1 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000..608997c22 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/gomarkdown/markdown/LICENSE.txt b/vendor/github.com/gomarkdown/markdown/LICENSE.txt
new file mode 100644
index 000000000..688046102
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/LICENSE.txt
@@ -0,0 +1,31 @@
+Markdown is distributed under the Simplified BSD License:
+
+Copyright © 2011 Russ Ross
+Copyright © 2018 Krzysztof Kowalczyk
+Copyright © 2018 Authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided with
+ the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gomarkdown/markdown/ast/attribute.go b/vendor/github.com/gomarkdown/markdown/ast/attribute.go
new file mode 100644
index 000000000..002c6a2ec
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/ast/attribute.go
@@ -0,0 +1,10 @@
+package ast
+
+// Attributes can be attached to block elements. They are specified as
+// {#id .class key="value"} where quotes for values are mandatory, and multiple
+// key/value pairs are separated by whitespace.
+type Attribute struct {
+ ID []byte
+ Classes [][]byte
+ Attrs map[string][]byte
+}
diff --git a/vendor/github.com/gomarkdown/markdown/ast/doc.go b/vendor/github.com/gomarkdown/markdown/ast/doc.go
new file mode 100644
index 000000000..376dc67cc
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/ast/doc.go
@@ -0,0 +1,4 @@
+/*
+Package ast defines tree representation of a parsed markdown document.
+*/
+package ast
diff --git a/vendor/github.com/gomarkdown/markdown/ast/node.go b/vendor/github.com/gomarkdown/markdown/ast/node.go
new file mode 100644
index 000000000..329223a5a
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/ast/node.go
@@ -0,0 +1,554 @@
+package ast
+
+// ListType contains bitwise or'ed flags for list and list item objects.
+type ListType int
+
+// These are the possible flag values for the ListItem renderer.
+// Multiple flag values may be ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+ ListTypeOrdered ListType = 1 << iota
+ ListTypeDefinition
+ ListTypeTerm
+
+ ListItemContainsBlock
+ ListItemBeginningOfList // TODO: figure out if this is of any use now
+ ListItemEndOfList
+)
+
+// CellAlignFlags holds a type of alignment in a table cell.
+type CellAlignFlags int
+
+// These are the possible flag values for the table cell renderer.
+// Only a single one of these values will be used; they are not ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+ TableAlignmentLeft CellAlignFlags = 1 << iota
+ TableAlignmentRight
+ TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
+)
+
+func (a CellAlignFlags) String() string {
+ switch a {
+ case TableAlignmentLeft:
+ return "left"
+ case TableAlignmentRight:
+ return "right"
+ case TableAlignmentCenter:
+ return "center"
+ default:
+ return ""
+ }
+}
+
+// DocumentMatters holds the type of a {front,main,back}matter in the document
+type DocumentMatters int
+
+// These are all possible Document divisions.
+const (
+ DocumentMatterNone DocumentMatters = iota
+ DocumentMatterFront
+ DocumentMatterMain
+ DocumentMatterBack
+)
+
+// CitationTypes holds the type of a citation, informative, normative or suppressed
+type CitationTypes int
+
+const (
+ CitationTypeNone CitationTypes = iota
+ CitationTypeSuppressed
+ CitationTypeInformative
+ CitationTypeNormative
+)
+
+// Node defines an ast node
+type Node interface {
+ AsContainer() *Container
+ AsLeaf() *Leaf
+ GetParent() Node
+ SetParent(newParent Node)
+ GetChildren() []Node
+ SetChildren(newChildren []Node)
+}
+
+// Container is a type of node that can contain children
+type Container struct {
+ Parent Node
+ Children []Node
+
+ Literal []byte // Text contents of the leaf nodes
+ Content []byte // Markdown content of the block nodes
+
+ *Attribute // Block level attribute
+}
+
+// AsContainer returns itself as *Container
+func (c *Container) AsContainer() *Container {
+ return c
+}
+
+// AsLeaf returns nil
+func (c *Container) AsLeaf() *Leaf {
+ return nil
+}
+
+// GetParent returns parent node
+func (c *Container) GetParent() Node {
+ return c.Parent
+}
+
+// SetParent sets the parent node
+func (c *Container) SetParent(newParent Node) {
+ c.Parent = newParent
+}
+
+// GetChildren returns children nodes
+func (c *Container) GetChildren() []Node {
+ return c.Children
+}
+
+// SetChildren sets children nodes
+func (c *Container) SetChildren(newChildren []Node) {
+ c.Children = newChildren
+}
+
+// Leaf is a type of node that cannot have children
+type Leaf struct {
+ Parent Node
+
+ Literal []byte // Text contents of the leaf nodes
+ Content []byte // Markdown content of the block nodes
+
+ *Attribute // Block level attribute
+}
+
+// AsContainer returns nil
+func (l *Leaf) AsContainer() *Container {
+ return nil
+}
+
+// AsLeaf returns itself as *Leaf
+func (l *Leaf) AsLeaf() *Leaf {
+ return l
+}
+
+// GetParent returns parent node
+func (l *Leaf) GetParent() Node {
+ return l.Parent
+}
+
+// SetParent sets the parent node
+func (l *Leaf) SetParent(newParent Node) {
+ l.Parent = newParent
+}
+
+// GetChildren returns nil because Leaf cannot have children
+func (l *Leaf) GetChildren() []Node {
+ return nil
+}
+
+// SetChildren will panic because Leaf cannot have children
+func (l *Leaf) SetChildren(newChildren []Node) {
+ panic("leaf node cannot have children")
+}
+
+// Document represents markdown document node, a root of ast
+type Document struct {
+ Container
+}
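
Because the concrete node types below are plain structs wired together through the Container/Leaf embedding above, a tree can be assembled and inspected by hand. A minimal sketch using AppendChild and WalkFunc, both defined later in this file (the demo helper and the fmt import are assumptions):

    func demo() {
        doc := &Document{}
        para := &Paragraph{}
        AppendChild(doc, para)

        txt := &Text{}
        txt.Literal = []byte("hello")
        AppendChild(para, txt)

        // print every text node on the way in
        WalkFunc(doc, func(n Node, entering bool) WalkStatus {
            if t, ok := n.(*Text); ok && entering {
                fmt.Printf("%s\n", t.Literal)
            }
            return GoToNext
        })
    }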
+
+// DocumentMatter represents markdown node that signals a document
+// division: frontmatter, mainmatter or backmatter.
+type DocumentMatter struct {
+ Container
+
+ Matter DocumentMatters
+}
+
+// BlockQuote represents markdown block quote node
+type BlockQuote struct {
+ Container
+}
+
+// Aside represents a markdown aside node.
+type Aside struct {
+ Container
+}
+
+// List represents markdown list node
+type List struct {
+ Container
+
+ ListFlags ListType
+ Tight bool // Skip <p>s around list item data if true
+ BulletChar byte // '*', '+' or '-' in bullet lists
+ Delimiter byte // '.' or ')' after the number in ordered lists
+ Start int // for ordered lists this indicates the starting number if > 0
+ RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
+ IsFootnotesList bool // This is a list of footnotes
+}
+
+// ListItem represents markdown list item node
+type ListItem struct {
+ Container
+
+ ListFlags ListType
+ Tight bool // Skip <p>s around list item data if true
+ BulletChar byte // '*', '+' or '-' in bullet lists
+ Delimiter byte // '.' or ')' after the number in ordered lists
+ RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
+ IsFootnotesList bool // This is a list of footnotes
+}
+
+// Paragraph represents markdown paragraph node
+type Paragraph struct {
+ Container
+}
+
+// Math represents markdown MathJax inline node
+type Math struct {
+ Leaf
+}
+
+// MathBlock represents markdown MathJax block node
+type MathBlock struct {
+ Container
+}
+
+// Heading represents markdown heading node
+type Heading struct {
+ Container
+
+ Level int // This holds the heading level number
+ HeadingID string // This might hold heading ID, if present
+ IsTitleblock bool // Specifies whether it's a title block
+ IsSpecial bool // We are a special heading (starts with .#)
+}
+
+// HorizontalRule represents markdown horizontal rule node
+type HorizontalRule struct {
+ Leaf
+}
+
+// Emph represents markdown emphasis node
+type Emph struct {
+ Container
+}
+
+// Strong represents markdown strong node
+type Strong struct {
+ Container
+}
+
+// Del represents markdown del node
+type Del struct {
+ Container
+}
+
+// Link represents markdown link node
+type Link struct {
+ Container
+
+ Destination []byte // Destination is what goes into a href
+ Title []byte // Title is the tooltip thing that goes in a title attribute
+ NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
+ Footnote Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
+ DeferredID []byte // If a deferred link this holds the original ID.
+}
+
+// CrossReference is a reference node.
+type CrossReference struct {
+ Container
+
+ Destination []byte // Destination is where the reference points to
+}
+
+// Citation is a citation node.
+type Citation struct {
+ Leaf
+
+ Destination [][]byte // Destination is where the citation points to. Multiple ones are allowed.
+ Type []CitationTypes // 1:1 mapping of destination and citation type
+ Suffix [][]byte // Potential citation suffix, i.e. [@!RFC1035, p.
144]
+}
+
+// Image represents markdown image node
+type Image struct {
+ Container
+
+ Destination []byte // Destination is what goes into a href
+ Title []byte // Title is the tooltip thing that goes in a title attribute
+}
+
+// Text represents markdown text node
+type Text struct {
+ Leaf
+}
+
+// HTMLBlock represents markdown html node
+type HTMLBlock struct {
+ Leaf
+}
+
+// CodeBlock represents markdown code block node
+type CodeBlock struct {
+ Leaf
+
+ IsFenced bool // Specifies whether it's a fenced code block or an indented one
+ Info []byte // This holds the info string
+ FenceChar byte
+ FenceLength int
+ FenceOffset int
+}
+
+// Softbreak represents markdown softbreak node
+// Note: not used currently
+type Softbreak struct {
+ Leaf
+}
+
+// Hardbreak represents markdown hard break node
+type Hardbreak struct {
+ Leaf
+}
+
+// Code represents markdown code node
+type Code struct {
+ Leaf
+}
+
+// HTMLSpan represents markdown html span node
+type HTMLSpan struct {
+ Leaf
+}
+
+// Table represents markdown table node
+type Table struct {
+ Container
+}
+
+// TableCell represents markdown table cell node
+type TableCell struct {
+ Container
+
+ IsHeader bool // This tells if it's under the header row
+ Align CellAlignFlags // This holds the value for align attribute
+}
+
+// TableHeader represents markdown table head node
+type TableHeader struct {
+ Container
+}
+
+// TableBody represents markdown table body node
+type TableBody struct {
+ Container
+}
+
+// TableRow represents markdown table row node
+type TableRow struct {
+ Container
+}
+
+// TableFooter represents markdown table foot node
+type TableFooter struct {
+ Container
+}
+
+// Caption represents a figure, code or quote caption
+type Caption struct {
+ Container
+}
+
+// CaptionFigure is a node (blockquote or codeblock) that has a caption
+type CaptionFigure struct {
+ Container
+
+ HeadingID string // This might hold heading ID, if present
+}
+
+// Callout is a node that can exist both in text (where it is an actual node) and in a code block.
+type Callout struct {
+ Leaf
+
+ ID []byte // number of this callout
+}
+
+// Index is a node that contains an Index item and an optional subitem.
+type Index struct {
+ Leaf
+
+ Primary bool
+ Item []byte
+ Subitem []byte
+ ID string // ID of the index
+}
+
+// Subscript is a subscript node
+type Subscript struct {
+ Leaf
+}
+
+// Superscript is a superscript node
+type Superscript struct {
+ Leaf
+}
+
+// Footnotes is a node that contains all footnotes
+type Footnotes struct {
+ Container
+}
+
+func removeNodeFromArray(a []Node, node Node) []Node {
+ n := len(a)
+ for i := 0; i < n; i++ {
+ if a[i] == node {
+ return append(a[:i], a[i+1:]...)
+ }
+ }
+ return nil
+}
+
+// AppendChild appends child to children of parent
+// It panics if either node is nil.
+func AppendChild(parent Node, child Node) {
+ RemoveFromTree(child)
+ child.SetParent(parent)
+ newChildren := append(parent.GetChildren(), child)
+ parent.SetChildren(newChildren)
+}
+
+// RemoveFromTree removes this node from tree
+func RemoveFromTree(n Node) {
+ if n.GetParent() == nil {
+ return
+ }
+ // important: don't clear n.Children if n has no parent
+ // we're called from AppendChild and that might happen on a node
+ // that accumulated Children but hasn't been inserted into the tree
+ n.SetChildren(nil)
+ p := n.GetParent()
+ newChildren := removeNodeFromArray(p.GetChildren(), n)
+ if newChildren != nil {
+ p.SetChildren(newChildren)
+ }
+}
+
+// GetLastChild returns last child of node n
+// It's implemented as stand-alone function to keep Node interface small
+func GetLastChild(n Node) Node {
+ a := n.GetChildren()
+ if len(a) > 0 {
+ return a[len(a)-1]
+ }
+ return nil
+}
+
+// GetFirstChild returns first child of node n
+// It's implemented as stand-alone function to keep Node interface small
+func GetFirstChild(n Node) Node {
+ a := n.GetChildren()
+ if len(a) > 0 {
+ return a[0]
+ }
+ return nil
+}
+
+// GetNextNode returns next sibling of node n (node after n)
+// We can't make it part of Container or Leaf because we lose Node identity
+func GetNextNode(n Node) Node {
+ parent := n.GetParent()
+ if parent == nil {
+ return nil
+ }
+ a := parent.GetChildren()
+ len := len(a) - 1
+ for i := 0; i < len; i++ {
+ if a[i] == n {
+ return a[i+1]
+ }
+ }
+ return nil
+}
+
+// GetPrevNode returns previous sibling of node n (node before n)
+// We can't make it part of Container or Leaf because we lose Node identity
+func GetPrevNode(n Node) Node {
+ parent := n.GetParent()
+ if parent == nil {
+ return nil
+ }
+ a := parent.GetChildren()
+ len := len(a)
+ for i := 1; i < len; i++ {
+ if a[i] == n {
+ return a[i-1]
+ }
+ }
+ return nil
+}
+
+// WalkStatus allows NodeVisitor to have some control over the tree traversal.
+// It is returned from NodeVisitor and different values allow Node.Walk to
+// decide which node to go to next.
+type WalkStatus int
+
+const (
+ // GoToNext is the default traversal of every node.
+ GoToNext WalkStatus = iota
+ // SkipChildren tells walker to skip all children of current node.
+ SkipChildren
+ // Terminate tells walker to terminate the traversal.
+ Terminate
+)
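
A sketch of how the WalkStatus values above steer a traversal: the visitor below stops at the first link it sees (collectFirstLink is a hypothetical helper, not part of the vendored file):

    func collectFirstLink(doc Node) *Link {
        var found *Link
        WalkFunc(doc, func(n Node, entering bool) WalkStatus {
            if l, ok := n.(*Link); ok && entering {
                found = l
                return Terminate // stop the whole traversal at the first hit
            }
            return GoToNext
        })
        return found
    }

+// NodeVisitor is a callback to be called when traversing the syntax tree.
+// Called twice for every node: once with entering=true when the branch is
+// first visited, then with entering=false after all the children are done.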
+type NodeVisitor interface {
+ Visit(node Node, entering bool) WalkStatus
+}
+
+// NodeVisitorFunc casts a function to match NodeVisitor interface
+type NodeVisitorFunc func(node Node, entering bool) WalkStatus
+
+// Walk traverses tree recursively
+func Walk(n Node, visitor NodeVisitor) WalkStatus {
+ isContainer := n.AsContainer() != nil
+ status := visitor.Visit(n, true) // entering
+ if status == Terminate {
+ // even if terminating, close container node
+ if isContainer {
+ visitor.Visit(n, false)
+ }
+ return status
+ }
+ if isContainer && status != SkipChildren {
+ children := n.GetChildren()
+ for _, n := range children {
+ status = Walk(n, visitor)
+ if status == Terminate {
+ return status
+ }
+ }
+ }
+ if isContainer {
+ status = visitor.Visit(n, false) // exiting
+ if status == Terminate {
+ return status
+ }
+ }
+ return GoToNext
+}
+
+// Visit calls visitor function
+func (f NodeVisitorFunc) Visit(node Node, entering bool) WalkStatus {
+ return f(node, entering)
+}
+
+// WalkFunc is like Walk but accepts just a callback function
+func WalkFunc(n Node, f NodeVisitorFunc) {
+ visitor := NodeVisitorFunc(f)
+ Walk(n, visitor)
+}
diff --git a/vendor/github.com/gomarkdown/markdown/ast/print.go b/vendor/github.com/gomarkdown/markdown/ast/print.go
new file mode 100644
index 000000000..75daf911b
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/ast/print.go
@@ -0,0 +1,165 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Print is for debugging. It prints a string representation of parsed
+// markdown doc (result of parser.Parse()) to dst.
+//
+// To make output readable, it shortens text output.
+func Print(dst io.Writer, doc Node) {
+ PrintWithPrefix(dst, doc, " ")
+}
+
+// PrintWithPrefix is like Print but allows customizing prefix used for
+// indentation. By default it's 2 spaces. You can change it to e.g. tab
+// by passing "\t"
+func PrintWithPrefix(w io.Writer, doc Node, prefix string) {
+ // for more compact output, don't print outer Document
+ if _, ok := doc.(*Document); ok {
+ for _, c := range doc.GetChildren() {
+ printRecur(w, c, prefix, 0)
+ }
+ } else {
+ printRecur(w, doc, prefix, 0)
+ }
+}
+
+// ToString is like Print but returns the result as a string
+func ToString(doc Node) string {
+ var buf bytes.Buffer
+ Print(&buf, doc)
+ return buf.String()
+}
+
+func contentToString(d1 []byte, d2 []byte) string {
+ if d1 != nil {
+ return string(d1)
+ }
+ if d2 != nil {
+ return string(d2)
+ }
+ return ""
+}
+
+func getContent(node Node) string {
+ if c := node.AsContainer(); c != nil {
+ return contentToString(c.Literal, c.Content)
+ }
+ leaf := node.AsLeaf()
+ return contentToString(leaf.Literal, leaf.Content)
+}
+
+func shortenString(s string, maxLen int) string {
+ // for cleaner, one-line output, replace some white-space chars
+ // with their escaped version
+ s = strings.Replace(s, "\n", `\n`, -1)
+ s = strings.Replace(s, "\r", `\r`, -1)
+ s = strings.Replace(s, "\t", `\t`, -1)
+ if maxLen < 0 {
+ return s
+ }
+ if len(s) < maxLen {
+ return s
+ }
+ // add "..." to indicate truncation
+ return s[:maxLen-3] + "..."
+} + +// get a short name of the type of v which excludes package name +// and strips "()" from the end +func getNodeType(node Node) string { + s := fmt.Sprintf("%T", node) + s = strings.TrimSuffix(s, "()") + if idx := strings.Index(s, "."); idx != -1 { + return s[idx+1:] + } + return s +} + +func printDefault(w io.Writer, indent string, typeName string, content string) { + content = strings.TrimSpace(content) + if len(content) > 0 { + fmt.Fprintf(w, "%s%s '%s'\n", indent, typeName, content) + } else { + fmt.Fprintf(w, "%s%s\n", indent, typeName) + } +} + +func getListFlags(f ListType) string { + var s string + if f&ListTypeOrdered != 0 { + s += "ordered " + } + if f&ListTypeDefinition != 0 { + s += "definition " + } + if f&ListTypeTerm != 0 { + s += "term " + } + if f&ListItemContainsBlock != 0 { + s += "has_block " + } + if f&ListItemBeginningOfList != 0 { + s += "start " + } + if f&ListItemEndOfList != 0 { + s += "end " + } + s = strings.TrimSpace(s) + return s +} + +func printRecur(w io.Writer, node Node, prefix string, depth int) { + if node == nil { + return + } + indent := strings.Repeat(prefix, depth) + + content := shortenString(getContent(node), 40) + typeName := getNodeType(node) + switch v := node.(type) { + case *Link: + content := "url=" + string(v.Destination) + printDefault(w, indent, typeName, content) + case *Image: + content := "url=" + string(v.Destination) + printDefault(w, indent, typeName, content) + case *List: + if v.Start > 1 { + content += fmt.Sprintf("start=%d ", v.Start) + } + if v.Tight { + content += "tight " + } + if v.IsFootnotesList { + content += "footnotes " + } + flags := getListFlags(v.ListFlags) + if len(flags) > 0 { + content += "flags=" + flags + " " + } + printDefault(w, indent, typeName, content) + case *ListItem: + if v.Tight { + content += "tight " + } + if v.IsFootnotesList { + content += "footnotes " + } + flags := getListFlags(v.ListFlags) + if len(flags) > 0 { + content += "flags=" + flags + " " + } + printDefault(w, indent, typeName, content) + default: + printDefault(w, indent, typeName, content) + } + for _, child := range node.GetChildren() { + printRecur(w, child, prefix, depth+1) + } +} diff --git a/vendor/github.com/gomarkdown/markdown/html/callouts.go b/vendor/github.com/gomarkdown/markdown/html/callouts.go new file mode 100644 index 000000000..e377af229 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/html/callouts.go @@ -0,0 +1,42 @@ +package html + +import ( + "bytes" + "io" + + "github.com/gomarkdown/markdown/ast" + "github.com/gomarkdown/markdown/parser" +) + +// EscapeHTMLCallouts writes html-escaped d to w. It escapes &, <, > and " characters, *but* +// expands callouts <> with the callout HTML, i.e. by calling r.callout() with a newly created +// ast.Callout node. 
+func (r *Renderer) EscapeHTMLCallouts(w io.Writer, d []byte) {
+	ld := len(d)
+Parse:
+	for i := 0; i < ld; i++ {
+		for _, comment := range r.opts.Comments {
+			if !bytes.HasPrefix(d[i:], comment) {
+				break
+			}
+
+			lc := len(comment)
+			if i+lc < ld {
+				if id, consumed := parser.IsCallout(d[i+lc:]); consumed > 0 {
+					// We have seen a callout
+					callout := &ast.Callout{ID: id}
+					r.callout(w, callout)
+					i += consumed + lc - 1
+					continue Parse
+				}
+			}
+		}
+
+		escSeq := Escaper[d[i]]
+		if escSeq != nil {
+			w.Write(escSeq)
+		} else {
+			w.Write([]byte{d[i]})
+		}
+	}
+}
diff --git a/vendor/github.com/gomarkdown/markdown/html/doc.go b/vendor/github.com/gomarkdown/markdown/html/doc.go
new file mode 100644
index 000000000..f837c63d1
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/html/doc.go
@@ -0,0 +1,43 @@
+/*
+Package html implements HTML renderer of parsed markdown document.
+
+Configuring and customizing a renderer
+
+A renderer can be configured with multiple options:
+
+	import "github.com/gomarkdown/markdown/html"
+
+	flags := html.CommonFlags | html.CompletePage | html.HrefTargetBlank
+	opts := html.RendererOptions{
+		Title: "A custom title",
+		Flags: flags,
+	}
+	renderer := html.NewRenderer(opts)
+
+You can also re-use most of the logic and customize rendering of selected nodes
+by providing a node render hook.
+This is most useful for rendering nodes that allow for design choices, like
+links or code blocks.
+
+	import (
+		"github.com/gomarkdown/markdown/html"
+		"github.com/gomarkdown/markdown/ast"
+	)
+
+	// a very dummy render hook that will output "code_replacement" instead of
+	// <code>${content}</code> emitted by html.Renderer
+	func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
+		_, ok := node.(*ast.CodeBlock)
+		if !ok {
+			return ast.GoToNext, false
+		}
+		io.WriteString(w, "code_replacement")
+		return ast.GoToNext, true
+	}
+
+	opts := html.RendererOptions{
+		RenderNodeHook: renderHookCodeBlock,
+	}
+	renderer := html.NewRenderer(opts)
+*/
+package html
diff --git a/vendor/github.com/gomarkdown/markdown/html/esc.go b/vendor/github.com/gomarkdown/markdown/html/esc.go
new file mode 100644
index 000000000..89ec9a278
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/html/esc.go
@@ -0,0 +1,50 @@
+package html
+
+import (
+	"html"
+	"io"
+)
+
+// Escaper maps the characters escaped by EscapeHTML to their HTML entities.
+var Escaper = [256][]byte{
+	'&': []byte("&amp;"),
+	'<': []byte("&lt;"),
+	'>': []byte("&gt;"),
+	'"': []byte("&quot;"),
+}
+
+// EscapeHTML writes html-escaped d to w. It escapes &, <, > and " characters.
+func EscapeHTML(w io.Writer, d []byte) {
+	var start, end int
+	n := len(d)
+	for end < n {
+		escSeq := Escaper[d[end]]
+		if escSeq != nil {
+			w.Write(d[start:end])
+			w.Write(escSeq)
+			start = end + 1
+		}
+		end++
+	}
+	if start < n && end <= n {
+		w.Write(d[start:end])
+	}
+}
+
+func escLink(w io.Writer, text []byte) {
+	unesc := html.UnescapeString(string(text))
+	EscapeHTML(w, []byte(unesc))
+}
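As a quick illustrative sketch of the escaping behavior above (a bytes.Buffer stands in for the writer):

	var buf bytes.Buffer
	EscapeHTML(&buf, []byte(`a < b & "c"`))
	// buf now holds: a &lt; b &amp; &quot;c&quot;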
+// Escape writes the text to w, but skips the escape character.
+func Escape(w io.Writer, text []byte) {
+	esc := false
+	for i := 0; i < len(text); i++ {
+		if text[i] == '\\' {
+			esc = !esc
+		}
+		if esc && text[i] == '\\' {
+			continue
+		}
+		w.Write([]byte{text[i]})
+	}
+}
diff --git a/vendor/github.com/gomarkdown/markdown/html/renderer.go b/vendor/github.com/gomarkdown/markdown/html/renderer.go
new file mode 100644
index 000000000..af8da2298
--- /dev/null
+++ b/vendor/github.com/gomarkdown/markdown/html/renderer.go
@@ -0,0 +1,1312 @@
+package html
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/gomarkdown/markdown/ast"
+)
+
+// Flags control optional behavior of HTML renderer.
+type Flags int
+
+// IDTag is the tag used for tag identification, it defaults to "id", some renderers
+// may wish to override this and use e.g. "anchor".
+var IDTag = "id"
+
+// HTML renderer configuration options.
+const (
+	FlagsNone               Flags = 0
+	SkipHTML                Flags = 1 << iota // Skip preformatted HTML blocks
+	SkipImages                                // Skip embedded images
+	SkipLinks                                 // Skip all links
+	Safelink                                  // Only link to trusted protocols
+	NofollowLinks                             // Only link with rel="nofollow"
+	NoreferrerLinks                           // Only link with rel="noreferrer"
+	HrefTargetBlank                           // Add a blank target
+	CompletePage                              // Generate a complete HTML page
+	UseXHTML                                  // Generate XHTML output instead of HTML
+	FootnoteReturnLinks                       // Generate a link at the end of a footnote to return to the source
+	FootnoteNoHRTag                           // Do not output an HR after starting a footnote list.
+	Smartypants                               // Enable smart punctuation substitutions
+	SmartypantsFractions                      // Enable smart fractions (with Smartypants)
+	SmartypantsDashes                         // Enable smart dashes (with Smartypants)
+	SmartypantsLatexDashes                    // Enable LaTeX-style dashes (with Smartypants)
+	SmartypantsAngledQuotes                   // Enable angled double quotes (with Smartypants) for double quotes rendering
+	SmartypantsQuotesNBSP                     // Enable « French guillemets » (with Smartypants)
+	TOC                                       // Generate a table of contents
+
+	CommonFlags Flags = Smartypants | SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
+)
+
+var (
+	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+		processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
+	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:[^-]|-[^-])*-->"
+	processingInstruction = "[<][?].*?[?][>]"
+	singleQuotedValue     = "'[^']*'"
+	tagName               = "[A-Za-z][A-Za-z0-9-]*"
+	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// RenderNodeFunc allows reusing most of Renderer logic and replacing
+// rendering of some nodes. If it returns false, Renderer.RenderNode
+// will execute its logic. If it returns true, Renderer.RenderNode will
+// skip rendering this node and will return WalkStatus
+type RenderNodeFunc func(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool)
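For illustration, a minimal sketch of such a hook: it takes over rendering of horizontal rules only, and the "fancy" class is made up for the example.

	func hrHook(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
		if _, ok := node.(*ast.HorizontalRule); ok {
			io.WriteString(w, `<hr class="fancy">`)
			return ast.GoToNext, true // handled here, default rendering is skipped
		}
		return ast.GoToNext, false // anything else falls through to RenderNode
	}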
+// RendererOptions is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type RendererOptions struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// FootnoteReturnLinks flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+	FootnoteReturnLinkContents string
+	// CitationFormatString defines how a citation is rendered. If blank, the string
+	// <sup>[%s]</sup> is used, where %s will be substituted with the citation target.
+	CitationFormatString string
+	// If set, add this text to the front of each Heading ID, to ensure uniqueness.
+	HeadingIDPrefix string
+	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
+	HeadingIDSuffix string
+
+	Title string // Document title (used if CompletePage is set)
+	CSS   string // Optional CSS file URL (used if CompletePage is set)
+	Icon  string // Optional icon file URL (used if CompletePage is set)
+	Head  []byte // Optional head data injected in the <head> section (used if CompletePage is set)
+
+	Flags Flags // Flags allow customizing this renderer's behavior
+
+	// if set, called at the start of RenderNode(). Allows replacing
+	// rendering of some nodes
+	RenderNodeHook RenderNodeFunc
+
+	// Comments is a list of comments the renderer should detect when
+	// parsing code blocks and detecting callouts.
+	Comments [][]byte
+
+	// Generator is a meta tag that is inserted in the generated HTML to show what rendered it. It should not include the closing tag.
+	// Defaults (note content quote is not closed) to `  <meta name="GENERATOR" content="github.com/gomarkdown/markdown markdown processor for Go`
+	Generator string
+}
+
+// Renderer implements Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the NewRenderer function.
+type Renderer struct {
+	opts RendererOptions
+
+	closeTag string // how to end singleton tags: either " />" or ">"
+
+	// Track heading IDs to prevent ID collision in a single generation.
+	headingIDs map[string]int
+
+	lastOutputLen int
+	disableTags   int
+
+	sr *SPRenderer
+
+	documentMatter ast.DocumentMatters // keep track of front/main/back matter.
+}
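Putting the options and constructor together, a typical construction looks like this sketch (title and flags are arbitrary):

	opts := html.RendererOptions{
		Title: "My page",
		Flags: html.CommonFlags | html.CompletePage,
	}
	renderer := html.NewRenderer(opts)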
+// NewRenderer creates and configures a Renderer object, which
+// satisfies the Renderer interface.
+func NewRenderer(opts RendererOptions) *Renderer {
+	// configure the rendering engine
+	closeTag := ">"
+	if opts.Flags&UseXHTML != 0 {
+		closeTag = " />"
+	}
+
+	if opts.FootnoteReturnLinkContents == "" {
+		opts.FootnoteReturnLinkContents = `<sup>[return]</sup>`
+	}
+	if opts.CitationFormatString == "" {
+		opts.CitationFormatString = `<sup>[%s]</sup>`
+	}
+	if opts.Generator == "" {
+		opts.Generator = `  <meta name="GENERATOR" content="github.com/gomarkdown/markdown markdown processor for Go`
+	}
+
+	return &Renderer{
+		opts: opts,
+
+		closeTag:   closeTag,
+		headingIDs: make(map[string]int),
+
+		sr: NewSmartypantsRenderer(opts.Flags),
+	}
+}
+
+func isHTMLTag(tag []byte, tagname string) bool {
+	found, _ := findHTMLTagPos(tag, tagname)
+	return found
+}
+
+// Look for a character, but ignore it when it's in any kind of quotes, it
+// might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+	inSingleQuote := false
+	inDoubleQuote := false
+	inGraveQuote := false
+	i := start
+	for i < len(html) {
+		switch {
+		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+			return i
+		case html[i] == '\'':
+			inSingleQuote = !inSingleQuote
+		case html[i] == '"':
+			inDoubleQuote = !inDoubleQuote
+		case html[i] == '`':
+			inGraveQuote = !inGraveQuote
+		}
+		i++
+	}
+	return start
+}
+
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
+	i := 0
+	if i < len(tag) && tag[0] != '<' {
+		return false, -1
+	}
+	i++
+	i = skipSpace(tag, i)
+
+	if i < len(tag) && tag[i] == '/' {
+		i++
+	}
+
+	i = skipSpace(tag, i)
+	j := 0
+	for ; i < len(tag); i, j = i+1, j+1 {
+		if j >= len(tagname) {
+			break
+		}
+
+		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+			return false, -1
+		}
+	}
+
+	if i == len(tag) {
+		return false, -1
+	}
+
+	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+	if rightAngle >= i {
+		return true, rightAngle
+	}
+
+	return false, -1
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// an anchor: begins with '#'
+	if link[0] == '#' {
+		return true
+	}
+
+	// link begins with '/' but not '//'; the latter may be a protocol-relative link
+	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+		return true
+	}
+
+	// only the root '/'
+	if len(link) == 1 && link[0] == '/' {
+		return true
+	}
+
+	// current directory : begin with "./"
+	if bytes.HasPrefix(link, []byte("./")) {
+		return true
+	}
+
+	// parent directory : begin with "../"
+	if bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
+
+func (r *Renderer) ensureUniqueHeadingID(id string) string {
+	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+			r.headingIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := r.headingIDs[id]; !found {
+		r.headingIDs[id] = 0
+	}
+
+	return id
+}
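For example (illustrative; the host is made up), with AbsolutePrefix set, addAbsPrefix below rewrites relative destinations and leaves everything else alone:

	r := NewRenderer(RendererOptions{AbsolutePrefix: "https://docs.example.com"})
	// r.addAbsPrefix([]byte("/img/a.png"))   -> "https://docs.example.com/img/a.png"
	// r.addAbsPrefix([]byte("./local.png"))  -> unchanged: destinations starting with '.' are skipped
	// r.addAbsPrefix([]byte("https://x.io")) -> unchanged: not a relative link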
+func (r *Renderer) addAbsPrefix(link []byte) []byte {
+	if r.opts.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		newDest := r.opts.AbsolutePrefix
+		if link[0] != '/' {
+			newDest += "/"
+		}
+		newDest += string(link)
+		return []byte(newDest)
+	}
+	return link
+}
+
+func appendLinkAttrs(attrs []string, flags Flags, link []byte) []string {
+	if isRelativeLink(link) {
+		return attrs
+	}
+	var val []string
+	if flags&NofollowLinks != 0 {
+		val = append(val, "nofollow")
+	}
+	if flags&NoreferrerLinks != 0 {
+		val = append(val, "noreferrer")
+	}
+	if flags&HrefTargetBlank != 0 {
+		attrs = append(attrs, `target="_blank"`)
+	}
+	if len(val) == 0 {
+		return attrs
+	}
+	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+	return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+	return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags Flags, dest []byte) bool {
+	if flags&SkipLinks != 0 {
+		return true
+	}
+	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node ast.Node) bool {
+	switch node.GetParent().(type) {
+	case *ast.Link, *ast.CodeBlock, *ast.Code:
+		return false
+	}
+	return true
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+	if len(info) == 0 {
+		return attrs
+	}
+	endOfLang := bytes.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	s := `class="language-` + string(info[:endOfLang]) + `"`
+	return append(attrs, s)
+}
+
+func (r *Renderer) outTag(w io.Writer, name string, attrs []string) {
+	s := name
+	if len(attrs) > 0 {
+		s += " " + strings.Join(attrs, " ")
+	}
+	io.WriteString(w, s+">")
+	r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *ast.Link) string {
+	urlFrag := prefix + string(slugify(node.Destination))
+	nStr := strconv.Itoa(node.NoteID)
+	anchor := `<a href="#fn:` + urlFrag + `">` + nStr + `</a>`
+	return `<sup class="footnote-ref" id="fnref:` + urlFrag + `">` + anchor + `</sup>`
+}
+
+func footnoteItem(prefix string, slug []byte) string {
+	return `<li id="fn:` + prefix + string(slug) + `">`
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) string {
+	return ` <a class="footnote-return" href="#fnref:` + prefix + string(slug) + `">` + returnLink + `</a>`
+}
+
+func listItemOpenCR(listItem *ast.ListItem) bool {
+	if ast.GetPrevNode(listItem) == nil {
+		return false
+	}
+	ld := listItem.Parent.(*ast.List)
+	return !ld.Tight && ld.ListFlags&ast.ListTypeDefinition == 0
+}
+
+func skipParagraphTags(para *ast.Paragraph) bool {
+	parent := para.Parent
+	grandparent := parent.GetParent()
+	if grandparent == nil || !isList(grandparent) {
+		return false
+	}
+	isParentTerm := isListItemTerm(parent)
+	grandparentListData := grandparent.(*ast.List)
+	tightOrTerm := grandparentListData.Tight || isParentTerm
+	return tightOrTerm
+}
+
+func (r *Renderer) out(w io.Writer, d []byte) {
+	r.lastOutputLen = len(d)
+	if r.disableTags > 0 {
+		d = htmlTagRe.ReplaceAll(d, []byte{})
+	}
+	w.Write(d)
+}
+
+func (r *Renderer) outs(w io.Writer, s string) {
+	r.lastOutputLen = len(s)
+	if r.disableTags > 0 {
+		s = htmlTagRe.ReplaceAllString(s, "")
+	}
+	io.WriteString(w, s)
+}
+
+func (r *Renderer) cr(w io.Writer) {
+	if r.lastOutputLen > 0 {
+		r.outs(w, "\n")
+	}
+}
+
+var (
+	openHTags  = []string{"<h1", "<h2", "<h3", "<h4", "<h5"}
+	closeHTags = []string{"</h1>", "</h2>", "</h3>", "</h4>", "</h5>"}
+)
+
+func headingOpenTagFromLevel(level int) string {
+	if level < 1 || level > 5 {
+		return "<h6"
+	}
+	return openHTags[level-1]
+}
+
+func headingCloseTagFromLevel(level int) string {
+	if level < 1 || level > 5 {
+		return "</h6>"
+	}
+	return closeHTags[level-1]
+}
+
+func (r *Renderer) outHRTag(w io.Writer, attrs []string) {
+	hr := tagWithAttributes("<hr", attrs)
+	r.outOneOf(w, r.opts.Flags&UseXHTML == 0, hr, "<hr />")
+}
+
+func (r *Renderer) text(w io.Writer, text *ast.Text) {
+	if r.opts.Flags&Smartypants != 0 {
+		var tmp bytes.Buffer
+		EscapeHTML(&tmp, text.Literal)
+		r.sr.Process(w, tmp.Bytes())
+	} else {
+		_, parentIsLink := text.Parent.(*ast.Link)
+		if parentIsLink {
+			escLink(w, text.Literal)
+		} else {
+			EscapeHTML(w, text.Literal)
+		}
+	}
+}
+
+func (r *Renderer) hardBreak(w io.Writer, node *ast.Hardbreak) {
+	r.outOneOf(w, r.opts.Flags&UseXHTML == 0, "<br>", "<br />")
+	r.cr(w)
+}
+
+func (r *Renderer) outOneOf(w io.Writer, outFirst bool, first string, second string) {
+	if outFirst {
+		r.outs(w, first)
+	} else {
+		r.outs(w, second)
+	}
+}
+
+func (r *Renderer) outOneOfCr(w io.Writer, outFirst bool, first string, second string) {
+	if outFirst {
+		r.cr(w)
+		r.outs(w, first)
+	} else {
+		r.outs(w, second)
+		r.cr(w)
+	}
+}
+
+func (r *Renderer) htmlSpan(w io.Writer, span *ast.HTMLSpan) {
+	if r.opts.Flags&SkipHTML == 0 {
+		r.out(w, span.Literal)
+	}
+}
+
+func (r *Renderer) linkEnter(w io.Writer, link *ast.Link) {
+	var attrs []string
+	dest := link.Destination
+	dest = r.addAbsPrefix(dest)
+	var hrefBuf bytes.Buffer
+	hrefBuf.WriteString("href=\"")
+	escLink(&hrefBuf, dest)
+	hrefBuf.WriteByte('"')
+	attrs = append(attrs, hrefBuf.String())
+	if link.NoteID != 0 {
+		r.outs(w, footnoteRef(r.opts.FootnoteAnchorPrefix, link))
+		return
+	}
+
+	attrs = appendLinkAttrs(attrs, r.opts.Flags, dest)
+	if len(link.Title) > 0 {
+		var titleBuff bytes.Buffer
+		titleBuff.WriteString("title=\"")
+		EscapeHTML(&titleBuff, link.Title)
+		titleBuff.WriteByte('"')
+		attrs = append(attrs, titleBuff.String())
+	}
+	r.outTag(w, "<a", attrs)
+}
+
+func (r *Renderer) linkExit(w io.Writer, link *ast.Link) {
+	if link.NoteID == 0 {
+		r.outs(w, "</a>")
+	}
+}
+
+func (r *Renderer) link(w io.Writer, link *ast.Link, entering bool) {
+	// mark it but don't link it if it is not a safe link: no smartypants
+	if needSkipLink(r.opts.Flags, link.Destination) {
+		r.outOneOf(w, entering, "<tt>", "</tt>")
+		return
+	}
+
+	if entering {
+		r.linkEnter(w, link)
+	} else {
+		r.linkExit(w, link)
+	}
+}
+
+func (r *Renderer) imageEnter(w io.Writer, image *ast.Image) {
+	dest := image.Destination
+	dest = r.addAbsPrefix(dest)
+	if r.disableTags == 0 {
+		//if options.safe && potentiallyUnsafe(dest) {
+		//out(w, `<img src="" alt="`)
+		//} else {
+		r.outs(w, `<img src="`)
+		escLink(w, dest)
+		r.outs(w, `" alt="`)
+		//}
+	}
+	r.disableTags++
+}
+
+func (r *Renderer) imageExit(w io.Writer, image *ast.Image) {
+	r.disableTags--
+	if r.disableTags == 0 {
+		if image.Title != nil {
+			r.outs(w, `" title="`)
+			EscapeHTML(w, image.Title)
+		}
+		r.outs(w, `" />`)
+	}
+}
+
+func (r *Renderer) paragraphEnter(w io.Writer, para *ast.Paragraph) {
+	// TODO: untangle this clusterfuck about when the newlines need
+	// to be added and when not.
+	prev := ast.GetPrevNode(para)
+	if prev != nil {
+		switch prev.(type) {
+		case *ast.HTMLBlock, *ast.List, *ast.Paragraph, *ast.Heading, *ast.CaptionFigure, *ast.CodeBlock, *ast.BlockQuote, *ast.Aside, *ast.HorizontalRule:
+			r.cr(w)
+		}
+	}
+
+	if prev == nil {
+		_, isParentBlockQuote := para.Parent.(*ast.BlockQuote)
+		if isParentBlockQuote {
+			r.cr(w)
+		}
+		_, isParentAside := para.Parent.(*ast.Aside)
+		if isParentAside {
+			r.cr(w)
+		}
+	}
+
+	tag := tagWithAttributes("<p", BlockAttrs(para))
+	r.outs(w, tag)
+}
+
+func (r *Renderer) paragraphExit(w io.Writer, para *ast.Paragraph) {
+	r.outs(w, "</p>")
+	if !(isListItem(para.Parent) && ast.GetNextNode(para) == nil) {
+		r.cr(w)
+	}
+}
+
+func (r *Renderer) paragraph(w io.Writer, para *ast.Paragraph, entering bool) {
+	if skipParagraphTags(para) {
+		return
+	}
+	if entering {
+		r.paragraphEnter(w, para)
+	} else {
+		r.paragraphExit(w, para)
+	}
+}
+
+func (r *Renderer) image(w io.Writer, node *ast.Image, entering bool) {
+	if entering {
+		r.imageEnter(w, node)
+	} else {
+		r.imageExit(w, node)
+	}
+}
+
+func (r *Renderer) code(w io.Writer, node *ast.Code) {
+	r.outs(w, "<code>")
+	EscapeHTML(w, node.Literal)
+	r.outs(w, "</code>")
+}
+
+func (r *Renderer) htmlBlock(w io.Writer, node *ast.HTMLBlock) {
+	if r.opts.Flags&SkipHTML != 0 {
+		return
+	}
+	r.cr(w)
+	r.out(w, node.Literal)
+	r.cr(w)
+}
+
+func (r *Renderer) headingEnter(w io.Writer, nodeData *ast.Heading) {
+	var attrs []string
+	var class string
+	// TODO(miek): add helper functions for coalescing these classes.
+	if nodeData.IsTitleblock {
+		class = "title"
+	}
+	if nodeData.IsSpecial {
+		if class != "" {
+			class += " special"
+		} else {
+			class = "special"
+		}
+	}
+	if class != "" {
+		attrs = []string{`class="` + class + `"`}
+	}
+	if nodeData.HeadingID != "" {
+		id := r.ensureUniqueHeadingID(nodeData.HeadingID)
+		if r.opts.HeadingIDPrefix != "" {
+			id = r.opts.HeadingIDPrefix + id
+		}
+		if r.opts.HeadingIDSuffix != "" {
+			id = id + r.opts.HeadingIDSuffix
+		}
+		attrID := `id="` + id + `"`
+		attrs = append(attrs, attrID)
+	}
+	attrs = append(attrs, BlockAttrs(nodeData)...)
+	r.cr(w)
+	r.outTag(w, headingOpenTagFromLevel(nodeData.Level), attrs)
+}
+
+func (r *Renderer) headingExit(w io.Writer, heading *ast.Heading) {
+	r.outs(w, headingCloseTagFromLevel(heading.Level))
+	if !(isListItem(heading.Parent) && ast.GetNextNode(heading) == nil) {
+		r.cr(w)
+	}
+}
+
+func (r *Renderer) heading(w io.Writer, node *ast.Heading, entering bool) {
+	if entering {
+		r.headingEnter(w, node)
+	} else {
+		r.headingExit(w, node)
+	}
+}
+
+func (r *Renderer) horizontalRule(w io.Writer, node *ast.HorizontalRule) {
+	r.cr(w)
+	r.outHRTag(w, BlockAttrs(node))
+	r.cr(w)
+}
+
+func (r *Renderer) listEnter(w io.Writer, nodeData *ast.List) {
+	// TODO: attrs don't seem to be set
+	var attrs []string
+
+	if nodeData.IsFootnotesList {
+		r.outs(w, "\n<div class=\"footnotes\">
    \n\n") + if r.opts.Flags&FootnoteNoHRTag == 0 { + r.outHRTag(w, nil) + r.cr(w) + } + } + r.cr(w) + if isListItem(nodeData.Parent) { + grand := nodeData.Parent.GetParent() + if isListTight(grand) { + r.cr(w) + } + } + + openTag := " 0 { + attrs = append(attrs, fmt.Sprintf(`start="%d"`, nodeData.Start)) + } + openTag = "\n") + } +} + +func (r *Renderer) list(w io.Writer, list *ast.List, entering bool) { + if entering { + r.listEnter(w, list) + } else { + r.listExit(w, list) + } +} + +func (r *Renderer) listItemEnter(w io.Writer, listItem *ast.ListItem) { + if listItemOpenCR(listItem) { + r.cr(w) + } + if listItem.RefLink != nil { + slug := slugify(listItem.RefLink) + r.outs(w, footnoteItem(r.opts.FootnoteAnchorPrefix, slug)) + return + } + + openTag := "
  • " + if listItem.ListFlags&ast.ListTypeDefinition != 0 { + openTag = "
    " + } + if listItem.ListFlags&ast.ListTypeTerm != 0 { + openTag = "
    " + } + r.outs(w, openTag) +} + +func (r *Renderer) listItemExit(w io.Writer, listItem *ast.ListItem) { + if listItem.RefLink != nil && r.opts.Flags&FootnoteReturnLinks != 0 { + slug := slugify(listItem.RefLink) + prefix := r.opts.FootnoteAnchorPrefix + link := r.opts.FootnoteReturnLinkContents + s := footnoteReturnLink(prefix, link, slug) + r.outs(w, s) + } + + closeTag := "
  • " + if listItem.ListFlags&ast.ListTypeDefinition != 0 { + closeTag = "" + } + if listItem.ListFlags&ast.ListTypeTerm != 0 { + closeTag = "" + } + r.outs(w, closeTag) + r.cr(w) +} + +func (r *Renderer) listItem(w io.Writer, listItem *ast.ListItem, entering bool) { + if entering { + r.listItemEnter(w, listItem) + } else { + r.listItemExit(w, listItem) + } +} + +func (r *Renderer) codeBlock(w io.Writer, codeBlock *ast.CodeBlock) { + var attrs []string + // TODO(miek): this can add multiple class= attribute, they should be coalesced into one. + // This is probably true for some other elements as well + attrs = appendLanguageAttr(attrs, codeBlock.Info) + attrs = append(attrs, BlockAttrs(codeBlock)...) + r.cr(w) + + r.outs(w, "
    ")
    +	code := tagWithAttributes("")
    +	r.outs(w, "
    ") + if !isListItem(codeBlock.Parent) { + r.cr(w) + } +} + +func (r *Renderer) caption(w io.Writer, caption *ast.Caption, entering bool) { + if entering { + r.outs(w, "
    ") + return + } + r.outs(w, "
    ") +} + +func (r *Renderer) captionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) { + // TODO(miek): copy more generic ways of mmark over to here. + fig := "` + } else { + fig += ">" + } + r.outOneOf(w, entering, fig, "\n\n") +} + +func (r *Renderer) tableCell(w io.Writer, tableCell *ast.TableCell, entering bool) { + if !entering { + r.outOneOf(w, tableCell.IsHeader, "", "") + r.cr(w) + return + } + + // entering + var attrs []string + openTag := "") + // XXX: this is to adhere to a rather silly test. Should fix test. + if ast.GetFirstChild(node) == nil { + r.cr(w) + } + } else { + r.outs(w, "") + r.cr(w) + } +} + +func (r *Renderer) matter(w io.Writer, node *ast.DocumentMatter, entering bool) { + if !entering { + return + } + if r.documentMatter != ast.DocumentMatterNone { + r.outs(w, "\n") + } + switch node.Matter { + case ast.DocumentMatterFront: + r.outs(w, `
    `) + case ast.DocumentMatterMain: + r.outs(w, `
    `) + case ast.DocumentMatterBack: + r.outs(w, `
    `) + } + r.documentMatter = node.Matter +} + +func (r *Renderer) citation(w io.Writer, node *ast.Citation) { + for i, c := range node.Destination { + attr := []string{`class="none"`} + switch node.Type[i] { + case ast.CitationTypeNormative: + attr[0] = `class="normative"` + case ast.CitationTypeInformative: + attr[0] = `class="informative"` + case ast.CitationTypeSuppressed: + attr[0] = `class="suppressed"` + } + r.outTag(w, "`+r.opts.CitationFormatString+``, c, c)) + r.outs(w, "") + } +} + +func (r *Renderer) callout(w io.Writer, node *ast.Callout) { + attr := []string{`class="callout"`} + r.outTag(w, "") +} + +func (r *Renderer) index(w io.Writer, node *ast.Index) { + // there is no in-text representation. + attr := []string{`class="index"`, fmt.Sprintf(`id="%s"`, node.ID)} + r.outTag(w, "") +} + +// RenderNode renders a markdown node to HTML +func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus { + if r.opts.RenderNodeHook != nil { + status, didHandle := r.opts.RenderNodeHook(w, node, entering) + if didHandle { + return status + } + } + switch node := node.(type) { + case *ast.Text: + r.text(w, node) + case *ast.Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case *ast.Hardbreak: + r.hardBreak(w, node) + case *ast.Emph: + r.outOneOf(w, entering, "", "") + case *ast.Strong: + r.outOneOf(w, entering, "", "") + case *ast.Del: + r.outOneOf(w, entering, "", "") + case *ast.BlockQuote: + tag := tagWithAttributes("") + case *ast.Aside: + tag := tagWithAttributes("") + case *ast.Link: + r.link(w, node, entering) + case *ast.CrossReference: + link := &ast.Link{Destination: append([]byte("#"), node.Destination...)} + r.link(w, link, entering) + case *ast.Citation: + r.citation(w, node) + case *ast.Image: + if r.opts.Flags&SkipImages != 0 { + return ast.SkipChildren + } + r.image(w, node, entering) + case *ast.Code: + r.code(w, node) + case *ast.CodeBlock: + r.codeBlock(w, node) + case *ast.Caption: + r.caption(w, node, entering) + case *ast.CaptionFigure: + r.captionFigure(w, node, entering) + case *ast.Document: + // do nothing + case *ast.Paragraph: + r.paragraph(w, node, entering) + case *ast.HTMLSpan: + r.htmlSpan(w, node) + case *ast.HTMLBlock: + r.htmlBlock(w, node) + case *ast.Heading: + r.heading(w, node, entering) + case *ast.HorizontalRule: + r.horizontalRule(w, node) + case *ast.List: + r.list(w, node, entering) + case *ast.ListItem: + r.listItem(w, node, entering) + case *ast.Table: + tag := tagWithAttributes("") + case *ast.TableCell: + r.tableCell(w, node, entering) + case *ast.TableHeader: + r.outOneOfCr(w, entering, "", "") + case *ast.TableBody: + r.tableBody(w, node, entering) + case *ast.TableRow: + r.outOneOfCr(w, entering, "", "") + case *ast.TableFooter: + r.outOneOfCr(w, entering, "", "") + case *ast.Math: + r.outOneOf(w, true, `\(`, `\)`) + EscapeHTML(w, node.Literal) + r.outOneOf(w, false, `\(`, `\)`) + case *ast.MathBlock: + r.outOneOf(w, entering, `

    \[`, `\]

    `) + if entering { + EscapeHTML(w, node.Literal) + } + case *ast.DocumentMatter: + r.matter(w, node, entering) + case *ast.Callout: + r.callout(w, node) + case *ast.Index: + r.index(w, node) + case *ast.Subscript: + r.outOneOf(w, true, "", "") + if entering { + Escape(w, node.Literal) + } + r.outOneOf(w, false, "", "") + case *ast.Superscript: + r.outOneOf(w, true, "", "") + if entering { + Escape(w, node.Literal) + } + r.outOneOf(w, false, "", "") + case *ast.Footnotes: + // nothing by default; just output the list. + default: + panic(fmt.Sprintf("Unknown node %T", node)) + } + return ast.GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *Renderer) RenderHeader(w io.Writer, ast ast.Node) { + r.writeDocumentHeader(w) + if r.opts.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *Renderer) RenderFooter(w io.Writer, _ ast.Node) { + if r.documentMatter != ast.DocumentMatterNone { + r.outs(w, "
    \n") + } + + if r.opts.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *Renderer) writeDocumentHeader(w io.Writer) { + if r.opts.Flags&CompletePage == 0 { + return + } + ending := "" + if r.opts.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.opts.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.opts.Title)) + } else { + EscapeHTML(w, []byte(r.opts.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, r.opts.Generator) + io.WriteString(w, "\"") + io.WriteString(w, ending) + io.WriteString(w, ">\n") + io.WriteString(w, " \n") + if r.opts.CSS != "" { + io.WriteString(w, " \n") + } + if r.opts.Icon != "" { + io.WriteString(w, " \n") + } + if r.opts.Head != nil { + w.Write(r.opts.Head) + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *Renderer) writeTOC(w io.Writer, doc ast.Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { + if nodeData, ok := node.(*ast.Heading); ok && !nodeData.IsTitleblock { + inHeading = entering + if !entering { + buf.WriteString("") + return ast.GoToNext + } + nodeData.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if nodeData.Level == tocLevel { + buf.WriteString("\n\n
  • ") + } else if nodeData.Level < tocLevel { + for nodeData.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for nodeData.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} + +func isList(node ast.Node) bool { + _, ok := node.(*ast.List) + return ok +} + +func isListTight(node ast.Node) bool { + if list, ok := node.(*ast.List); ok { + return list.Tight + } + return false +} + +func isListItem(node ast.Node) bool { + _, ok := node.(*ast.ListItem) + return ok +} + +func isListItemTerm(node ast.Node) bool { + data, ok := node.(*ast.ListItem) + return ok && data.ListFlags&ast.ListTypeTerm != 0 +} + +// TODO: move to internal package +func skipSpace(data []byte, i int) int { + n := len(data) + for i < n && isSpace(data[i]) { + i++ + } + return i +} + +// TODO: move to internal package +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isAlnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isAlnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// TODO: move to internal package +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isAlnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} + +// TODO: move to internal package +// isAlnum returns true if c is a digit or letter +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isAlnum(c byte) bool { + return (c >= '0' && c <= '9') || isLetter(c) +} + +// isSpace returns true if c is a white-space charactr +func isSpace(c byte) bool { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// isLetter returns true if c is ascii letter +func isLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// isPunctuation returns true if c is a punctuation symbol. +func isPunctuation(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// BlockAttrs takes a node and checks if it has block level attributes set. If so it +// will return a slice each containing a "key=value(s)" string. +func BlockAttrs(node ast.Node) []string { + var attr *ast.Attribute + if c := node.AsContainer(); c != nil && c.Attribute != nil { + attr = c.Attribute + } + if l := node.AsLeaf(); l != nil && l.Attribute != nil { + attr = l.Attribute + } + if attr == nil { + return nil + } + + var s []string + if attr.ID != nil { + s = append(s, fmt.Sprintf(`%s="%s"`, IDTag, attr.ID)) + } + + classes := "" + for _, c := range attr.Classes { + classes += " " + string(c) + } + if classes != "" { + s = append(s, fmt.Sprintf(`class="%s"`, classes[1:])) // skip space we added. 
+ } + + // sort the attributes so it remain stable between runs + var keys = []string{} + for k, _ := range attr.Attrs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + s = append(s, fmt.Sprintf(`%s="%s"`, k, attr.Attrs[k])) + } + + return s +} + +func tagWithAttributes(name string, attrs []string) string { + s := name + if len(attrs) > 0 { + s += " " + strings.Join(attrs, " ") + } + return s + ">" +} diff --git a/vendor/github.com/gomarkdown/markdown/html/smartypants.go b/vendor/github.com/gomarkdown/markdown/html/smartypants.go new file mode 100644 index 000000000..a09866b02 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/html/smartypants.go @@ -0,0 +1,444 @@ +package html + +import ( + "bytes" + "io" +) + +// SmartyPants rendering + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isSpace(c) || isPunctuation(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isSpace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case isPunctuation(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isSpace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isSpace(previousChar) && isSpace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case isPunctuation(previousChar) && isSpace(nextChar): + // [!" ] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isSpace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && isPunctuation(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isSpace(previousChar) && isPunctuation(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case isPunctuation(previousChar) && isPunctuation(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ isPunctuation(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isSpace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case isPunctuation(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. 
+	if addNBSP && !*isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	out.WriteByte('&')
+	if *isOpen {
+		out.WriteByte('l')
+	} else {
+		out.WriteByte('r')
+	}
+	out.WriteByte(quote)
+	out.WriteString("quo;")
+
+	if addNBSP && *isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	return true
+}
+
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		t1 := tolower(text[1])
+
+		if t1 == '\'' {
+			nextChar := byte(0)
+			if len(text) >= 3 {
+				nextChar = text[2]
+			}
+			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+				return 1
+			}
+		}
+
+		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+			out.WriteString("&rsquo;")
+			return 0
+		}
+
+		if len(text) >= 3 {
+			t2 := tolower(text[2])
+
+			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+				(len(text) < 4 || wordBoundary(text[3])) {
+				out.WriteString("&rsquo;")
+				return 0
+			}
+		}
+	}
+
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
+		return 0
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 {
+		t1 := tolower(text[1])
+		t2 := tolower(text[2])
+
+		if t1 == 'c' && t2 == ')' {
+			out.WriteString("&copy;")
+			return 2
+		}
+
+		if t1 == 'r' && t2 == ')' {
+			out.WriteString("&reg;")
+			return 2
+		}
+
+		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+			out.WriteString("&trade;")
+			return 3
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		if text[1] == '-' {
+			out.WriteString("&mdash;")
+			return 1
+		}
+
+		if wordBoundary(previousChar) && wordBoundary(text[1]) {
+			out.WriteString("&ndash;")
+			return 0
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+		out.WriteString("&mdash;")
+		return 2
+	}
+	if len(text) >= 2 && text[1] == '-' {
+		out.WriteString("&ndash;")
+		return 1
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+	if bytes.HasPrefix(text, []byte("&quot;")) {
+		nextChar := byte(0)
+		if len(text) >= 7 {
+			nextChar = text[6]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
+			return 5
+		}
+	}
+
+	if bytes.HasPrefix(text, []byte("&#0;")) {
+		return 3
+	}
+
+	out.WriteByte('&')
+	return 0
+}
+
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
+	var quote byte = 'd'
+	if angledQuotes {
+		quote = 'a'
+	}
+
+	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
+		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
+	}
+}
+
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+		out.WriteString("&hellip;")
+		return 2
+	}
+
+	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+		out.WriteString("&hellip;")
+		return 4
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 && text[1] == '`' {
+		nextChar := byte(0)
+		if len(text) >= 3 {
+			nextChar = text[2]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+			return 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+		// and avoid changing dates like 1/23/2005 into fractions.
+		numEnd := 0
+		for len(text) > numEnd && isdigit(text[numEnd]) {
+			numEnd++
+		}
+		if numEnd == 0 {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denStart := numEnd + 1
+		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+			denStart = numEnd + 3
+		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denEnd := denStart
+		for len(text) > denEnd && isdigit(text[denEnd]) {
+			denEnd++
+		}
+		if denEnd == denStart {
+			out.WriteByte(text[0])
+			return 0
+		}
+		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+			out.WriteString("<sup>")
+			out.Write(text[:numEnd])
+			out.WriteString("</sup>&frasl;<sub>")
+			out.Write(text[denStart:denEnd])
+			out.WriteString("</sub>")
+			return denEnd - 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+				out.WriteString("&frac12;")
+				return 2
+			}
+		}
+
+		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+				out.WriteString("&frac14;")
+				return 2
+			}
+		}
+
+		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+				out.WriteString("&frac34;")
+				return 2
+			}
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
+		out.WriteString("&quot;")
+	}
+
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
+}
+
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
+}
+
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
+	i := 0
+
+	for i < len(text) && text[i] != '>' {
+		i++
+	}
+
+	out.Write(text[:i+1])
+	return i
+}
+
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
+
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
+func NewSmartypantsRenderer(flags Flags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. +func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/aside.go b/vendor/github.com/gomarkdown/markdown/parser/aside.go new file mode 100644 index 000000000..96e25fe06 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/aside.go @@ -0,0 +1,73 @@ +package parser + +import ( + "bytes" + + "github.com/gomarkdown/markdown/ast" +) + +// returns aisde prefix length +func (p *Parser) asidePrefix(data []byte) int { + i := 0 + n := len(data) + for i < 3 && i < n && data[i] == ' ' { + i++ + } + if i+1 < n && data[i] == 'A' && data[i+1] == '>' { + if i+2 < n && data[i+2] == ' ' { + return i + 3 + } + return i + 2 + } + return 0 +} + +// aside ends with at least one blank line +// followed by something without a aside prefix +func (p *Parser) terminateAside(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.asidePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a aside fragment +func (p *Parser) aside(data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + // identical to quote + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end = skipCharN(data, end, '\n', 1) + if pre := p.asidePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateAside(data, beg, end) { + break + } + // this line is part of the aside + raw.Write(data[beg:end]) + beg = end + } + + block := p.addBlock(&ast.Aside{}) + p.block(raw.Bytes()) + p.finalize(block) + return end +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/attribute.go b/vendor/github.com/gomarkdown/markdown/parser/attribute.go new file mode 100644 index 000000000..5fdb07095 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/attribute.go @@ -0,0 +1,116 @@ +package parser + +import ( + "bytes" + + "github.com/gomarkdown/markdown/ast" +) + +// attribute parses a (potential) block attribute and adds it to p. +func (p *Parser) attribute(data []byte) []byte { + if len(data) < 3 { + return data + } + i := 0 + if data[i] != '{' { + return data + } + i++ + + // last character must be a } otherwise it's not an attribute + end := skipUntilChar(data, i, '\n') + if data[end-1] != '}' { + return data + } + + i = skipSpace(data, i) + b := &ast.Attribute{Attrs: make(map[string][]byte)} + + esc := false + quote := false + trail := 0 +Loop: + for ; i < len(data); i++ { + switch data[i] { + case ' ', '\t', '\f', '\v': + if quote { + continue + } + chunk := data[trail+1 : i] + if len(chunk) == 0 { + trail = i + continue + } + switch { + case chunk[0] == '.': + b.Classes = append(b.Classes, chunk[1:]) + case chunk[0] == '#': + b.ID = chunk[1:] + default: + k, v := keyValue(chunk) + if k != nil && v != nil { + b.Attrs[string(k)] = v + } else { + // this is illegal in an attribute + return data + } + } + trail = i + case '"': + if esc { + esc = !esc + continue + } + quote = !quote + case '\\': + esc = !esc + case '}': + if esc { + esc = !esc + continue + } + chunk := data[trail+1 : i] + if len(chunk) == 0 { + return data + } + switch { + case chunk[0] == '.': + b.Classes = append(b.Classes, chunk[1:]) + case chunk[0] == '#': + b.ID = chunk[1:] + default: + k, v := keyValue(chunk) + if k != nil && v != nil { + b.Attrs[string(k)] = v + } else { + return data + } + } + i++ + break Loop + default: + esc = false + } + } + + p.attr = b + return data[i:] +} + +// key="value" quotes are mandatory. +func keyValue(data []byte) ([]byte, []byte) { + chunk := bytes.SplitN(data, []byte{'='}, 2) + if len(chunk) != 2 { + return nil, nil + } + key := chunk[0] + value := chunk[1] + + if len(value) < 3 || len(key) == 0 { + return nil, nil + } + if value[0] != '"' || value[len(value)-1] != '"' { + return key, nil + } + return key, value[1 : len(value)-1] +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/block.go b/vendor/github.com/gomarkdown/markdown/parser/block.go new file mode 100644 index 000000000..18a2dd894 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/block.go @@ -0,0 +1,1978 @@ +package parser + +import ( + "bytes" + "html" + "regexp" + "strconv" + "unicode" + + "github.com/gomarkdown/markdown/ast" +) + +// Parsing block-level elements. 
+ +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) + + // blockTags is a set of tags that are recognized as HTML block tags. + // Any of these can be included in markdown text without special escaping. + blockTags = map[string]struct{}{ + "blockquote": struct{}{}, + "del": struct{}{}, + "div": struct{}{}, + "dl": struct{}{}, + "fieldset": struct{}{}, + "form": struct{}{}, + "h1": struct{}{}, + "h2": struct{}{}, + "h3": struct{}{}, + "h4": struct{}{}, + "h5": struct{}{}, + "h6": struct{}{}, + "iframe": struct{}{}, + "ins": struct{}{}, + "math": struct{}{}, + "noscript": struct{}{}, + "ol": struct{}{}, + "pre": struct{}{}, + "p": struct{}{}, + "script": struct{}{}, + "style": struct{}{}, + "table": struct{}{}, + "ul": struct{}{}, + + // HTML5 + "address": struct{}{}, + "article": struct{}{}, + "aside": struct{}{}, + "canvas": struct{}{}, + "figcaption": struct{}{}, + "figure": struct{}{}, + "footer": struct{}{}, + "header": struct{}{}, + "hgroup": struct{}{}, + "main": struct{}{}, + "nav": struct{}{}, + "output": struct{}{}, + "progress": struct{}{}, + "section": struct{}{}, + "video": struct{}{}, + } +) + +// sanitizeAnchorName returns a sanitized anchor name for the given text. +// Taken from https://github.com/shurcooL/sanitized_anchor_name/blob/master/main.go#L14:1 +func sanitizeAnchorName(text string) string { + var anchorName []rune + var futureDash = false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *Parser) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // attributes that can be specific before a block element: + // + // {#id .class1 .class2 key="value"} + if p.extensions&Attributes != 0 { + data = p.attribute(data) + } + + if p.extensions&Includes != 0 { + f := p.readInclude + path, address, consumed := p.isInclude(data) + if consumed == 0 { + path, address, consumed = p.isCodeInclude(data) + f = p.readCodeInclude + } + if consumed > 0 { + included := f(p.includeStack.Last(), path, address) + p.includeStack.Push(path) + p.block(included) + p.includeStack.Pop() + data = data[consumed:] + continue + } + } + + // user supplied parser function + if p.Opts.ParserHook != nil { + node, blockdata, consumed := p.Opts.ParserHook(data) + if consumed > 0 { + data = data[consumed:] + + if node != nil { + p.addBlock(node) + if blockdata != nil { + p.block(blockdata) + p.finalize(node) + } + } + continue + } + } + + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // prefixed special heading: + // (there are no levels.) 
+ // + // .# Abstract + if p.isPrefixSpecialHeading(data) { + data = data[p.prefixSpecialHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
    + // ... + //
    + if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(&ast.HorizontalRule{}) + i := skipUntilChar(data, 0, '\n') + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // aside: + // + // A> The proof is too large to fit + // A> in the margin. + if p.extensions&Mmark != 0 { + if p.asidePrefix(data) > 0 { + data = data[p.aside(data):] + continue + } + } + + // figure block: + // + // !--- + // ![Alt Text](img.jpg "This is an image") + // ![Alt Text](img2.jpg "This is a second image") + // !--- + if p.extensions&Mmark != 0 { + if i := p.figureBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if i := p.oliPrefix(data); i > 0 { + start := 0 + if i > 2 && p.extensions&OrderedListStart != 0 { + s := string(data[:i-2]) + start, _ = strconv.Atoi(s) + if start == 1 { + start = 0 + } + } + data = data[p.list(data, ast.ListTypeOrdered, start):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ast.ListTypeDefinition, 0):] + continue + } + } + + if p.extensions&MathJax != 0 { + if i := p.blockMath(data); i > 0 { + data = data[i:] + continue + } + } + + // document matters: + // + // {frontmatter}/{mainmatter}/{backmatter} + if p.extensions&Mmark != 0 { + if i := p.documentMatter(data); i > 0 { + data = data[i:] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + idx := p.paragraph(data) + data = data[idx:] + } + + p.nesting-- +} + +func (p *Parser) addBlock(n ast.Node) ast.Node { + p.closeUnmatchedBlocks() + + if p.attr != nil { + if c := n.AsContainer(); c != nil { + c.Attribute = p.attr + } + if l := n.AsLeaf(); l != nil { + l.Attribute = p.attr + } + p.attr = nil + } + return p.addChild(n) +} + +func (p *Parser) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := skipCharN(data, 0, '#', 6) + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Parser) prefixHeading(data []byte) int { + level := skipCharN(data, 0, '#', 6) + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = sanitizeAnchorName(string(data[i:end])) + } + block := &ast.Heading{ + HeadingID: id, + Level: level, + } + block.Content = data[i:end] + p.addBlock(block) + } + return skip +} + +func (p *Parser) isPrefixSpecialHeading(data []byte) bool { + if p.extensions|Mmark == 0 { + return false + } + if len(data) < 4 { + return false + } + if data[0] != '.' { + return false + } + if data[1] != '#' { + return false + } + if data[2] == '#' { // we don't support level, so nack this. 
+ return false + } + + if p.extensions&SpaceHeadings != 0 { + if data[2] != ' ' { + return false + } + } + return true +} + +func (p *Parser) prefixSpecialHeading(data []byte) int { + i := skipChar(data, 2, ' ') // ".#" skipped + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = sanitizeAnchorName(string(data[i:end])) + } + block := &ast.Heading{ + HeadingID: id, + IsSpecial: true, + Level: 1, // always level 1. + } + block.Literal = data[i:end] + block.Content = data[i:end] + p.addBlock(block) + } + return skip +} + +func (p *Parser) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Parser) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := &ast.Heading{ + Level: 1, + IsTitleblock: true, + } + block.Content = data + p.addBlock(block) + + return consumed +} + +func (p *Parser) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
<hr> tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n</" + curtag + ">") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := backChar(data, i, '\n') + htmlBlock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}} + p.addBlock(htmlBlock) + finalizeHTMLBlock(htmlBlock) + } + + return i +} + +func finalizeHTMLBlock(block *ast.HTMLBlock) { + block.Literal = block.Content + block.Content = nil +} + +// HTML comment, lax form +func (p *Parser) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := backChar(data, size, '\n') + htmlBlock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}} + p.addBlock(htmlBlock) + finalizeHTMLBlock(htmlBlock) + } + return size + } + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *Parser) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an <hr>
tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := backChar(data, size, '\n') + htmlBlock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}} + p.addBlock(htmlBlock) + finalizeHTMLBlock(htmlBlock) + } + return size + } + } + return 0 +} + +func (p *Parser) htmlFindTag(data []byte) (string, bool) { + i := skipAlnum(data, 0) + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("</" + tag + ">") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + i = skipCharN(data, i, '\n', 1) + return i +} + +func (*Parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If syntax is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + n := len(data) + // skip up to three spaces + for i < n && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= n { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < n && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the syntax, and discard it if the caller doesn't care.
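// Editor's aside: a minimal sketch (not part of the vendored file) of the fence
// forms isFenceLine accepts above: at least three backticks or tildes, plus an
// optional info string (or the brace form ``` {go}). It assumes only the public
// gomarkdown API; a fresh parser is needed per document.
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	inputs := []string{
		"``` go\nfmt.Println(1)\n```\n", // backtick fence with an info string
		"~~~\nplain text\n~~~\n",        // tilde fence, no info string
	}
	for _, md := range inputs {
		p := parser.NewWithExtensions(parser.FencedCode) // parsers are single-use
		fmt.Print(string(markdown.ToHTML([]byte(md), p, nil)))
	}
}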
+ if syntax != nil { + syn := 0 + i = skipChar(data, i, ' ') + + if i >= n { + if i == n { + return i, marker + } + return 0, "" + } + + syntaxStart := i + + if data[i] == '{' { + i++ + syntaxStart++ + + for i < n && data[i] != '}' && data[i] != '\n' { + syn++ + i++ + } + + if i >= n || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for syn > 0 && isSpace(data[syntaxStart]) { + syntaxStart++ + syn-- + } + + for syn > 0 && isSpace(data[syntaxStart+syn-1]) { + syn-- + } + + i++ + } else { + for i < n && !isSpace(data[i]) { + syn++ + i++ + } + } + + *syntax = string(data[syntaxStart : syntaxStart+syn]) + } + + i = skipChar(data, i, ' ') + if i >= n || data[i] != '\n' { + if i == n { + return i, marker + } + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int { + var syntax string + beg, marker := isFenceLine(data, &syntax, "") + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + work.WriteString(syntax) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? + if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + codeBlock := &ast.CodeBlock{ + IsFenced: true, + } + codeBlock.Content = work.Bytes() // TODO: get rid of temp buffer + + if p.extensions&Mmark == 0 { + p.addBlock(codeBlock) + finalizeCodeBlock(codeBlock) + return beg + } + + // Check for caption and if found make it a figure. 
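// Editor's aside: a hedged sketch of the Mmark caption path checked above. A
// line starting with "Figure: " directly after the closing fence should wrap
// the code block in an ast.CaptionFigure, with "{#fig:add}" becoming its
// HeadingID; the exact input and id are illustrative assumptions.
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/ast"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	md := []byte("```\nsum := a + b\n```\nFigure: Adding two numbers. {#fig:add}\n")
	p := parser.NewWithExtensions(parser.FencedCode | parser.Mmark)
	doc := markdown.Parse(md, p)
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		if fig, ok := node.(*ast.CaptionFigure); ok && entering {
			fmt.Println("figure id:", fig.HeadingID) // expected: fig:add
		}
		return ast.GoToNext
	})
}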
+ if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 { + figure := &ast.CaptionFigure{} + caption := &ast.Caption{} + figure.HeadingID = id + p.Inline(caption, captionContent) + + p.addBlock(figure) + codeBlock.AsLeaf().Attribute = figure.AsContainer().Attribute + p.addChild(codeBlock) + finalizeCodeBlock(codeBlock) + p.addChild(caption) + p.finalize(figure) + + beg += consumed + + return beg + } + + // Still here, normal block + p.addBlock(codeBlock) + finalizeCodeBlock(codeBlock) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(code *ast.CodeBlock) { + c := code.Content + if code.IsFenced { + newlinePos := bytes.IndexByte(c, '\n') + firstLine := c[:newlinePos] + rest := c[newlinePos+1:] + code.Info = unescapeString(bytes.Trim(firstLine, "\n")) + code.Literal = rest + } else { + code.Literal = c + } + code.Content = nil +} + +func (p *Parser) table(data []byte) int { + i, columns, table := p.tableHeader(data) + if i == 0 { + return 0 + } + + p.addBlock(&ast.TableBody{}) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i = skipCharN(data, i, '\n', 1) + + if p.tableFooter(data[rowStart:i]) { + continue + } + + p.tableRow(data[rowStart:i], columns, false) + } + if captionContent, id, consumed := p.caption(data[i:], []byte("Table: ")); consumed > 0 { + caption := &ast.Caption{} + p.Inline(caption, captionContent) + + // Some switcheroo to re-insert the parsed table as a child of the captionfigure. + figure := &ast.CaptionFigure{} + figure.HeadingID = id + table2 := &ast.Table{} + // Retain any block level attributes. + table2.AsContainer().Attribute = table.AsContainer().Attribute + children := table.GetChildren() + ast.RemoveFromTree(table) + + table2.SetChildren(children) + ast.AppendChild(figure, table2) + ast.AppendChild(figure, caption) + + p.addChild(figure) + p.finalize(figure) + + i += consumed + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +// tableHeader parses the header. If recognized it will also add a table.
+func (p *Parser) tableHeader(data []byte) (size int, columns []ast.CellAlignFlags, table ast.Node) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := skipCharN(data, i, '\n', 1) + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]ast.CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + n := len(data) + for i < n && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= ast.TableAlignmentLeft + dashes++ + } + for i < n && data[i] == '-' { + i++ + dashes++ + } + if i < n && data[i] == ':' { + i++ + columns[col] |= ast.TableAlignmentRight + dashes++ + } + for i < n && data[i] == ' ' { + i++ + } + if i == n { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < n && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + table = &ast.Table{} + p.addBlock(table) + p.addBlock(&ast.TableHeader{}) + p.tableRow(header, columns, true) + size = skipCharN(data, i, '\n', 1) + return +} + +func (p *Parser) tableRow(data []byte, columns []ast.CellAlignFlags, header bool) { + p.addBlock(&ast.TableRow{}) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + n := len(data) + for col = 0; col < len(columns) && i < n; col++ { + for i < n && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < n && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < n && data[cellEnd-1] == ' ' { + cellEnd-- + } + + block := &ast.TableCell{ + IsHeader: header, + Align: columns[col], + } + block.Content = data[cellStart:cellEnd] + p.addBlock(block) + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + block := &ast.TableCell{ + IsHeader: header, + Align: columns[col], + } + p.addBlock(block) + } + + // silently ignore rows with too many cells +} + +// tableFooter parses the (optional) table footer. 
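// Editor's aside: a small sketch of the table syntax tableHeader accepts. Each
// delimiter-row column needs at least three dashes/colons; a leading colon sets
// left alignment and a trailing colon right alignment. A trailing "Table: "
// caption (with Mmark) would wrap the table in a CaptionFigure, as above.
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	md := []byte("Name  | Age\n:-----|----:\nBob   | 31\nAlice | 27\n")
	p := parser.NewWithExtensions(parser.Tables)
	fmt.Print(string(markdown.ToHTML(md, p, nil)))
}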
+func (p *Parser) tableFooter(data []byte) bool { + colCount := 1 + for i := 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + continue + } + // remaining data must be the = character + if data[i] != '=' { + return false + } + } + + // doesn't look like a table footer + if colCount == 1 { + return false + } + + p.addBlock(&ast.TableFooter{}) + + return true +} + +// returns blockquote prefix length +func (p *Parser) quotePrefix(data []byte) int { + i := 0 + n := len(data) + for i < 3 && i < n && data[i] == ' ' { + i++ + } + if i < n && data[i] == '>' { + if i+1 < n && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Parser) quote(data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // regardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end = skipCharN(data, end, '\n', 1) + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + if p.extensions&Mmark == 0 { + block := p.addBlock(&ast.BlockQuote{}) + p.block(raw.Bytes()) + p.finalize(block) + return end + } + + if captionContent, id, consumed := p.caption(data[end:], []byte("Quote: ")); consumed > 0 { + figure := &ast.CaptionFigure{} + caption := &ast.Caption{} + figure.HeadingID = id + p.Inline(caption, captionContent) + + p.addBlock(figure) // this discards any attributes + block := &ast.BlockQuote{} + block.AsContainer().Attribute = figure.AsContainer().Attribute + p.addChild(block) + p.block(raw.Bytes()) + p.finalize(block) + + p.addChild(caption) + p.finalize(figure) + + end += consumed + + return end + } + + block := p.addBlock(&ast.BlockQuote{}) + p.block(raw.Bytes()) + p.finalize(block) + + return end +} + +// returns prefix length for block code +func (p *Parser) codePrefix(data []byte) int { + n := len(data) + if n >= 1 && data[0] == '\t' { + return 1 + } + if n >= 4 && data[3] == ' ' && data[2] == ' ' && data[1] == ' ' && data[0] == ' ' { + return 4 + } + return 0 +} + +func (p *Parser) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + + i = skipUntilChar(data, i, '\n') + i = skipCharN(data, i, '\n', 1) + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + + eol := backChar(workbytes, len(workbytes), '\n')
+ + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + codeBlock := &ast.CodeBlock{ + IsFenced: false, + } + // TODO: get rid of temp buffer + codeBlock.Content = work.Bytes() + p.addBlock(codeBlock) + finalizeCodeBlock(codeBlock) + + return i +} + +// returns unordered list item prefix +func (p *Parser) uliPrefix(data []byte) int { + // start with up to 3 spaces + i := skipCharN(data, 0, ' ', 3) + + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Parser) oliPrefix(data []byte) int { + // start with up to 3 spaces + i := skipCharN(data, 0, ' ', 3) + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Parser) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + // need a ':' followed by a space or a tab + if data[0] != ':' || !(data[1] == ' ' || data[1] == '\t') { + return 0 + } + i := skipChar(data, 0, ' ') + return i + 2 +} + +// parse ordered or unordered list block +func (p *Parser) list(data []byte, flags ast.ListType, start int) int { + i := 0 + flags |= ast.ListItemBeginningOfList + list := &ast.List{ + ListFlags: flags, + Tight: true, + Start: start, + } + block := p.addBlock(list) + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ast.ListItemContainsBlock != 0 { + list.Tight = false + } + i += skip + if skip == 0 || flags&ast.ListItemEndOfList != 0 { + break + } + flags &= ^ast.ListItemBeginningOfList + } + + above := block.GetParent() + finalizeList(list) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Parser) listTypeChanged(data []byte, flags *ast.ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ast.ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) > 0 && *flags&ast.ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ast.ListTypeOrdered != 0 || *flags&ast.ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block ast.Node) bool { + // TODO: figure this out. Always false now. 
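// Editor's aside: a sketch of the three list prefixes defined above. uliPrefix
// matches "* ", "+ " or "- "; oliPrefix matches digits plus ". "; dliPrefix
// matches ": " for definition lists (parser.DefinitionLists). With
// OrderedListStart the first number ("4.") is kept as the list's start value
// instead of being normalized to 1.
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	md := []byte("4. four\n5. five\n\n* bullet one\n* bullet two\n")
	p := parser.NewWithExtensions(parser.OrderedListStart)
	fmt.Print(string(markdown.ToHTML(md, p, nil)))
	// The <ol> should carry start="4"; the exact HTML shape is an assumption.
}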
+ for block != nil { + //if block.lastLineBlank { + //return true + //} + switch block.(type) { + case *ast.List, *ast.ListItem: + block = ast.GetLastChild(block) + default: + return false + } + } + return false +} + +func finalizeList(list *ast.List) { + items := list.Parent.GetChildren() + lastItemIdx := len(items) - 1 + for i, item := range items { + isLastItem := i == lastItemIdx + // check for non-final list item ending with blank line: + if !isLastItem && endsWithBlankLine(item) { + list.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItems := item.GetParent().GetChildren() + lastSubItemIdx := len(subItems) - 1 + for j, subItem := range subItems { + isLastSubItem := j == lastSubItemIdx + if (!isLastItem || !isLastSubItem) && endsWithBlankLine(subItem) { + list.Tight = false + break + } + } + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. +func (p *Parser) listItem(data []byte, flags *ast.ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ast.ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ast.ListTypeDefinition != 0 { + *flags |= ast.ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + i = skipChar(data, i, ' ') + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || p.oliPrefix(chunk) > 0 || p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ast.ListItemEndOfList + } else if containsBlankLine { + *flags |= ast.ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ast.ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + // in the case of dliPrefix we are too late and need to search back for the definition item, which + // should be on the previous line, we then adjust sublist to start there. 
+ if p.dliPrefix(chunk) > 0 { + sublist = backUntilChar(raw.Bytes(), raw.Len()-1, '\n') + } + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk), p.isPrefixSpecialHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ast.ListItemEndOfList + break gatherlines + } + *flags |= ast.ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ast.ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? + next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && next < len(data)-1 && data[next] != ':' { + *flags |= ast.ListItemEndOfList + } + } else { + *flags |= ast.ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ast.ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + listItem := &ast.ListItem{ + ListFlags: *flags, + Tight: false, + BulletChar: bulletChar, + Delimiter: '.', // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + } + p.addBlock(listItem) + + // render the contents of the list item + if *flags&ast.ListItemContainsBlock != 0 && *flags&ast.ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + para := &ast.Paragraph{} + if sublist > 0 { + para.Content = rawBytes[:sublist] + } else { + para.Content = rawBytes + } + p.addChild(para) + if sublist > 0 { + p.block(rawBytes[sublist:]) + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Parser) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := skipChar(data, 0, ' ') + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + para := &ast.Paragraph{} + para.Content = data[beg:end] + p.addBlock(para) +} + +// blockMath handles a block surrounded by $$ +func (p *Parser) blockMath(data []byte) int { + if len(data) <= 4 || data[0] != '$' || data[1] != '$' || data[2] == '$' { + return 0 + } + + // find next $$ + var end int + for end = 2; end+1 < len(data) && (data[end] != '$' || data[end+1] != '$'); end++ { + } + + // no matching $$ found + if end+1 == len(data) { + return 0 + } + + // render the display math + mathBlock := &ast.MathBlock{} + mathBlock.Literal = data[2:end] + p.addBlock(mathBlock) + + return end + 2 +} + +func (p *Parser) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := tabSizeDefault + if
p.extensions&TabSizeEight != 0 { + tabSize = tabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // is this blank line followed by a definition list item? + if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + listLen := p.list(data[prev:], ast.ListTypeDefinition, 0) + return prev + listLen + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = sanitizeAnchorName(string(data[prev:eol])) + } + + block := &ast.Heading{ + Level: level, + HeadingID: id, + } + block.Content = data[prev:eol] + p.addBlock(block) + + // find the end of the underline + return skipUntilChar(data, i, '\n') + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isPrefixSpecialHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a figure block, paragraph is over + if p.extensions&Mmark != 0 { + if p.figureBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ast.ListTypeDefinition, 0) + return ret + prev + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +// skipChar advances i as long as data[i] == c +func skipChar(data []byte, i int, c byte) int { + n := len(data) + for i < n && data[i] == c { + i++ + } + return i +} + +// like skipChar but only skips up to max characters +func skipCharN(data []byte, i int, c byte, max int) int { + n := len(data) + for i < n && max > 0 && data[i] == c { + i++ + max-- + } + return i +} + +// 
skipUntilChar advances i as long as data[i] != c +func skipUntilChar(data []byte, i int, c byte) int { + n := len(data) + for i < n && data[i] != c { + i++ + } + return i +} + +func skipAlnum(data []byte, i int) int { + n := len(data) + for i < n && isAlnum(data[i]) { + i++ + } + return i +} + +func skipSpace(data []byte, i int) int { + n := len(data) + for i < n && isSpace(data[i]) { + i++ + } + return i +} + +func backChar(data []byte, i int, c byte) int { + for i > 0 && data[i-1] == c { + i-- + } + return i +} + +func backUntilChar(data []byte, i int, c byte) int { + for i > 0 && data[i-1] != c { + i-- + } + return i +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/callout.go b/vendor/github.com/gomarkdown/markdown/parser/callout.go new file mode 100644 index 000000000..15858aa97 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/callout.go @@ -0,0 +1,29 @@ +package parser + +import ( + "bytes" + "strconv" +) + +// IsCallout detects a callout in the following format: <<N>> where N is an integer > 0. +func IsCallout(data []byte) (id []byte, consumed int) { + if !bytes.HasPrefix(data, []byte("<<")) { + return nil, 0 + } + start := 2 + end := bytes.Index(data[start:], []byte(">>")) + if end < 0 { + return nil, 0 + } + + b := data[start : start+end] + b = bytes.TrimSpace(b) + i, err := strconv.Atoi(string(b)) + if err != nil { + return nil, 0 + } + if i <= 0 { + return nil, 0 + } + return b, start + end + 2 // 2 for >> +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/caption.go b/vendor/github.com/gomarkdown/markdown/parser/caption.go new file mode 100644 index 000000000..54d3f741f --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/caption.go @@ -0,0 +1,70 @@ +package parser + +import ( + "bytes" +) + +// caption checks for a caption; it returns the caption data and a potential "headingID". +func (p *Parser) caption(data, caption []byte) ([]byte, string, int) { + if !bytes.HasPrefix(data, caption) { + return nil, "", 0 + } + j := len(caption) + data = data[j:] + end := p.linesUntilEmpty(data) + + data = data[:end] + + id, start := captionID(data) + if id != "" { + return data[:start], id, end + j + } + + return data, "", end + j +} + +// linesUntilEmpty scans lines up to the first empty line. +func (p *Parser) linesUntilEmpty(data []byte) int { + line, i := 0, 0 + + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + if p.isEmpty(data[line:i]) == 0 { + line = i + continue + } + + break + } + return i +} + +// captionID checks if the caption *ends* in {#....}. If so the text after {# is taken to be +// the ID/anchor of the entire figure block. +func captionID(data []byte) (string, int) { + end := len(data) + + j, k := 0, 0 + // find start/end of heading id + for j = 0; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // the remainder must be whitespace. + for l := k + 1; l < end; l++ { + if !isSpace(data[l]) { + return "", 0 + } + } + + if j > 0 && k > 0 && j+2 < k { + return string(data[j+2 : k]), j + } + return "", 0 +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/citation.go b/vendor/github.com/gomarkdown/markdown/parser/citation.go new file mode 100644 index 000000000..8ea1fbeee --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/citation.go @@ -0,0 +1,86 @@ +package parser + +import ( + "bytes" + + "github.com/gomarkdown/markdown/ast" +) + +// citation parses a citation.
In its simplest form, [@ref], we allow multiple references + // separated by semicolons and a sub reference inside, a la pandoc: [@ref, p. 23]. + // Each citation can have a modifier: !, ? or -, which mean: + // + // ! - normative + // ? - informative + // - - suppressed + // + // The suffix starts after a comma; we strip any whitespace before and after. If the output + // allows for it, this can be rendered. +func citation(p *Parser, data []byte, offset int) (int, ast.Node) { + // look for the matching closing bracket + i := offset + 1 + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + // no newlines allowed. + return 0, nil + + case data[i-1] == '\\': + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + node := &ast.Citation{} + + citations := bytes.Split(data[1:i], []byte(";")) + for _, citation := range citations { + var suffix []byte + citation = bytes.TrimSpace(citation) + j := 0 + if citation[j] != '@' { + // not a citation, drop out entirely. + return 0, nil + } + if c := bytes.Index(citation, []byte(",")); c > 0 { + part := citation[:c] + suff := citation[c+1:] + part = bytes.TrimSpace(part) + suff = bytes.TrimSpace(suff) + + citation = part + suffix = suff + } + + citeType := ast.CitationTypeInformative + j = 1 + switch citation[j] { + case '!': + citeType = ast.CitationTypeNormative + j++ + case '?': + citeType = ast.CitationTypeInformative + j++ + case '-': + citeType = ast.CitationTypeSuppressed + j++ + } + node.Destination = append(node.Destination, citation[j:]) + node.Type = append(node.Type, citeType) + node.Suffix = append(node.Suffix, suffix) + } + + return i + 1, node +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/esc.go b/vendor/github.com/gomarkdown/markdown/parser/esc.go new file mode 100644 index 000000000..0a79aa35e --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/esc.go @@ -0,0 +1,20 @@ +package parser + +// isEscape returns true if byte i is prefixed by an odd number of backslashes. +func isEscape(data []byte, i int) bool { + if i == 0 { + return false + } + if i == 1 { + return data[0] == '\\' + } + j := i - 1 + for ; j >= 0; j-- { + if data[j] != '\\' { + break + } + } + j++ + // an odd number of backslashes means escape + return (i-j)%2 != 0 +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/figures.go b/vendor/github.com/gomarkdown/markdown/parser/figures.go new file mode 100644 index 000000000..6615449cf --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/figures.go @@ -0,0 +1,119 @@ +package parser + +import ( + "bytes" + + "github.com/gomarkdown/markdown/ast" +) + +// sFigureLine checks if there's a figure line (e.g., !--- ) at the beginning of data, +// and returns the end index if so, or 0 otherwise. +func sFigureLine(data []byte, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + n := len(data) + // skip up to three spaces + for i < n && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ! + if i+1 >= n { + return 0, "" + } + if data[i] != '!' || data[i+1] != '-' { + return 0, "" + } + i++ + + c := data[i] // i.e.
the - + + // the whole line must be the same char or whitespace + for i < n && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // there is no syntax modifier although it might be an idea to re-use this space for something? + + i = skipChar(data, i, ' ') + if i >= n || data[i] != '\n' { + if i == n { + return i, marker + } + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// figureBlock returns the end index if data contains a figure block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the figure block. +func (p *Parser) figureBlock(data []byte, doRender bool) int { + beg, marker := sFigureLine(data, "") + if beg == 0 || beg >= len(data) { + return 0 + } + + var raw bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + figEnd, _ := sFigureLine(data[beg:], marker) + if figEnd != 0 { + beg += figEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? + if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + raw.Write(data[beg:end]) + } + beg = end + } + + if !doRender { + return beg + } + + figure := &ast.CaptionFigure{} + p.addBlock(figure) + p.block(raw.Bytes()) + + defer p.finalize(figure) + + if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 { + caption := &ast.Caption{} + p.Inline(caption, captionContent) + + figure.HeadingID = id + + p.addChild(caption) + + beg += consumed + } + + p.finalize(figure) + return beg +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/include.go b/vendor/github.com/gomarkdown/markdown/parser/include.go new file mode 100644 index 000000000..2448a6854 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/include.go @@ -0,0 +1,129 @@ +package parser + +import ( + "bytes" + "path" + "path/filepath" +) + +// isInclude parses {{...}}[...], that contains a path between the {{, the [...] syntax contains +// an address to select which lines to include. It is treated as an opaque string and just given +// to readInclude. 
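// Editor's aside: a hedged sketch of the include syntax documented above:
// {{file}} with an optional [address] selector, resolved through the
// ReadIncludeFn hook. "chapter1.md" is a hypothetical file, and the Includes
// extension name plus the Options wiring are assumptions based on this file.
package main

import (
	"fmt"
	"os"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	p := parser.NewWithExtensions(parser.CommonExtensions | parser.Includes)
	p.Opts = parser.Options{
		// address is passed through verbatim; interpreting it is up to us here.
		ReadIncludeFn: func(from, file string, address []byte) []byte {
			data, err := os.ReadFile(file)
			if err != nil {
				return nil
			}
			return data
		},
	}
	fmt.Print(string(markdown.ToHTML([]byte("{{chapter1.md}}\n"), p, nil)))
}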
+func (p *Parser) isInclude(data []byte) (filename string, address []byte, consumed int) { + i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces + if len(data[i:]) < 3 { + return "", nil, 0 + } + if data[i] != '{' || data[i+1] != '{' { + return "", nil, 0 + } + start := i + 2 + + // find the end delimiter + i = skipUntilChar(data, i, '}') + if i+1 >= len(data) { + return "", nil, 0 + } + end := i + i++ + if data[i] != '}' { + return "", nil, 0 + } + filename = string(data[start:end]) + + if i+1 < len(data) && data[i+1] == '[' { // potential address specification + start := i + 2 + + end = skipUntilChar(data, start, ']') + if end >= len(data) { + return "", nil, 0 + } + address = data[start:end] + return filename, address, end + 1 + } + + return filename, address, i + 1 +} + +func (p *Parser) readInclude(from, file string, address []byte) []byte { + if p.Opts.ReadIncludeFn != nil { + return p.Opts.ReadIncludeFn(from, file, address) + } + + return nil +} + +// isCodeInclude parses <{{...}}, which is similar to isInclude; the returned bytes are, however, wrapped in a code block. +func (p *Parser) isCodeInclude(data []byte) (filename string, address []byte, consumed int) { + i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces + if len(data[i:]) < 3 { + return "", nil, 0 + } + if data[i] != '<' { + return "", nil, 0 + } + start := i + + filename, address, consumed = p.isInclude(data[i+1:]) + if consumed == 0 { + return "", nil, 0 + } + return filename, address, start + consumed + 1 +} + +// readCodeInclude acts like include except the returned bytes are wrapped in a fenced code block. +func (p *Parser) readCodeInclude(from, file string, address []byte) []byte { + data := p.readInclude(from, file, address) + if data == nil { + return nil + } + ext := path.Ext(file) + buf := &bytes.Buffer{} + buf.Write([]byte("```")) + if ext != "" { // starts with a dot + buf.WriteString(" " + ext[1:] + "\n") + } else { + buf.WriteByte('\n') + } + buf.Write(data) + buf.WriteString("```\n") + return buf.Bytes() +} + +// incStack holds the current stack of chained includes. Each value is the containing +// path of the file being parsed. +type incStack struct { + stack []string +} + +func newIncStack() *incStack { + return &incStack{stack: []string{}} +} + +// Push updates i with new. +func (i *incStack) Push(new string) { + if path.IsAbs(new) { + i.stack = append(i.stack, path.Dir(new)) + return + } + last := "" + if len(i.stack) > 0 { + last = i.stack[len(i.stack)-1] + } + i.stack = append(i.stack, path.Dir(filepath.Join(last, new))) +} + +// Pop pops the last value. +func (i *incStack) Pop() { + if len(i.stack) == 0 { + return + } + i.stack = i.stack[:len(i.stack)-1] +} + +func (i *incStack) Last() string { + if len(i.stack) == 0 { + return "" + } + return i.stack[len(i.stack)-1] +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/inline.go b/vendor/github.com/gomarkdown/markdown/parser/inline.go new file mode 100644 index 000000000..bf2e46bdf --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/inline.go @@ -0,0 +1,1280 @@ +package parser + +import ( + "bytes" + "regexp" + "strconv" + + "github.com/gomarkdown/markdown/ast" +) + +// Parsing of inline elements + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`) + + // TODO: improve this regexp to catch all possible entities: + htmlEntityRe = regexp.MustCompile(`&[a-z]{2,5};`) +) + +// Inline parses text within a block.
+// Each function returns the number of consumed chars. +func (p *Parser) Inline(currBlock ast.Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + + n := len(data) + for end < n { + handler := p.inlineCallback[data[end]] + if handler == nil { + end++ + continue + } + consumed, node := handler(p, data, end) + if consumed == 0 { + // no action from the callback + end++ + continue + } + // copy inactive chars into the output + ast.AppendChild(currBlock, newTextNode(data[beg:end])) + if node != nil { + ast.AppendChild(currBlock, node) + } + beg = end + consumed + end = beg + } + + if beg < n { + if data[end-1] == '\n' { + end-- + } + ast.AppendChild(currBlock, newTextNode(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + c := data[0] + + n := len(data) + if n > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if isSpace(data[1]) { + return 0, nil + } + if p.extensions&SuperSubscript != 0 && c == '~' { + // potential subscript, no spaces, except when escaped, helperEmphasis does + // not check that for us, so walk the bytes and check. + ret := skipUntilChar(data[1:], 0, c) + if ret == 0 { + return 0, nil + } + ret++ // we started with data[1:] above. + for i := 1; i < ret; i++ { + if isSpace(data[i]) && !isEscape(data, i) { + return 0, nil + } + } + sub := &ast.Subscript{} + sub.Literal = data[1:ret] + return ret + 1, sub + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if n > 3 && data[1] == c && data[2] != c { + if isSpace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if n > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isSpace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + + // count the number of backticks in the delimiter + nb := skipChar(data, 0, '`') + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := &ast.Code{} + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes <br>
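// Editor's aside: a short sketch of the two hard-break rules handled next. Two
// or more trailing spaces before a newline yield an ast.Hardbreak; with the
// HardLineBreak extension every newline does.
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	// Two trailing spaces: maybeLineBreak should emit a hard break.
	fmt.Print(string(markdown.ToHTML([]byte("one  \ntwo\n"), nil, nil)))

	// HardLineBreak: every newline becomes a hard break.
	p := parser.NewWithExtensions(parser.HardLineBreak)
	fmt.Print(string(markdown.ToHTML([]byte("a\nb\n"), p, nil)))
}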
    +func maybeLineBreak(p *Parser, data []byte, offset int) (int, ast.Node) { + origOffset := offset + offset = skipChar(data, offset, ' ') + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, &ast.Hardbreak{} + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Parser, data []byte, offset int) (int, ast.Node) { + if p.extensions&HardLineBreak != 0 { + return 1, &ast.Hardbreak{} + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote + linkCitation +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Parser, data []byte, offset int) (int, ast.Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnoteOrSuper(p *Parser, data []byte, offset int) (int, ast.Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + + if p.extensions&SuperSubscript != 0 { + ret := skipUntilChar(data[offset:], 1, '^') + if ret == 0 { + return 0, nil + } + for i := offset; i < offset+ret; i++ { + if isSpace(data[i]) && !isEscape(data, i) { + return 0, nil + } + } + sup := &ast.Superscript{} + sup.Literal = data[offset+1 : offset+ret] + return offset + ret, sup + } + + return 0, nil +} + +// '[': parse a link or an image or a footnote or a citation +func link(p *Parser, data []byte, offset int) (int, ast.Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // [@citation], [@-citation], [@?citation], [@!citation] + case p.extensions&Mmark != 0 && len(data)-1 > offset && data[offset+1] == '@': + t = linkCitation + // [text] == regular link + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + default: + t = linkNormal + } + + data = data[offset:] + + if t == linkCitation { + return citation(p, data, 0) + } + + var ( + i = 1 + noteID int + title, link, linkID, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case data[i-1] == '\\': + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode ast.Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + i = skipSpace(data, 
i) + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + i = skipSpace(data, i) + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' || data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isSpace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isSpace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + i = skipUntilChar(data, i, ']') + + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + linkID = id + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = &ast.ListItem{} + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + p.refsRecord[string(ref.link)] = struct{}{} + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote && !p.isFootnote(lr) { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + p.refsRecord[string(lr.link)] = struct{}{} + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + switch t { + case linkNormal: + link := &ast.Link{ + Destination: normalizeURI(uLink), + Title: title, + DeferredID: linkID, + } + if len(altContent) > 0 { + ast.AppendChild(link, newTextNode(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.Inline(link, data[1:txtE]) + p.insideLink = insideLink + } + return i, link + + case linkImg: + image := &ast.Image{ + Destination: uLink, + Title: title, + } + ast.AppendChild(image, newTextNode(data[1:txtE])) + return i + 1, image + + case linkInlineFootnote, linkDeferredFootnote: + link := &ast.Link{ + Destination: link, + Title: title, + NoteID: noteID, + Footnote: footnoteNode, + } + if t == linkDeferredFootnote { + link.DeferredID = data[2:txtE] + } + if t == linkInlineFootnote { + i++ + } + return i, link + + default: + return 0, nil + } +} + +func (p *Parser) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. 
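// Editor's aside: a sketch of the footnote forms link() handles above: the
// deferred form [^id] with a matching "[^id]: body" reference and, with the
// same extension, the inline form ^[body].
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	md := []byte("Some claim.[^1]\n\n[^1]: The footnote body.\n")
	p := parser.NewWithExtensions(parser.CommonExtensions | parser.Footnotes)
	fmt.Print(string(markdown.ToHTML(md, p, nil)))
}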
+const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + + if p.extensions&Mmark != 0 { + id, consumed := IsCallout(data) + if consumed > 0 { + node := &ast.Callout{} + node.ID = id + return consumed, node + } + } + + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end <= 2 { + return end, nil + } + if altype == notAutolink { + htmlTag := &ast.HTMLSpan{} + htmlTag.Literal = data[:end] + return end, htmlTag + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() <= 0 { + return end, nil + } + link := uLink.Bytes() + node := &ast.Link{ + Destination: link, + } + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) + } + ast.AppendChild(node, newTextNode(stripMailto(link))) + return end, node +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + + if len(data) <= 1 { + return 2, nil + } + + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, &ast.Hardbreak{} + } + + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, newTextNode(data[1:2]) +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + + end := skipCharN(data, 1, '#', 1) + end = skipAlnum(data, end) + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, newTextNode(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. 
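+//
+// Expected behaviour, as a sketch:
+//
+//	hasPrefixCaseInsensitive([]byte("HTTP://x"), []byte("http://"))   // true
+//	hasPrefixCaseInsensitive([]byte("gopher://x"), []byte("http://")) // false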
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Parser, data []byte, offset int) (int, ast.Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Parser, data []byte, offset int) (int, ast.Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := &ast.HTMLSpan{} + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isLetter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := &ast.Link{ + Destination: uLink.Bytes(), + } + ast.AppendChild(node, newTextNode(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isSpace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + nLink := len(link) + for _, path := range validPaths { + nPath := len(path) + linkPrefix := link[:nPath] + if nLink >= nPath && bytes.Equal(linkPrefix, path) { + if nLink == nPath { + return true + } else if isAlnum(link[nPath]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + nPrefix := len(prefix) + if nLink > nPrefix { + linkPrefix := bytes.ToLower(link[:nPrefix]) + if bytes.Equal(linkPrefix, prefix) && isAlnum(link[nPrefix]) { + return true + } + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isAlnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isAlnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isSpace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i, c := range data { + if isAlnum(c) { + continue + } + + switch c { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isSpace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isSpace(data[i+1]) || isPunctuation(data[i+1])) { + continue + } + } + + emph := &ast.Emph{} + p.Inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isSpace(data[i-1]) { + 
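+			// a matching closing pair (e.g. ** or ~~) that is not preceded
+			// by whitespace closes the emphasis span here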
var node ast.Node = &ast.Strong{} + if c == '~' { + node = &ast.Del{} + } + p.Inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Parser, data []byte, offset int, c byte) (int, ast.Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isSpace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := &ast.Strong{} + em := &ast.Emph{} + ast.AppendChild(strong, em) + p.Inline(em, data[:i]) + return i + 3, strong + case i+1 < len(data) && data[i+1] == c: + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +// math handle inline math wrapped with '$' +func math(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + + // too short, or block math + if len(data) <= 2 || data[1] == '$' { + return 0, nil + } + + // find next '$' + var end int + for end = 1; end < len(data) && data[end] != '$'; end++ { + } + + // $ not match + if end == len(data) { + return 0, nil + } + + // create inline math node + math := &ast.Math{} + math.Literal = data[1:end] + return end + 1, math +} + +func newTextNode(d []byte) *ast.Text { + return &ast.Text{ast.Leaf{Literal: d}} +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/matter.go b/vendor/github.com/gomarkdown/markdown/parser/matter.go new file mode 100644 index 000000000..926863572 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/matter.go @@ -0,0 +1,36 @@ +package parser + +import ( + "bytes" + + "github.com/gomarkdown/markdown/ast" +) + +func (p *Parser) documentMatter(data []byte) int { + if data[0] != '{' { + return 0 + } + + consumed := 0 + matter := ast.DocumentMatterNone + if bytes.HasPrefix(data, []byte("{frontmatter}")) { + consumed = len("{frontmatter}") + matter = ast.DocumentMatterFront + } + if bytes.HasPrefix(data, []byte("{mainmatter}")) { + consumed = len("{mainmatter}") + matter = ast.DocumentMatterMain + } + if bytes.HasPrefix(data, []byte("{backmatter}")) { + consumed = len("{backmatter}") + matter = ast.DocumentMatterBack + } + if consumed == 0 { + return 0 + } + node := &ast.DocumentMatter{Matter: matter} + p.addBlock(node) + p.finalize(node) + + return consumed +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/options.go b/vendor/github.com/gomarkdown/markdown/parser/options.go new file mode 100644 index 000000000..d3d0c0887 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/options.go @@ -0,0 +1,32 @@ +package parser + +import ( + "github.com/gomarkdown/markdown/ast" +) + +// Flags control optional behavior of parser. +type Flags int + +// Options is a collection of supplementary parameters tweaking the behavior of various parts of the parser. +type Options struct { + ParserHook BlockFunc + ReadIncludeFn ReadIncludeFunc + + Flags Flags // Flags allow customizing parser's behavior +} + +// Parser renderer configuration options. 
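+//
+// For example, a minimal sketch that parses without emitting the footnote
+// list at the end of the document:
+//
+//	p := parser.New()
+//	p.Opts = parser.Options{Flags: parser.SkipFootnoteList}
+//	doc := p.Parse(input)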
+const ( + FlagsNone Flags = 0 + SkipFootnoteList Flags = 1 << iota // Skip adding the footnote list (regardless if they are parsed) +) + +// BlockFunc allows to registration of a parser function. If successful it +// returns an ast.Node, a buffer that should be parsed as a block and the the number of bytes consumed. +type BlockFunc func(data []byte) (ast.Node, []byte, int) + +// ReadIncludeFunc should read the file under path and returns the read bytes, +// from will be set to the name of the current file being parsed. Initially +// this will be empty. address is the optional address specifier of which lines +// of the file to return. If this function is not set no data will be read. +type ReadIncludeFunc func(from, path string, address []byte) []byte diff --git a/vendor/github.com/gomarkdown/markdown/parser/parser.go b/vendor/github.com/gomarkdown/markdown/parser/parser.go new file mode 100644 index 000000000..bb741da14 --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/parser.go @@ -0,0 +1,811 @@ +/* +Package parser implements parser for markdown text that generates AST (abstract syntax tree). +*/ +package parser + +import ( + "bytes" + "fmt" + "strings" + "unicode/utf8" + + "github.com/gomarkdown/markdown/ast" +) + +// Extensions is a bitmask of enabled parser extensions. +type Extensions int + +// Bit flags representing markdown parsing extensions. +// Use | (or) to specify multiple extensions. +const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Parse tables + FencedCode // Parse fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Parse definition lists + MathJax // Parse MathJax + OrderedListStart // Keep track of the first number used when starting an ordered list. + Attributes // Block Attributes + SuperSubscript // Super- and subscript support: 2^10^, H~2~O. + EmptyLinesBreakList // 2 empty lines break out of list + Includes // Support including other files. + Mmark // Support Mmark syntax, see https://mmark.nl/syntax + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists | MathJax +) + +// The size of a tab stop. +const ( + tabSizeDefault = 4 + tabSizeDouble = 8 +) + +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Parser, data []byte, offset int) (int, ast.Node) + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. 
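+//
+// A minimal sketch of an override that resolves one hypothetical id and
+// defers everything else to the normal reference logic:
+//
+//	func override(id string) (*parser.Reference, bool) {
+//		if id == "example" {
+//			return &parser.Reference{Link: "https://example.com/", Title: "Example"}, true
+//		}
+//		return nil, false
+//	}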
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// Parser is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Parser struct { + + // ReferenceOverride is an optional function callback that is called every + // time a reference is resolved. It can be set before starting parsing. + // + // In Markdown, the link reference syntax can be made to resolve a link to + // a reference instead of an inline URL, in one of the following ways: + // + // * [link text][refid] + // * [refid][] + // + // Usually, the refid is defined at the bottom of the Markdown document. If + // this override function is provided, the refid is passed to the override + // function first, before consulting the defined refids at the bottom. If + // the override function indicates an override did not occur, the refids at + // the bottom will be used to fill in the link details. + ReferenceOverride ReferenceOverrideFunc + + Opts Options + + // after parsing, this is AST root of parsed markdown text + Doc ast.Node + + extensions Extensions + + refs map[string]*reference + refsRecord map[string]struct{} + inlineCallback [256]inlineParser + nesting int + maxNesting int + insideLink bool + indexCnt int // incremented after every index + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + tip ast.Node // = doc + oldTip ast.Node + lastMatchedContainer ast.Node // = doc + allClosed bool + + // Attributes are attached to block level elements. + attr *ast.Attribute + + includeStack *incStack +} + +// New creates a markdown parser with CommonExtensions. +// +// You can then call `doc := p.Parse(markdown)` to parse markdown document +// and `markdown.Render(doc, renderer)` to convert it to another format with +// a renderer. +func New() *Parser { + return NewWithExtensions(CommonExtensions) +} + +// NewWithExtensions creates a markdown parser with given extensions. 
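+//
+// For example (sketch):
+//
+//	p := parser.NewWithExtensions(parser.CommonExtensions | parser.Footnotes)
+//	doc := p.Parse(markdown)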
+func NewWithExtensions(extension Extensions) *Parser { + p := Parser{ + refs: make(map[string]*reference), + refsRecord: make(map[string]struct{}), + maxNesting: 16, + insideLink: false, + Doc: &ast.Document{}, + extensions: extension, + allClosed: true, + includeStack: newIncStack(), + } + p.tip = p.Doc + p.oldTip = p.Doc + p.lastMatchedContainer = p.Doc + + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + if p.extensions&Mmark != 0 { + p.inlineCallback['('] = maybeShortRefOrIndex + } + p.inlineCallback['^'] = maybeInlineFootnoteOrSuper + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&MathJax != 0 { + p.inlineCallback['$'] = math + } + + return &p +} + +func (p *Parser) getRef(refid string) (ref *reference, found bool) { + if p.ReferenceOverride != nil { + r, overridden := p.ReferenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Parser) isFootnote(ref *reference) bool { + _, ok := p.refsRecord[string(ref.link)] + return ok +} + +func (p *Parser) finalize(block ast.Node) { + p.tip = block.GetParent() +} + +func (p *Parser) addChild(node ast.Node) ast.Node { + for !canNodeContain(p.tip, node) { + p.finalize(p.tip) + } + ast.AppendChild(p.tip, node) + p.tip = node + return node +} + +func canNodeContain(n ast.Node, v ast.Node) bool { + switch n.(type) { + case *ast.List: + return isListItem(v) + case *ast.Document, *ast.BlockQuote, *ast.Aside, *ast.ListItem, *ast.CaptionFigure: + return !isListItem(v) + case *ast.Table: + switch v.(type) { + case *ast.TableHeader, *ast.TableBody, *ast.TableFooter: + return true + default: + return false + } + case *ast.TableHeader, *ast.TableBody, *ast.TableFooter: + _, ok := v.(*ast.TableRow) + return ok + case *ast.TableRow: + _, ok := v.(*ast.TableCell) + return ok + } + return false +} + +func (p *Parser) closeUnmatchedBlocks() { + if p.allClosed { + return + } + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.GetParent() + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true +} + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. + Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// Parse generates AST (abstract syntax tree) representing markdown document. 
+// +// The result is a root of the tree whose underlying type is *ast.Document +// +// You can then convert AST to html using html.Renderer, to some other format +// using a custom renderer or transform the tree. +func (p *Parser) Parse(input []byte) ast.Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus { + switch node.(type) { + case *ast.Paragraph, *ast.Heading, *ast.TableCell: + p.Inline(node, node.AsContainer().Content) + node.AsContainer().Content = nil + } + return ast.GoToNext + }) + + if p.Opts.Flags&SkipFootnoteList == 0 { + p.parseRefsToAST() + } + return p.Doc +} + +func (p *Parser) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.Doc + list := &ast.List{ + IsFootnotesList: true, + ListFlags: ast.ListTypeOrdered, + } + p.addBlock(&ast.Footnotes{}) + block := p.addBlock(list) + flags := ast.ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addChild(ref.footnote) + block := ref.footnote + listItem := block.(*ast.ListItem) + listItem.ListFlags = flags | ast.ListTypeOrdered + listItem.RefLink = ref.link + if ref.hasBlock { + flags |= ast.ListItemContainsBlock + p.block(ref.title) + } else { + p.Inline(block, ref.title) + } + flags &^= ast.ListItemBeginningOfList | ast.ListItemContainsBlock + } + above := list.Parent + finalizeList(list) + p.tip = above + + ast.WalkFunc(block, func(node ast.Node, entering bool) ast.WalkStatus { + switch node.(type) { + case *ast.Paragraph, *ast.Heading: + p.Inline(node, node.AsContainer().Content) + node.AsContainer().Content = nil + } + return ast.GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. +// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Inline footnotes such as: +// +// Inline footnotes^[Not supported.] also exist. +// +// are not yet supported. + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+//	<p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote ast.Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Parser, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if linkEnd < len(data) && data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
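+//
+// For instance (sketch), given a footnote defined as:
+//
+//	[^note]: first line of the note
+//	    further indented lines belong to the same block
+//
+// the indented continuation lines are shifted left by one indent and
+// returned in contents.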
+func scanFootnote(p *Parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// isPunctuation returns true if c is a punctuation symbol. +func isPunctuation(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// isSpace returns true if c is a white-space charactr +func isSpace(c byte) bool { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// isLetter returns true if c is ascii letter +func isLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// isAlnum returns true if c is a digit or letter +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isAlnum(c byte) bool { + return (c >= '0' && c <= '9') || isLetter(c) +} + +// TODO: this is not used +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. +// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). 
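+//
+// Rough examples, assuming indentSize == 4:
+//
+//	isIndented([]byte("\tcode"), 4)   // 1
+//	isIndented([]byte("    code"), 4) // 4
+//	isIndented([]byte("  code"), 4)   // 0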
+func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isAlnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} + +func isListItem(d ast.Node) bool { + _, ok := d.(*ast.ListItem) + return ok +} diff --git a/vendor/github.com/gomarkdown/markdown/parser/ref.go b/vendor/github.com/gomarkdown/markdown/parser/ref.go new file mode 100644 index 000000000..0b59a196d --- /dev/null +++ b/vendor/github.com/gomarkdown/markdown/parser/ref.go @@ -0,0 +1,89 @@ +package parser + +import ( + "bytes" + "fmt" + + "github.com/gomarkdown/markdown/ast" +) + +// parse '(#r)', where r does not contain spaces. Or. +// (!item) (!item, subitem), for an index, (!!item) signals primary. +func maybeShortRefOrIndex(p *Parser, data []byte, offset int) (int, ast.Node) { + if len(data[offset:]) < 4 { + return 0, nil + } + // short ref first + data = data[offset:] + i := 1 + switch data[i] { + case '#': // cross ref + i++ + Loop: + for i < len(data) { + c := data[i] + switch { + case c == ')': + break Loop + case !isAlnum(c): + if c == '_' || c == '-' || c == ':' { + i++ + continue + } + i = 0 + break Loop + } + i++ + } + if i >= len(data) { + return 0, nil + } + if data[i] != ')' { + return 0, nil + } + + id := data[2:i] + node := &ast.CrossReference{} + node.Destination = id + + return i + 1, node + + case '!': // index + i++ + start := i + i = skipUntilChar(data, start, ')') + + // did we reach the end of the buffer without a closing marker? + if i >= len(data) { + return 0, nil + } + + if len(data[start:i]) < 1 { + return 0, nil + } + + idx := &ast.Index{} + + idx.ID = fmt.Sprintf("idxref:%d", p.indexCnt) + p.indexCnt++ + + idx.Primary = data[start] == '!' + buf := data[start:i] + + if idx.Primary { + buf = buf[1:] + } + items := bytes.Split(buf, []byte(",")) + switch len(items) { + case 1: + idx.Item = bytes.TrimSpace(items[0]) + return i + 1, idx + case 2: + idx.Item = bytes.TrimSpace(items[0]) + idx.Subitem = bytes.TrimSpace(items[1]) + return i + 1, idx + } + } + + return 0, nil +} diff --git a/vendor/github.com/mmarkdown/mmark/LICENSE.txt b/vendor/github.com/mmarkdown/mmark/LICENSE.txt new file mode 100644 index 000000000..688046102 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/LICENSE.txt @@ -0,0 +1,31 @@ +Markdown is distributed under the Simplified BSD License: + +Copyright © 2011 Russ Ross +Copyright © 2018 Krzysztof Kowalczyk +Copyright © 2018 Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with + the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mmarkdown/mmark/mast/bibliography.go b/vendor/github.com/mmarkdown/mmark/mast/bibliography.go new file mode 100644 index 000000000..9f1167b17 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mast/bibliography.go @@ -0,0 +1,23 @@ +package mast + +import ( + "github.com/gomarkdown/markdown/ast" + "github.com/mmarkdown/mmark/mast/reference" +) + +// Bibliography represents markdown bibliography node. +type Bibliography struct { + ast.Container + + Type ast.CitationTypes +} + +// BibliographyItem contains a single bibliography item. +type BibliographyItem struct { + ast.Leaf + + Anchor []byte + Type ast.CitationTypes + + Reference *reference.Reference // parsed reference XML +} diff --git a/vendor/github.com/mmarkdown/mmark/mast/index.go b/vendor/github.com/mmarkdown/mmark/mast/index.go new file mode 100644 index 000000000..6052ac6fd --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mast/index.go @@ -0,0 +1,34 @@ +package mast + +import "github.com/gomarkdown/markdown/ast" + +// DocumentIndex represents markdown document index node. +type DocumentIndex struct { + ast.Container +} + +// IndexItem contains an index for the indices section. +type IndexItem struct { + ast.Container + + *ast.Index +} + +// IndexSubItem contains an sub item index for the indices section. +type IndexSubItem struct { + ast.Container + + *ast.Index +} + +// IndexLetter has the Letter of this index item. +type IndexLetter struct { + ast.Container +} + +// IndexLink links to the index in the document. +type IndexLink struct { + *ast.Link + + Primary bool +} diff --git a/vendor/github.com/mmarkdown/mmark/mast/nodes.go b/vendor/github.com/mmarkdown/mmark/mast/nodes.go new file mode 100644 index 000000000..24ad9890b --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mast/nodes.go @@ -0,0 +1,171 @@ +package mast + +import ( + "bytes" + "sort" + + "github.com/gomarkdown/markdown/ast" +) + +// some extra functions for manipulation the AST + +// MoveChilderen moves the children from a to b *and* make the parent of each point to b. +// Any children of b are obliterated. +func MoveChildren(a, b ast.Node) { + a.SetChildren(b.GetChildren()) + b.SetChildren(nil) + + for _, child := range a.GetChildren() { + child.SetParent(a) + } +} + +// Some attribute helper functions. + +// AttributeFromNode returns the attribute from the node, if it was there was one. 
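+//
+// Together with the helpers below it supports usage like this sketch:
+//
+//	mast.AttributeInit(node)
+//	mast.SetAttribute(node, "id", []byte("intro"))
+//	id := mast.Attribute(node, "id") // []byte("intro")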
+func AttributeFromNode(node ast.Node) *ast.Attribute { + if c := node.AsContainer(); c != nil && c.Attribute != nil { + return c.Attribute + } + if l := node.AsLeaf(); l != nil && l.Attribute != nil { + return l.Attribute + } + return nil +} + +// AttributeInit will initialize an *Attribute on node if there wasn't one. +func AttributeInit(node ast.Node) { + if l := node.AsLeaf(); l != nil && l.Attribute == nil { + l.Attribute = &ast.Attribute{Attrs: make(map[string][]byte)} + return + } + if c := node.AsContainer(); c != nil && c.Attribute == nil { + c.Attribute = &ast.Attribute{Attrs: make(map[string][]byte)} + return + } +} + +// DeleteAttribute delete the attribute under key from a. +func DeleteAttribute(node ast.Node, key string) { + a := AttributeFromNode(node) + if a == nil { + return + } + + switch key { + case "id": + a.ID = nil + case "class": + // TODO + default: + delete(a.Attrs, key) + } +} + +// SetAttribute sets the attribute under key to value. +func SetAttribute(node ast.Node, key string, value []byte) { + a := AttributeFromNode(node) + if a == nil { + return + } + switch key { + case "id": + a.ID = value + case "class": + // TODO + default: + a.Attrs[key] = value + } +} + +// Attribute returns the attribute value under key. Use AttributeClass to retrieve +// a class. +func Attribute(node ast.Node, key string) []byte { + a := AttributeFromNode(node) + if a == nil { + return nil + } + switch key { + case "id": + return a.ID + case "class": + // use AttributeClass. + } + + return a.Attrs[key] +} + +func AttributeBytes(attr *ast.Attribute) []byte { + ret := &bytes.Buffer{} + ret.WriteByte('{') + if len(attr.ID) != 0 { + ret.WriteByte('#') + ret.Write(attr.ID) + } + for _, c := range attr.Classes { + if ret.Len() > 1 { + ret.WriteByte(' ') + } + ret.WriteByte('.') + ret.Write(c) + } + + keys := []string{} + for k := range attr.Attrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if ret.Len() > 1 { + ret.WriteByte(' ') + } + ret.WriteString(k) + ret.WriteString(`="`) + ret.Write(attr.Attrs[k]) + ret.WriteByte('"') + } + ret.WriteByte('}') + return ret.Bytes() +} + +// AttributeClass returns true is class key is set. +func AttributeClass(node ast.Node, key string) bool { + a := AttributeFromNode(node) + if a == nil { + return false + } + for _, c := range a.Classes { + if string(c) == key { + return true + } + } + return false +} + +// AttributeFilter runs the attribute on node through filter and only allows elements for which filter returns true. +func AttributeFilter(node ast.Node, filter func(key string) bool) { + a := AttributeFromNode(node) + if a == nil { + return + } + if !filter("id") { + a.ID = nil + } + if !filter("class") { + a.Classes = nil + } + for k, _ := range a.Attrs { + if !filter(k) { + delete(a.Attrs, k) + } + } +} + +// FilterFunc checks if s is an allowed key in an attribute. +// If s is: +// "id" the ID should be checked +// "class" the classes should be allowed or disallowed +// any other string means checking the individual attributes. +// it returns true for elements that are allows, false otherwise. +type FilterFunc func(s string) bool diff --git a/vendor/github.com/mmarkdown/mmark/mast/reference/reference.go b/vendor/github.com/mmarkdown/mmark/mast/reference/reference.go new file mode 100644 index 000000000..cefed4332 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mast/reference/reference.go @@ -0,0 +1,71 @@ +// Package reference defines the elements of a block. 
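+//
+// A <reference> element, as used in RFC bibliography XML, looks roughly
+// like this sketch:
+//
+//	<reference anchor="RFC2119" target="https://www.rfc-editor.org/info/rfc2119">
+//	  <front>
+//	    <title>Key words for use in RFCs to Indicate Requirement Levels</title>
+//	    <author initials="S." surname="Bradner" fullname="S. Bradner"/>
+//	    <date year="1997" month="March"/>
+//	  </front>
+//	</reference>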
+package reference + +import "encoding/xml" + +// Author is the reference author. +type Author struct { + Fullname string `xml:"fullname,attr,omitempty"` + Initials string `xml:"initials,attr,omitempty"` + Surname string `xml:"surname,attr,omitempty"` + Role string `xml:"role,attr,omitempty"` + Organization *Organization `xml:"organization,omitempty"` + Address *Address `xml:"address,omitempty"` +} + +type Organization struct { + Abbrev string `xml:"abbrev,attr,omitempty"` + Value string `xml:",chardata"` +} + +// this is copied from ../title.go; it might make sense to unify them, both especially, it we +// want to allow reference to be given in TOML as well. See #55. +// Author denotes an RFC author. + +// Address denotes the address of an RFC author. +type Address struct { + Phone string `xml:"phone,omitempty"` + Email string `xml:"email,omitempty"` + URI string `xml:"uri,omitempty"` + Postal AddressPostal `xml:"postal,omitempty"` +} + +// AddressPostal denotes the postal address of an RFC author. +type AddressPostal struct { + PostalLine []string `xml:"postalline,omitempty"` + + Streets []string `xml:"street,omitempty"` + Cities []string `xml:"city,omitempty"` + Codes []string `xml:"code,omitempty"` + Countries []string `xml:"country,omitempty"` + Regions []string `xml:"region,omitempty"` +} + +// Date is the reference date. +type Date struct { + Year string `xml:"year,attr,omitempty"` + Month string `xml:"month,attr,omitempty"` + Day string `xml:"day,attr,omitempty"` +} + +// Front the reference . +type Front struct { + Title string `xml:"title"` + Authors []Author `xml:"author,omitempty"` + Date Date `xml:"date"` +} + +// Format is the reference . This is deprecated in RFC 7991, see Section 3.3. +type Format struct { + Type string `xml:"type,attr,omitempty"` + Target string `xml:"target,attr"` +} + +// Reference is the entire structure. +type Reference struct { + XMLName xml.Name `xml:"reference"` + Anchor string `xml:"anchor,attr"` + Front Front `xml:"front"` + Format *Format `xml:"format,omitempty"` + Target string `xml:"target,attr"` +} diff --git a/vendor/github.com/mmarkdown/mmark/mast/title.go b/vendor/github.com/mmarkdown/mmark/mast/title.go new file mode 100644 index 000000000..4bcaffc57 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mast/title.go @@ -0,0 +1,95 @@ +package mast + +import ( + "time" + + "github.com/gomarkdown/markdown/ast" +) + +// Title represents the TOML encoded title block. +type Title struct { + ast.Leaf + *TitleData + Trigger string // either triggered by %%% or --- +} + +// NewTitle returns a pointer to TitleData with some defaults set. +func NewTitle(trigger byte) *Title { + t := &Title{ + TitleData: &TitleData{ + Area: "Internet", + Ipr: "trust200902", + Consensus: true, + }, + } + t.Trigger = string([]byte{trigger, trigger, trigger}) + return t +} + +const triggerDash = "---" + +func (t *Title) IsTriggerDash() bool { return t.Trigger == triggerDash } + +// TitleData holds all the elements of the title. 
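+//
+// In a document the title block is typically given as TOML between %%%
+// markers; a minimal sketch:
+//
+//	%%%
+//	title = "An Example Document"
+//	area = "Internet"
+//
+//	[seriesInfo]
+//	name = "Internet-Draft"
+//	value = "draft-example-00"
+//	%%%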
+type TitleData struct { + Title string + Abbrev string + + SeriesInfo SeriesInfo + Consensus bool + Ipr string // See https://tools.ietf.org/html/rfc7991#appendix-A.1 + Obsoletes []int + Updates []int + SubmissionType string // IETF, IAB, IRTF or independent + + Date time.Time + Area string + Workgroup string + Keyword []string + Author []Author +} + +// SeriesInfo holds details on the Internet-Draft or RFC, see https://tools.ietf.org/html/rfc7991#section-2.47 +type SeriesInfo struct { + Name string // name of the document, values are "RFC", "Internet-Draft", and "DOI" + Value string // either draft name, or number + Status string // The status of this document, values: "standard", "informational", "experimental", "bcp", "fyi", and "full-standard" + Stream string // "IETF" (default),"IAB", "IRTF" or "independent" +} + +// Author denotes an RFC author. +type Author struct { + Initials string + Surname string + Fullname string + Organization string + OrganizationAbbrev string `toml:"abbrev"` + Role string + ASCII string + Address Address +} + +// Address denotes the address of an RFC author. +type Address struct { + Phone string + Email string + URI string + Postal AddressPostal +} + +// AddressPostal denotes the postal address of an RFC author. +type AddressPostal struct { + Street string + City string + Code string + Country string + Region string + PostalLine []string + + // Plurals when these need to be specified multiple times. + Streets []string + Cities []string + Codes []string + Countries []string + Regions []string +} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/bibliography.go b/vendor/github.com/mmarkdown/mmark/mparser/bibliography.go new file mode 100644 index 000000000..c1275b6d8 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mparser/bibliography.go @@ -0,0 +1,184 @@ +package mparser + +import ( + "bytes" + "encoding/xml" + "log" + + "github.com/gomarkdown/markdown/ast" + "github.com/mmarkdown/mmark/mast" + "github.com/mmarkdown/mmark/mast/reference" +) + +// CitationToBibliography walks the AST and gets all the citations on HTML blocks and groups them into +// normative and informative references. +func CitationToBibliography(doc ast.Node) (normative ast.Node, informative ast.Node) { + seen := map[string]*mast.BibliographyItem{} + raw := map[string][]byte{} + + // Gather all citations. + // Gather all reference HTML Blocks to see if we have XML we can output. + ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { + switch c := node.(type) { + case *ast.Citation: + for i, d := range c.Destination { + if _, ok := seen[string(bytes.ToLower(d))]; ok { + continue + } + ref := &mast.BibliographyItem{} + ref.Anchor = d + ref.Type = c.Type[i] + + seen[string(d)] = ref + } + case *ast.HTMLBlock: + anchor := anchorFromReference(c.Literal) + if anchor != nil { + raw[string(bytes.ToLower(anchor))] = c.Literal + } + } + return ast.GoToNext + }) + + for _, r := range seen { + // If we have a reference anchor and the raw XML add that here. 
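+		// (for example, a literal <reference anchor="RFC2119" ...> block in
+		// the document body is unmarshalled below and attached to the item)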
+ if raw, ok := raw[string(bytes.ToLower(r.Anchor))]; ok { + var x reference.Reference + if e := xml.Unmarshal(raw, &x); e != nil { + log.Printf("Failed to unmarshal reference: %q: %s", r.Anchor, e) + continue + } + r.Reference = &x + } + + switch r.Type { + case ast.CitationTypeInformative: + if informative == nil { + informative = &mast.Bibliography{Type: ast.CitationTypeInformative} + } + + ast.AppendChild(informative, r) + case ast.CitationTypeSuppressed: + fallthrough + case ast.CitationTypeNormative: + if normative == nil { + normative = &mast.Bibliography{Type: ast.CitationTypeNormative} + } + ast.AppendChild(normative, r) + } + } + return normative, informative +} + +// NodeBackMatter is the place where we should inject the bibliography +func NodeBackMatter(doc ast.Node) ast.Node { + var matter ast.Node + + ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { + if mat, ok := node.(*ast.DocumentMatter); ok { + if mat.Matter == ast.DocumentMatterBack { + matter = mat + return ast.Terminate + } + } + return ast.GoToNext + }) + return matter +} + +// Parse '' and return the string after anchor= is the ID for the reference. +func anchorFromReference(data []byte) []byte { + if !bytes.HasPrefix(data, []byte("') { + i++ + } + + // no end-of-reference marker + if i > len(data) { + return nil, false + } + return data[:i], true +} + +func fmtReference(data []byte) []byte { + var x reference.Reference + if e := xml.Unmarshal(data, &x); e != nil { + return data + } + + out, e := xml.MarshalIndent(x, "", " ") + if e != nil { + return data + } + return out +} + +// AddBibliography adds the bibliography to the document. It will be +// added just after the backmatter node. If that node can't be found this +// function returns false and does nothing. +func AddBibliography(doc ast.Node) bool { + where := NodeBackMatter(doc) + if where == nil { + return false + } + + norm, inform := CitationToBibliography(doc) + if norm != nil { + ast.AppendChild(where, norm) + } + if inform != nil { + ast.AppendChild(where, inform) + } + return (norm != nil) || (inform != nil) +} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/extensions.go b/vendor/github.com/mmarkdown/mmark/mparser/extensions.go new file mode 100644 index 000000000..af25f5d11 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mparser/extensions.go @@ -0,0 +1,11 @@ +package mparser + +import ( + "github.com/gomarkdown/markdown/parser" +) + +// Extensions is the default set of extensions mmark requires. +var Extensions = parser.Tables | parser.FencedCode | parser.Autolink | parser.Strikethrough | + parser.SpaceHeadings | parser.HeadingIDs | parser.BackslashLineBreak | parser.SuperSubscript | + parser.DefinitionLists | parser.MathJax | parser.AutoHeadingIDs | parser.Footnotes | + parser.Strikethrough | parser.OrderedListStart | parser.Attributes | parser.Mmark | parser.Includes diff --git a/vendor/github.com/mmarkdown/mmark/mparser/hook.go b/vendor/github.com/mmarkdown/mmark/mparser/hook.go new file mode 100644 index 000000000..40b16798e --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mparser/hook.go @@ -0,0 +1,59 @@ +package mparser + +import ( + "io/ioutil" + "log" + "path/filepath" + + "github.com/gomarkdown/markdown/ast" + "github.com/gomarkdown/markdown/parser" +) + +var UnsafeInclude parser.Flags = 1 << 3 + +// Hook will call both TitleHook and ReferenceHook. 
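+//
+// Typical wiring, as a sketch:
+//
+//	p := parser.NewWithExtensions(mparser.Extensions)
+//	p.Opts = parser.Options{ParserHook: mparser.Hook}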
+func Hook(data []byte) (ast.Node, []byte, int) { + n, b, i := TitleHook(data) + if n != nil { + return n, b, i + } + + return ReferenceHook(data) +} + +// ReadInclude is the hook to read includes. +// Its supports the following options for address. +// +// 4,5 - line numbers separated by commas +// N, - line numbers, end not specified, read until the end. +// /start/,/end/ - regexp separated by commas +// optional a prefix="" string. +func (i Initial) ReadInclude(from, file string, address []byte) []byte { + path := i.path(from, file) + + if i.Flags&UnsafeInclude == 0 { + if ok := i.pathAllowed(path); !ok { + log.Printf("Failure to read: %q: path is not on or below %q", path, i.i) + return nil + } + } + + data, err := ioutil.ReadFile(path) + if err != nil { + log.Printf("Failure to read: %q (from %q)", err, filepath.Join(from, "*")) + return nil + } + + data, err = parseAddress(address, data) + if err != nil { + log.Printf("Failure to parse address for %q: %q (from %q)", path, err, filepath.Join(from, "*")) + return nil + } + if len(data) == 0 { + return data + } + if data[len(data)-1] != '\n' { + data = append(data, '\n') + } + return data +} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/include.go b/vendor/github.com/mmarkdown/mmark/mparser/include.go new file mode 100644 index 000000000..8737def73 --- /dev/null +++ b/vendor/github.com/mmarkdown/mmark/mparser/include.go @@ -0,0 +1,251 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for mmark, by Miek Gieben, 2015. +// Adapted for mmark2 (fastly simplified and features removed), 2018. + +package mparser + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/gomarkdown/markdown/parser" +) + +// Initial is the initial file we are working on, empty for stdin and adjusted is we we have an absolute or relative file. +type Initial struct { + Flags parser.Flags + i string +} + +// NewInitial returns an initialized Initial. +func NewInitial(s string) Initial { + if path.IsAbs(s) { + return Initial{i: path.Dir(s)} + } + + cwd, _ := os.Getwd() + if s == "" { + return Initial{i: cwd} + } + return Initial{i: path.Dir(filepath.Join(cwd, s))} +} + +// path returns the full path we should use according to from, file and initial. +func (i Initial) path(from, file string) string { + if path.IsAbs(file) { + return file + } + if path.IsAbs(from) { + filepath.Join(from, file) + } + + f1 := filepath.Join(i.i, from) + + return filepath.Join(f1, file) +} + +// pathAllowed returns true is file is on the same level or below the initial file. +func (i Initial) pathAllowed(file string) bool { + x, err := filepath.Rel(i.i, file) + if err != nil { + return false + } + return !strings.Contains(x, "..") +} + +// parseAddress parses a code address directive and returns the bytes or an error. +func parseAddress(addr []byte, data []byte) ([]byte, error) { + bytes.TrimSpace(addr) + + if len(addr) == 0 { + return data, nil + } + + // check for prefix, either as ;prefix, prefix; or just standalone prefix. 
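+	// e.g. (illustrative): for addr = `prefix="> ";/begin/,/end/` the quoted
+	// prefix is extracted and stripped first; the remaining /begin/,/end/
+	// range is resolved further below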
+	var prefix []byte
+	if x := bytes.Index(addr, []byte("prefix=")); x >= 0 {
+		if x+len("prefix=") >= len(addr) {
+			return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
+		}
+		start := x + len("prefix=")
+		quote := addr[start]
+		if quote != '\'' && quote != '"' {
+			return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
+		}
+
+		end := SkipUntilChar(addr, start+1, quote)
+		if end == len(addr) { // no closing quote
+			return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
+		}
+		prefix = addr[start+1 : end]
+		if len(prefix) == 0 {
+			return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
+		}
+
+		addr = append(addr[:x], addr[end+1:]...)
+		addr = bytes.Replace(addr, []byte(";"), []byte(""), 1)
+		if len(addr) == 0 {
+			data = addPrefix(data, prefix)
+			return data, nil
+		}
+	}
+
+	lo, hi, err := addrToByteRange(addr, data)
+	if err != nil {
+		return nil, err
+	}
+
+	// Acme pattern matches can stop mid-line,
+	// so run to end of line in both directions if not at line start/end.
+	for lo > 0 && data[lo-1] != '\n' {
+		lo--
+	}
+	if hi > 0 {
+		for hi < len(data) && data[hi-1] != '\n' {
+			hi++
+		}
+	}
+
+	data = data[lo:hi]
+	if prefix != nil {
+		data = addPrefix(data, prefix)
+	}
+	return data, nil
+}
+
+// addrToByteRange evaluates the given address. It returns the start and end index of the data we should return.
+// Supported syntax: N, M or /start/, /end/ .
+func addrToByteRange(addr, data []byte) (lo, hi int, err error) {
+	chunk := bytes.Split(addr, []byte(","))
+	if len(chunk) != 2 {
+		return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
+	}
+	left := bytes.TrimSpace(chunk[0])
+	right := bytes.TrimSpace(chunk[1])
+
+	if len(left) == 0 {
+		return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
+	}
+	// An empty right term means the range is open ended (handled below).
+
+	if left[0] == '/' { // regular expression
+		if left[len(left)-1] != '/' {
+			return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
+		}
+		if len(right) == 0 || right[0] != '/' || right[len(right)-1] != '/' {
+			return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
+		}
+
+		lo, hi, err = addrRegexp(data, string(left[1:len(left)-1]), string(right[1:len(right)-1]))
+		if err != nil {
+			return 0, 0, err
+		}
+	} else {
+		lo, err = strconv.Atoi(string(left))
+		if err != nil {
+			return 0, 0, err
+		}
+		i, j := 0, 0
+		for i < len(data) {
+			if data[i] == '\n' {
+				j++
+				if j >= lo {
+					break
+				}
+			}
+			i++
+		}
+		lo = i
+
+		if len(right) == 0 {
+			hi = len(data)
+			goto End
+		}
+
+		hi, err = strconv.Atoi(string(right))
+		if err != nil {
+			return 0, 0, err
+		}
+		i, j = 0, 0
+		for i < len(data) {
+			if data[i] == '\n' {
+				j++
+				if j+1 >= hi {
+					break
+				}
+			}
+			i++
+		}
+		hi = i
+	}
+
+End:
+	if lo > hi {
+		return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
+	}
+
+	return lo, hi, nil
+}
+
+// addrRegexp searches for pattern start and pattern end.
+func addrRegexp(data []byte, start, end string) (int, int, error) {
+	start = "(?m:" + start + ")" // match through newlines
+	reStart, err := regexp.Compile(start)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	end = "(?m:" + end + ")"
+	reEnd, err := regexp.Compile(end)
+	if err != nil {
+		return 0, 0, err
+	}
+	m := reStart.FindIndex(data)
+	if len(m) == 0 {
+		return 0, 0, errors.New("no match for " + start)
+	}
+	lo := m[0]
+
+	m = reEnd.FindIndex(data[lo:]) // start *from* lo
+	if len(m) == 0 {
+		return 0, 0, errors.New("no match for " + end)
+	}
+	hi := lo + m[0] // make the offset absolute again
+
+	return lo, hi, nil
+}
+
+// SkipUntilChar skips over data, starting at position i, until it hits the
+// character c (or the end of data) and returns the new position.
+func SkipUntilChar(data []byte, i int, c byte) int {
+	n := len(data)
+	for i < n && data[i] != c {
+		i++
+	}
+	return i
+}
+
+func addPrefix(data, prefix []byte) []byte {
+	b := &bytes.Buffer{}
+	b.Write(prefix)
+	// We are assured that data ends in a newline; it is dropped here (so no
+	// trailing prefix is emitted) and re-added by ReadInclude.
+	i := 0
+	for i < len(data)-1 {
+		b.WriteByte(data[i])
+		if data[i] == '\n' {
+			b.Write(prefix)
+		}
+		i++
+	}
+	return b.Bytes()
+}
diff --git a/vendor/github.com/mmarkdown/mmark/mparser/index.go b/vendor/github.com/mmarkdown/mmark/mparser/index.go
new file mode 100644
index 000000000..72b39c059
--- /dev/null
+++ b/vendor/github.com/mmarkdown/mmark/mparser/index.go
@@ -0,0 +1,111 @@
+package mparser
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/gomarkdown/markdown/ast"
+	"github.com/mmarkdown/mmark/mast"
+)
+
+// IndexToDocumentIndex crawls the entire doc searching for indices; it then returns
+// an mast.DocumentIndex that contains a tree:
+//
+//	IndexLetter
+//	- IndexItem
+//	  - IndexLink
+//	  - IndexSubItem
+//	    - IndexLink
+//	    - IndexLink
+//
+// which can then be rendered by the renderer.
+func IndexToDocumentIndex(doc ast.Node) *mast.DocumentIndex {
+	main := map[string]*mast.IndexItem{}
+	subitem := map[string][]*mast.IndexSubItem{} // gather these so we can add them in one swoop at the end
+
+	// Gather all indexes.
+	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
+		switch i := node.(type) {
+		case *ast.Index:
+			item := string(i.Item)
+
+			if _, ok := main[item]; !ok {
+				main[item] = &mast.IndexItem{Index: i}
+			}
+			// only the main item
+			if i.Subitem == nil {
+				ast.AppendChild(main[item], newLink(i.ID, len(main[item].GetChildren()), i.Primary))
+				return ast.GoToNext
+			}
+			// check if we already have a child with the subitem and then just add the link
+			for _, sub := range subitem[item] {
+				if bytes.Equal(sub.Subitem, i.Subitem) {
+					ast.AppendChild(sub, newLink(i.ID, len(sub.GetChildren()), i.Primary))
+					return ast.GoToNext
+				}
+			}
+
+			sub := &mast.IndexSubItem{Index: i}
+			ast.AppendChild(sub, newLink(i.ID, len(subitem[item]), i.Primary))
+			subitem[item] = append(subitem[item], sub)
+		}
+		return ast.GoToNext
+	})
+	if len(main) == 0 {
+		return nil
+	}
+
+	// Now add the subitem children to the correct main item.
+	for k, sub := range subitem {
+		// sort sub here ideally
+		for j := range sub {
+			ast.AppendChild(main[k], sub[j])
+		}
+	}
+
+	keys := []string{}
+	for k := range main {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	letters := []*mast.IndexLetter{}
+	var prevLetter byte
+	var il *mast.IndexLetter
+	for _, k := range keys {
+		letter := k[0]
+		if letter != prevLetter {
+			il = &mast.IndexLetter{}
+			il.Literal = []byte{letter}
+			letters = append(letters, il)
+		}
+		ast.AppendChild(il, main[k])
+		prevLetter = letter
+	}
+	docIndex := &mast.DocumentIndex{}
+	for i := range letters {
+		ast.AppendChild(docIndex, letters[i])
+	}
+
+	return docIndex
+}
+
+func newLink(id string, number int, primary bool) *mast.IndexLink {
+	link := &ast.Link{Destination: []byte(id)}
+	il := &mast.IndexLink{Link: link, Primary: primary}
+	il.Literal = []byte(fmt.Sprintf("%d", number))
+	return il
+}
+
+// AddIndex adds an index to the end of the current document. If no indices can
+// be found, this returns false and no index will be added.
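+//
+// A minimal usage sketch (assuming a document already parsed with this package's Hook):
+//
+//	doc := p.Parse(input)
+//	if mparser.AddIndex(doc) {
+//		// doc now ends with a *mast.DocumentIndex node
+//	}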
+func AddIndex(doc ast.Node) bool {
+	idx := IndexToDocumentIndex(doc)
+	if idx == nil {
+		return false
+	}
+
+	ast.AppendChild(doc, idx)
+	return true
+}
diff --git a/vendor/github.com/mmarkdown/mmark/mparser/title.go b/vendor/github.com/mmarkdown/mmark/mparser/title.go
new file mode 100644
index 000000000..3c5f72b71
--- /dev/null
+++ b/vendor/github.com/mmarkdown/mmark/mparser/title.go
@@ -0,0 +1,57 @@
+package mparser
+
+import (
+	"log"
+
+	"github.com/BurntSushi/toml"
+	"github.com/gomarkdown/markdown/ast"
+	"github.com/mmarkdown/mmark/mast"
+)
+
+// TitleHook parses a title block and returns it. The start and end of the
+// block can be signalled with %%% or --- (the latter to be more in line with
+// Hugo and other markdown dialects).
+func TitleHook(data []byte) (ast.Node, []byte, int) {
+	i := 0
+	if len(data) < 3 {
+		return nil, nil, 0
+	}
+
+	c := data[i] // first char can either be % or -
+	if c != '%' && c != '-' {
+		return nil, nil, 0
+	}
+
+	if data[i] != c || data[i+1] != c || data[i+2] != c {
+		return nil, nil, 0
+	}
+
+	i += 3
+	beg := i
+	found := false
+	// search for end.
+	for i < len(data)-2 {
+		if data[i] == c && data[i+1] == c && data[i+2] == c {
+			found = true
+			break
+		}
+		i++
+	}
+	if !found {
+		return nil, nil, 0
+	}
+
+	node := mast.NewTitle(c)
+	buf := data[beg:i]
+
+	if c == '-' {
+		node.Content = buf
+		return node, nil, i + 3
+	}
+
+	if _, err := toml.Decode(string(buf), node.TitleData); err != nil {
+		log.Printf("Failure parsing title block: %s", err)
+	}
+	node.Content = buf
+
+	return node, nil, i + 3
+}
diff --git a/vendor/github.com/tallclair/mdtoc/.gitignore b/vendor/github.com/tallclair/mdtoc/.gitignore
new file mode 100644
index 000000000..c49427318
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/.gitignore
@@ -0,0 +1 @@
+mdtoc
diff --git a/vendor/github.com/tallclair/mdtoc/CONTRIBUTING.md b/vendor/github.com/tallclair/mdtoc/CONTRIBUTING.md
new file mode 100644
index 000000000..db177d4ac
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/CONTRIBUTING.md
@@ -0,0 +1,28 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
diff --git a/vendor/github.com/tallclair/mdtoc/LICENSE b/vendor/github.com/tallclair/mdtoc/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/tallclair/mdtoc/README.md b/vendor/github.com/tallclair/mdtoc/README.md
new file mode 100644
index 000000000..6647951d1
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/README.md
@@ -0,0 +1,53 @@
+# Markdown Table of Contents Generator
+
+`mdtoc` is a utility for generating a table-of-contents for markdown files.
+
+Only github-flavored markdown is currently supported, but I am open to accepting patches to add
+other formats.
+
+# Table of Contents
+
+Generated with `mdtoc --inplace README.md`
+
+<!-- toc -->
+- [Usage](#usage)
+- [Installation](#installation)
+<!-- /toc -->
+
+## Usage
+
+Usage: `mdtoc [OPTIONS] [FILE]...`
+Generate a table of contents for a markdown file (github flavor).
+
+TOC may be wrapped in a pair of tags to allow in-place updates:
+```
+<!-- toc -->
+generated TOC goes here
+<!-- /toc -->
+```
+
+TOC indentation is normalized, so the shallowest header has indentation 0.
+
+**Options:**
+
+`--dryrun` - Whether to check for changes to TOC, rather than overwriting.
+Requires `--inplace` flag. Exit code 1 if there are changes.
+
+`--inplace` - Whether to edit the file in-place, or output to STDOUT. Requires
+toc tags to be present.
+
+`--skip-prefix` - Whether to ignore any headers before the opening toc
+tag. (default true)
+
+For example, with `--skip-prefix=false` the TOC for this file becomes:
+
+```
+- [Markdown Table of Contents Generator](#markdown-table-of-contents-generator)
+- [Table of Contents](#table-of-contents)
+  - [Usage](#usage)
+  - [Installation](#installation)
+```
+
+## Installation
+
+```
+go get github.com/tallclair/mdtoc
+```
diff --git a/vendor/github.com/tallclair/mdtoc/go.mod b/vendor/github.com/tallclair/mdtoc/go.mod
new file mode 100644
index 000000000..9428ed42b
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/go.mod
@@ -0,0 +1,11 @@
+module github.com/tallclair/mdtoc
+
+go 1.12
+
+require (
+	github.com/BurntSushi/toml v0.3.1 // indirect
+	github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1 // indirect
+	github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4
+	github.com/mmarkdown/mmark v2.0.40+incompatible
+	github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/tallclair/mdtoc/go.sum b/vendor/github.com/tallclair/mdtoc/go.sum
new file mode 100644
index 000000000..5b25ce200
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/go.sum
@@ -0,0 +1,15 @@
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1 h1:E42haoFVyge+y3G2q/lwksiZPu6bMmim1xLxjOjPeEw=
+github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1/go.mod h1:XfZS1iyC28CnllR54Ou2Ero6qs4Rmn7GpVumNSj1DZo=
+github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4 h1:vELsocEzlhM4lk2nhxolEaQrMp25u7/i9IX8s9uLads=
+github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4/go.mod h1:gmFANS06wAVmF0B9yi65QKsRmPQ97tze7FRLswua+OY=
+github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
+github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/tallclair/mdtoc/mdtoc.go b/vendor/github.com/tallclair/mdtoc/mdtoc.go
new file mode 100644
index 000000000..7a664b75a
--- /dev/null
+++ b/vendor/github.com/tallclair/mdtoc/mdtoc.go
@@ -0,0 +1,301 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"math"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/gomarkdown/markdown/ast"
+	"github.com/gomarkdown/markdown/html"
+	"github.com/gomarkdown/markdown/parser"
+	"github.com/mmarkdown/mmark/mparser"
+)
+
+const (
+	startTOC = "<!-- toc -->"
+	endTOC   = "<!-- /toc -->"
+)
+
+type options struct {
+	dryrun     bool
+	inplace    bool
+	skipPrefix bool
+}
+
+var defaultOptions options
+
+func init() {
+	flag.BoolVar(&defaultOptions.dryrun, "dryrun", false, "Whether to check for changes to TOC, rather than overwriting. Requires --inplace flag.")
+	flag.BoolVar(&defaultOptions.inplace, "inplace", false, "Whether to edit the file in-place, or output to STDOUT. Requires toc tags to be present.")
+	flag.BoolVar(&defaultOptions.skipPrefix, "skip-prefix", true, "Whether to ignore any headers before the opening toc tag.")
+
+	flag.Usage = func() {
+		fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [OPTIONS] [FILE]...\n", os.Args[0])
+		fmt.Fprintf(flag.CommandLine.Output(), "Generate a table of contents for a markdown file (github flavor).\n")
+		fmt.Fprintf(flag.CommandLine.Output(), "TOC may be wrapped in a pair of tags to allow in-place updates: <!-- toc --><!-- /toc -->\n")
+		flag.PrintDefaults()
+	}
+}
+
+func main() {
+	flag.Parse()
+	if err := validateArgs(defaultOptions, flag.Args()); err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		flag.Usage()
+		os.Exit(1)
+	}
+
+	hadError := false
+	for _, file := range flag.Args() {
+		toc, err := run(file, defaultOptions)
+		if err != nil {
+			log.Printf("%s: %v", file, err)
+			hadError = true
+		} else if !defaultOptions.inplace {
+			fmt.Println(toc)
+		}
+	}
+
+	if hadError {
+		os.Exit(1)
+	}
+}
+
+func validateArgs(opts options, args []string) error {
+	if len(args) < 1 {
+		return fmt.Errorf("must specify at least 1 file")
+	}
+	if !opts.inplace && len(args) > 1 {
+		return fmt.Errorf("non-inplace updates require exactly 1 file")
+	}
+	if opts.dryrun && !opts.inplace {
+		return fmt.Errorf("--dryrun requires --inplace")
+	}
+	return nil
+}
+
+// run the TOC generator on file with options.
+// Returns the generated TOC, and any error.
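+//
+// With --inplace, the file must already contain the tags, e.g.:
+//
+//	<!-- toc -->
+//	(existing TOC, replaced on update)
+//	<!-- /toc -->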
+func run(file string, opts options) (string, error) {
+	raw, err := ioutil.ReadFile(file)
+	if err != nil {
+		return "", fmt.Errorf("unable to read %s: %v", file, err)
+	}
+
+	start := bytes.Index(raw, []byte(startTOC))
+	end := bytes.Index(raw, []byte(endTOC))
+	if tocTagRequired(opts) {
+		if start == -1 {
+			return "", fmt.Errorf("missing opening TOC tag")
+		}
+		if end == -1 {
+			return "", fmt.Errorf("missing closing TOC tag")
+		}
+		if end < start {
+			return "", fmt.Errorf("TOC closing tag before start tag")
+		}
+	}
+
+	var prefix, doc []byte
+	// skipPrefix is only used when toc tags are present.
+	if opts.skipPrefix && start != -1 && end != -1 {
+		prefix = raw[:start]
+		doc = raw[end:]
+	} else {
+		doc = raw
+	}
+	toc, err := generateTOC(prefix, doc)
+	if err != nil {
+		return toc, fmt.Errorf("failed to generate toc: %v", err)
+	}
+
+	if !opts.inplace {
+		return toc, nil
+	}
+
+	realStart := start + len(startTOC)
+	oldTOC := string(raw[realStart:end])
+	if strings.TrimSpace(oldTOC) == strings.TrimSpace(toc) {
+		// No changes required.
+		return toc, nil
+	} else if opts.dryrun {
+		return toc, fmt.Errorf("changes found:\n%s", toc)
+	}
+
+	err = atomicWrite(file,
+		string(raw[:start]),
+		startTOC+"\n",
+		string(toc),
+		string(raw[end:]),
+	)
+	return toc, err
+}
+
+// atomicWrite writes the chunks sequentially to the filePath.
+// A temporary file is used so no changes are made to the original in the case of an error.
+func atomicWrite(filePath string, chunks ...string) error {
+	tmpPath := filePath + "_tmp"
+	tmp, err := os.OpenFile(tmpPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+	if err != nil {
+		return fmt.Errorf("unable to open temporary file %s: %v", tmpPath, err)
+	}
+
+	// Cleanup; the remove is a no-op after a successful rename.
+	defer func() {
+		tmp.Close()
+		os.Remove(tmpPath)
+	}()
+
+	for _, chunk := range chunks {
+		if _, err := tmp.WriteString(chunk); err != nil {
+			return err
+		}
+	}
+
+	if err := tmp.Close(); err != nil {
+		return err
+	}
+	return os.Rename(tmp.Name(), filePath)
+}
+
+// parse parses a raw markdown document to an AST.
+func parse(b []byte) ast.Node {
+	p := parser.NewWithExtensions(parser.CommonExtensions)
+	p.Opts = parser.Options{
+		// mparser is required for parsing the --- title blocks.
+		ParserHook: mparser.Hook,
+	}
+	return p.Parse(b)
+}
+
+func generateTOC(prefix []byte, doc []byte) (string, error) {
+	prefixMd := parse(prefix)
+	anchors := make(anchorGen)
+	// Start counting anchors from the beginning of the doc.
+	walkHeadings(prefixMd, func(heading *ast.Heading) {
+		anchors.mkAnchor(asText(heading))
+	})
+
+	md := parse(doc)
+
+	baseLvl := headingBase(md)
+	toc := &bytes.Buffer{}
+	htmlRenderer := html.NewRenderer(html.RendererOptions{})
+	walkHeadings(md, func(heading *ast.Heading) {
+		anchor := anchors.mkAnchor(asText(heading))
+		content := headingBody(htmlRenderer, heading)
+		fmt.Fprintf(toc, "%s- [%s](#%s)\n", strings.Repeat("  ", heading.Level-baseLvl), content, anchor)
+	})
+
+	return toc.String(), nil
+}
+
+func tocTagRequired(opts options) bool {
+	return opts.inplace
+}
+
+type headingFn func(heading *ast.Heading)
+
+// walkHeadings runs the heading function on each heading in the parsed markdown document.
+func walkHeadings(doc ast.Node, headingFn headingFn) error {
+	var err error
+	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
+		if !entering {
+			return ast.GoToNext // Don't care about closing the heading section.
+		}
+
+		heading, ok := node.(*ast.Heading)
+		if !ok {
+			return ast.GoToNext // Ignore non-heading nodes.
+ } + + if heading.IsTitleblock { + return ast.GoToNext // Ignore title blocks (the --- section) + } + + headingFn(heading) + + return ast.GoToNext + }) + return err +} + +func asText(node ast.Node) string { + var text string + ast.WalkFunc(node, func(node ast.Node, entering bool) ast.WalkStatus { + if !entering { + return ast.GoToNext // Don't care about closing the heading section. + } + t, ok := node.(*ast.Text) + if !ok { + return ast.GoToNext // Ignore non-text nodes. + } + + text += string(t.AsLeaf().Literal) + return ast.GoToNext + }) + return text +} + +// Renders the heading body as HTML +func headingBody(renderer *html.Renderer, heading *ast.Heading) string { + var buf bytes.Buffer + for _, child := range heading.Children { + ast.WalkFunc(child, func(node ast.Node, entering bool) ast.WalkStatus { + return renderer.RenderNode(&buf, node, entering) + }) + } + return strings.TrimSpace(buf.String()) +} + +// headingBase finds the minimum heading level. This is useful for normalizing indentation, such as +// when a top-level heading is skipped in the prefix. +func headingBase(doc ast.Node) int { + baseLvl := math.MaxInt32 + walkHeadings(doc, func(heading *ast.Heading) { + if baseLvl > heading.Level { + baseLvl = heading.Level + } + }) + return baseLvl +} + +// Match punctuation that is filtered out from anchor IDs. +var punctuation = regexp.MustCompile(`[^\w\- ]`) + +// anchorGen is used to generate heading anchor IDs, using the github-flavored markdown syntax. +type anchorGen map[string]int + +func (a anchorGen) mkAnchor(text string) string { + text = strings.ToLower(text) + text = punctuation.ReplaceAllString(text, "") + text = strings.ReplaceAll(text, " ", "-") + idx := a[text] + a[text] = idx + 1 + if idx > 0 { + return fmt.Sprintf("%s-%d", text, idx) + } + return text +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a347d8eb9..8647762a4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,6 +7,8 @@ github.com/Azure/go-autorest/autorest/azure github.com/Azure/go-autorest/logger github.com/Azure/go-autorest/version github.com/Azure/go-autorest/autorest/date +# github.com/BurntSushi/toml v0.3.1 +github.com/BurntSushi/toml # github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 @@ -66,6 +68,10 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4 +github.com/gomarkdown/markdown/ast +github.com/gomarkdown/markdown/html +github.com/gomarkdown/markdown/parser # github.com/google/btree v0.0.0-20160524151835-7d79101e329e github.com/google/btree # github.com/google/go-cmp v0.3.0 @@ -125,6 +131,10 @@ github.com/mitchellh/go-ps github.com/mitchellh/hashstructure # github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure +# github.com/mmarkdown/mmark v2.0.40+incompatible +github.com/mmarkdown/mmark/mparser +github.com/mmarkdown/mmark/mast +github.com/mmarkdown/mmark/mast/reference # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 @@ -207,6 +217,8 @@ github.com/spf13/afero/mem github.com/spf13/cobra # github.com/spf13/pflag v1.0.3 github.com/spf13/pflag +# github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813 +github.com/tallclair/mdtoc # github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 
github.com/tv42/httpunix # github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272