Add go dependencies for mdtoc

Manuel Alejandro de Brito Fontes 2019-07-21 14:00:47 -04:00
parent cb33c4ed26
commit 36959a4878
No known key found for this signature in database
GPG key ID: 786136016A8BA02A
63 changed files with 12675 additions and 0 deletions
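mdtoc (github.com/tallclair/mdtoc) is a small generator for Markdown tables of contents; vendoring it is what pulls in the transitive dependencies visible in the go.sum hunks below (gomarkdown/markdown, mmarkdown/mmark, ekalinin/github-markdown-toc, BurntSushi/toml), and those vendor trees account for most of the 63 files. For context, the tool is typically run against a Markdown file in place, along the lines of `mdtoc --inplace README.md` (invocation shown for illustration only; check the mdtoc README for the exact flags).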

1
go.mod

@@ -37,6 +37,7 @@ require (
github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945 // indirect
github.com/spf13/cobra v0.0.4
github.com/spf13/pflag v1.0.3
github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926
github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980

8
go.sum

@@ -4,6 +4,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -49,6 +50,7 @@ github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4
github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1/go.mod h1:XfZS1iyC28CnllR54Ou2Ero6qs4Rmn7GpVumNSj1DZo=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
@@ -117,6 +119,8 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4 h1:vELsocEzlhM4lk2nhxolEaQrMp25u7/i9IX8s9uLads=
github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4/go.mod h1:gmFANS06wAVmF0B9yi65QKsRmPQ97tze7FRLswua+OY=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
@@ -182,6 +186,8 @@ github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
@@ -269,6 +275,8 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813 h1:1Q3NGZNH8/oSGhSe0J8WjkFCeIOb0JSy7M4RpqY64XI=
github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813/go.mod h1:BjDk9nfX4091pXLHhvf6Ejr4/r//9NslWmweWb2Hkbs=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=

5
vendor/github.com/BurntSushi/toml/.gitignore generated vendored Normal file

@@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

15
vendor/github.com/BurntSushi/toml/.travis.yml generated vendored Normal file

@@ -0,0 +1,15 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

3
vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored Normal file

@@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

21
vendor/github.com/BurntSushi/toml/COPYING generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

19
vendor/github.com/BurntSushi/toml/Makefile generated vendored Normal file

@@ -0,0 +1,19 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

218
vendor/github.com/BurntSushi/toml/README.md generated vendored Normal file

@@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
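For a quick self-contained variant (an editor's sketch, assuming the document above is saved as `example.toml` and the `tomlConfig` types above are in scope, with `time` imported where they are declared):
```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var config tomlConfig
	// DecodeFile reads the file and decodes it in one step.
	if _, err := toml.DecodeFile("example.toml", &config); err != nil {
		log.Fatal(err)
	}
	// The TOML key `title` fills the Go field `Title` via the
	// case-insensitive fallback match described above.
	fmt.Println("Title:", config.Title)
	// Map keys become the sub-table names, e.g. "alpha" and "beta".
	for name, srv := range config.Servers {
		fmt.Printf("server %s: %s (%s)\n", name, srv.IP, srv.DC)
	}
}
```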

509
vendor/github.com/BurntSushi/toml/decode.go generated vendored Normal file

@@ -0,0 +1,509 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
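// Example (editor's sketch, from a client package's point of view): delay
// decoding a section whose concrete type is only known at run time.
//
//	var wrapper struct {
//		Extra toml.Primitive
//	}
//	md, err := toml.Decode(blob, &wrapper)
//	// ... later, once the desired shape is known:
//	var extra map[string]interface{}
//	err = md.PrimitiveDecode(wrapper.Extra, &extra)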
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
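// Example (editor's sketch): because TOML hashes map to structs or maps
// interchangeably, the same document decodes into either shape.
//
//	doc := "[server]\nip = \"10.0.0.1\""
//	var asStruct struct{ Server struct{ IP string } }
//	var asMap map[string]map[string]string
//	_, _ = toml.Decode(doc, &asStruct) // case-insensitive match fills IP
//	_, _ = toml.Decode(doc, &asMap)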
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

121
vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored Normal file

@@ -0,0 +1,121 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically, e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
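// Example (editor's sketch): warn about configuration keys the destination
// struct doesn't model, using Undecoded after a normal Decode.
//
//	var conf Config
//	md, err := toml.Decode(blob, &conf)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, key := range md.Undecoded() {
//		log.Printf("unrecognized option: %s", key)
//	}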

27
vendor/github.com/BurntSushi/toml/doc.go generated vendored Normal file

@@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml

568
vendor/github.com/BurntSushi/toml/encode.go generated vendored Normal file

@@ -0,0 +1,568 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
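// Example (editor's sketch): a minimal use of the encoder, writing a
// one-key map into a bytes.Buffer.
//
//	buf := new(bytes.Buffer)
//	err := toml.NewEncoder(buf).Encode(map[string]string{
//		"title": "TOML Example",
//	})
//	// on success, buf contains: title = "TOML Example"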
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeName returns the TOML type name of the Go value's type. It is
// used to determine whether the types of array elements are mixed (which is
// forbidden). If the Go value is nil, then it is illegal for it to be an array
// element, and valueIsNil is returned as true.
// Returns the TOML type of a Go value. The type may be `nil`, which means
// no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

19
vendor/github.com/BurntSushi/toml/encoding_types.go generated vendored Normal file

@@ -0,0 +1,19 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go generated vendored Normal file

@@ -0,0 +1,18 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

953
vendor/github.com/BurntSushi/toml/lex.go generated vendored Normal file

@@ -0,0 +1,953 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
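// Example (editor's sketch): roughly how the parser drives this lexer,
// pulling items until an EOF or error item appears.
//
//	lx := lex(`answer = 42`)
//	for it := lx.nextItem(); it.typ != itemEOF && it.typ != itemError; it = lx.nextItem() {
//		fmt.Printf("line %d: item %d %q\n", it.line, it.typ, it.val)
//	}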
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b', 't', 'n', 'f', 'r', '"', '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or a datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
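The whole lexer above is one instance of the stateFn pattern: each state function consumes a little input and returns the next state, and lexing stops when a state returns nil. A minimal, self-contained sketch of that run loop (illustrative names only, not part of the vendored file):

package main

import "fmt"

type lexer struct {
	input string
	pos   int
}

// stateFn mirrors the pattern above: a state returns the next state, or nil.
type stateFn func(*lexer) stateFn

func lexAll(lx *lexer) stateFn {
	if lx.pos >= len(lx.input) {
		return nil // end of input terminates the machine
	}
	fmt.Printf("consumed %q\n", lx.input[lx.pos])
	lx.pos++
	return lexAll
}

func main() {
	lx := &lexer{input: "ab"}
	for state := stateFn(lexAll); state != nil; {
		state = state(lx)
	}
}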

vendor/github.com/BurntSushi/toml/parse.go generated vendored Normal file
@ -0,0 +1,592 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
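// A few concrete cases, which follow directly from the loops above:
//
//	numUnderscoresOK("1_000") == true   // the '_' is surrounded by digits
//	numUnderscoresOK("_100") == false   // leading underscore
//	numUnderscoresOK("100_") == false   // trailing underscore
//	numPeriodsOK("3.14") == true        // '.' is followed by a digit
//	numPeriodsOK("3.") == false         // nothing follows the '.'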
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
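// For example, the header "[a.b.c]" first creates implicit hashes for "a"
// and "a.b" (recorded in p.implicits), then the explicit hash "a.b.c".
// A later "[a.b]" header is legal exactly once: setValue sees the key
// already exists, notices it was implicit, and un-marks it instead of
// raising a duplicate-key error.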
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, and will
// account for implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as implicit, so that
// another redefinition provokes an error.
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
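// For example, replaceEscapes(`a\tb\u00E9`) returns "a\tbé": each escape
// is folded to its rune, and the lexer has already guaranteed that \u and
// \U are followed by exactly 4 and 8 hex digits respectively.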
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}
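parse itself is unexported; user code reaches it through the package's public decoding entry points. A hedged usage sketch, assuming the package's toml.Decode API (defined elsewhere in the package, not in this diff):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name  string
	Port  int64
	Ratio float64
}

func main() {
	var c config
	// 8_080 exercises numUnderscoresOK; 10.5 exercises numPeriodsOK.
	doc := "name = 'svc'\nport = 8_080\nratio = 10.5"
	if _, err := toml.Decode(doc, &c); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%+v\n", c) // {Name:svc Port:8080 Ratio:10.5}
}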

vendor/github.com/BurntSushi/toml/session.vim generated vendored Normal file
@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

vendor/github.com/BurntSushi/toml/type_check.go generated vendored Normal file
@ -0,0 +1,91 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}
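// For example, [1, 2, 3] produces types [Integer Integer Integer] and is
// accepted, while [1, "two"] mixes Integer and String and panics through
// p.panicf above.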

vendor/github.com/BurntSushi/toml/type_fields.go generated vendored Normal file
@ -0,0 +1,242 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
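The dominance rules mirror encoding/json: a shallower field wins, and a toml-tagged field beats an untagged one at the same depth. A hedged illustration with hypothetical types (assuming the exported decoder consults typeFields, as cachedTypeFields suggests):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Name string // reachable at depth 1 through embedding
}

type Outer struct {
	Base
	Name string `toml:"name"` // depth 0 and tagged: dominates Base.Name
}

func main() {
	var o Outer
	if _, err := toml.Decode(`name = "top"`, &o); err != nil {
		panic(err)
	}
	fmt.Println(o.Name, o.Base.Name == "") // top true
}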

vendor/github.com/gomarkdown/markdown/LICENSE.txt generated vendored Normal file
@ -0,0 +1,31 @@
Markdown is distributed under the Simplified BSD License:
Copyright © 2011 Russ Ross
Copyright © 2018 Krzysztof Kowalczyk
Copyright © 2018 Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/gomarkdown/markdown/ast/attribute.go generated vendored Normal file
@ -0,0 +1,10 @@
package ast
// An attribute can be attached to block elements. They are specified as
// {#id .class key="value"}, where quotes around values are mandatory, and
// multiple key/value pairs are separated by whitespace.
type Attribute struct {
ID []byte
Classes [][]byte
Attrs map[string][]byte
}

vendor/github.com/gomarkdown/markdown/ast/doc.go generated vendored Normal file
@ -0,0 +1,4 @@
/*
Package ast defines tree representation of a parsed markdown document.
*/
package ast

vendor/github.com/gomarkdown/markdown/ast/node.go generated vendored Normal file
@ -0,0 +1,554 @@
package ast
// ListType contains bitwise or'ed flags for list and list item objects.
type ListType int
// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
ListTypeOrdered ListType = 1 << iota
ListTypeDefinition
ListTypeTerm
ListItemContainsBlock
ListItemBeginningOfList // TODO: figure out if this is of any use now
ListItemEndOfList
)
// CellAlignFlags holds a type of alignment in a table cell.
type CellAlignFlags int
// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
TableAlignmentLeft CellAlignFlags = 1 << iota
TableAlignmentRight
TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
)
func (a CellAlignFlags) String() string {
switch a {
case TableAlignmentLeft:
return "left"
case TableAlignmentRight:
return "right"
case TableAlignmentCenter:
return "center"
default:
return ""
}
}
// DocumentMatters holds the type of a {front,main,back}matter in the document
type DocumentMatters int
// These are all possible Document divisions.
const (
DocumentMatterNone DocumentMatters = iota
DocumentMatterFront
DocumentMatterMain
DocumentMatterBack
)
// CitationTypes holds the type of a citation, informative, normative or suppressed
type CitationTypes int
const (
CitationTypeNone CitationTypes = iota
CitationTypeSuppressed
CitationTypeInformative
CitationTypeNormative
)
// Node defines an ast node
type Node interface {
AsContainer() *Container
AsLeaf() *Leaf
GetParent() Node
SetParent(newParent Node)
GetChildren() []Node
SetChildren(newChildren []Node)
}
// Container is a type of node that can contain children
type Container struct {
Parent Node
Children []Node
Literal []byte // Text contents of the leaf nodes
Content []byte // Markdown content of the block nodes
*Attribute // Block level attribute
}
// AsContainer returns itself as *Container
func (c *Container) AsContainer() *Container {
return c
}
// AsLeaf returns nil
func (c *Container) AsLeaf() *Leaf {
return nil
}
// GetParent returns parent node
func (c *Container) GetParent() Node {
return c.Parent
}
// SetParent sets the parent node
func (c *Container) SetParent(newParent Node) {
c.Parent = newParent
}
// GetChildren returns children nodes
func (c *Container) GetChildren() []Node {
return c.Children
}
// SetChildren sets children node
func (c *Container) SetChildren(newChildren []Node) {
c.Children = newChildren
}
// Leaf is a type of node that cannot have children
type Leaf struct {
Parent Node
Literal []byte // Text contents of the leaf nodes
Content []byte // Markdown content of the block nodes
*Attribute // Block level attribute
}
// AsContainer returns nil
func (l *Leaf) AsContainer() *Container {
return nil
}
// AsLeaf returns itself as *Leaf
func (l *Leaf) AsLeaf() *Leaf {
return l
}
// GetParent returns parent node
func (l *Leaf) GetParent() Node {
return l.Parent
}
// SetParent sets the parent node
func (l *Leaf) SetParent(newParent Node) {
l.Parent = newParent
}
// GetChildren returns nil because Leaf cannot have children
func (l *Leaf) GetChildren() []Node {
return nil
}
// SetChildren will panic because Leaf cannot have children
func (l *Leaf) SetChildren(newChildren []Node) {
panic("leaf node cannot have children")
}
// Document represents markdown document node, a root of ast
type Document struct {
Container
}
// DocumentMatter represents markdown node that signals a document
// division: frontmatter, mainmatter or backmatter.
type DocumentMatter struct {
Container
Matter DocumentMatters
}
// BlockQuote represents markdown block quote node
type BlockQuote struct {
Container
}
// Aside represents a markdown aside node.
type Aside struct {
Container
}
// List represents markdown list node
type List struct {
Container
ListFlags ListType
Tight bool // Skip <p>s around list item data if true
BulletChar byte // '*', '+' or '-' in bullet lists
Delimiter byte // '.' or ')' after the number in ordered lists
Start int // for ordered lists this indicates the starting number if > 0
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
IsFootnotesList bool // This is a list of footnotes
}
// ListItem represents markdown list item node
type ListItem struct {
Container
ListFlags ListType
Tight bool // Skip <p>s around list item data if true
BulletChar byte // '*', '+' or '-' in bullet lists
Delimiter byte // '.' or ')' after the number in ordered lists
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
IsFootnotesList bool // This is a list of footnotes
}
// Paragraph represents markdown paragraph node
type Paragraph struct {
Container
}
// Math represents markdown MathAjax inline node
type Math struct {
Leaf
}
// MathBlock represents markdown MathAjax block node
type MathBlock struct {
Container
}
// Heading represents markdown heading node
type Heading struct {
Container
Level int // This holds the heading level number
HeadingID string // This might hold heading ID, if present
IsTitleblock bool // Specifies whether it's a title block
IsSpecial bool // We are a special heading (starts with .#)
}
// HorizontalRule represents markdown horizontal rule node
type HorizontalRule struct {
Leaf
}
// Emph represents markdown emphasis node
type Emph struct {
Container
}
// Strong represents markdown strong node
type Strong struct {
Container
}
// Del represents markdown del node
type Del struct {
Container
}
// Link represents markdown link node
type Link struct {
Container
Destination []byte // Destination is what goes into a href
Title []byte // Title is the tooltip thing that goes in a title attribute
NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
Footnote Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
DeferredID []byte // If a deferred link this holds the original ID.
}
// CrossReference is a reference node.
type CrossReference struct {
Container
Destination []byte // Destination is where the reference points to
}
// Citation is a citation node.
type Citation struct {
Leaf
Destination [][]byte // Destination is where the citation points to. Multiple ones are allowed.
Type []CitationTypes // 1:1 mapping of destination and citation type
Suffix [][]byte // Potential citation suffix, i.e. [@!RFC1035, p. 144]
}
// Image represents markdown image node
type Image struct {
Container
Destination []byte // Destination is what goes into a href
Title []byte // Title is the tooltip thing that goes in a title attribute
}
// Text represents markdown text node
type Text struct {
Leaf
}
// HTMLBlock represents markdown html node
type HTMLBlock struct {
Leaf
}
// CodeBlock represents markdown code block node
type CodeBlock struct {
Leaf
IsFenced bool // Specifies whether it's a fenced code block or an indented one
Info []byte // This holds the info string
FenceChar byte
FenceLength int
FenceOffset int
}
// Softbreak represents markdown softbreak node
// Note: not used currently
type Softbreak struct {
Leaf
}
// Hardbreak represents markdown hard break node
type Hardbreak struct {
Leaf
}
// Code represents markdown code node
type Code struct {
Leaf
}
// HTMLSpan represents markdown html span node
type HTMLSpan struct {
Leaf
}
// Table represents markdown table node
type Table struct {
Container
}
// TableCell represents markdown table cell node
type TableCell struct {
Container
IsHeader bool // This tells if it's under the header row
Align CellAlignFlags // This holds the value for align attribute
}
// TableHeader represents markdown table head node
type TableHeader struct {
Container
}
// TableBody represents markdown table body node
type TableBody struct {
Container
}
// TableRow represents markdown table row node
type TableRow struct {
Container
}
// TableFooter represents markdown table foot node
type TableFooter struct {
Container
}
// Caption represents a figure, code or quote caption
type Caption struct {
Container
}
// CaptionFigure is a node (blockquote or codeblock) that has a caption
type CaptionFigure struct {
Container
HeadingID string // This might hold heading ID, if present
}
// Callout is a node that can exist both in text (where it is an actual node) and in a code block.
type Callout struct {
Leaf
ID []byte // number of this callout
}
// Index is a node that contains an Index item and an optional subitem.
type Index struct {
Leaf
Primary bool
Item []byte
Subitem []byte
ID string // ID of the index
}
// Subscript is a subscript node
type Subscript struct {
Leaf
}
// Superscript is a superscript node
type Superscript struct {
Leaf
}
// Footnotes is a node that contains all footnotes
type Footnotes struct {
Container
}
func removeNodeFromArray(a []Node, node Node) []Node {
n := len(a)
for i := 0; i < n; i++ {
if a[i] == node {
return append(a[:i], a[i+1:]...)
}
}
return nil
}
// AppendChild appends child to children of parent
// It panics if either node is nil.
func AppendChild(parent Node, child Node) {
RemoveFromTree(child)
child.SetParent(parent)
newChildren := append(parent.GetChildren(), child)
parent.SetChildren(newChildren)
}
// RemoveFromTree removes this node from tree
func RemoveFromTree(n Node) {
if n.GetParent() == nil {
return
}
// important: don't clear n.Children if n has no parent
// we're called from AppendChild and that might happen on a node
// that accumulated Children but hasn't been inserted into the tree
n.SetChildren(nil)
p := n.GetParent()
newChildren := removeNodeFromArray(p.GetChildren(), n)
if newChildren != nil {
p.SetChildren(newChildren)
}
}
// GetLastChild returns last child of node n
// It's implemented as stand-alone function to keep Node interface small
func GetLastChild(n Node) Node {
a := n.GetChildren()
if len(a) > 0 {
return a[len(a)-1]
}
return nil
}
// GetFirstChild returns first child of node n
// It's implemented as stand-alone function to keep Node interface small
func GetFirstChild(n Node) Node {
a := n.GetChildren()
if len(a) > 0 {
return a[0]
}
return nil
}
// GetNextNode returns next sibling of node n (node after n)
// We can't make it part of Container or Leaf because we lose Node identity
func GetNextNode(n Node) Node {
parent := n.GetParent()
if parent == nil {
return nil
}
a := parent.GetChildren()
n := len(a) - 1
for i := 0; i < n; i++ {
if a[i] == n {
return a[i+1]
}
}
return nil
}
// GetPrevNode returns previous sibling of node n (node before n)
// We can't make it part of Container or Leaf because we lose Node identity
func GetPrevNode(n Node) Node {
parent := n.GetParent()
if parent == nil {
return nil
}
a := parent.GetChildren()
n := len(a)
for i := 1; i < n; i++ {
if a[i] == n {
return a[i-1]
}
}
return nil
}
// WalkStatus allows NodeVisitor to have some control over the tree traversal.
// It is returned from NodeVisitor and different values allow Node.Walk to
// decide which node to go to next.
type WalkStatus int
const (
// GoToNext is the default traversal of every node.
GoToNext WalkStatus = iota
// SkipChildren tells walker to skip all children of current node.
SkipChildren
// Terminate tells walker to terminate the traversal.
Terminate
)
// NodeVisitor is a callback to be called when traversing the syntax tree.
// Called twice for every node: once with entering=true when the branch is
// first visited, then with entering=false after all the children are done.
type NodeVisitor interface {
Visit(node Node, entering bool) WalkStatus
}
// NodeVisitorFunc casts a function to match NodeVisitor interface
type NodeVisitorFunc func(node Node, entering bool) WalkStatus
// Walk traverses tree recursively
func Walk(n Node, visitor NodeVisitor) WalkStatus {
isContainer := n.AsContainer() != nil
status := visitor.Visit(n, true) // entering
if status == Terminate {
// even if terminating, close container node
if isContainer {
visitor.Visit(n, false)
}
return status
}
if isContainer && status != SkipChildren {
children := n.GetChildren()
for _, n := range children {
status = Walk(n, visitor)
if status == Terminate {
return status
}
}
}
if isContainer {
status = visitor.Visit(n, false) // exiting
if status == Terminate {
return status
}
}
return GoToNext
}
// Visit calls visitor function
func (f NodeVisitorFunc) Visit(node Node, entering bool) WalkStatus {
return f(node, entering)
}
// WalkFunc is like Walk but accepts just a callback function
func WalkFunc(n Node, f NodeVisitorFunc) {
visitor := NodeVisitorFunc(f)
Walk(n, visitor)
}
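Everything needed to build and traverse a tree is defined in this file. A short sketch of the Walk machinery, using only types declared above:

package main

import (
	"fmt"

	"github.com/gomarkdown/markdown/ast"
)

func main() {
	doc := &ast.Document{}
	para := &ast.Paragraph{}
	txt := &ast.Text{}
	txt.Literal = []byte("hello")
	ast.AppendChild(doc, para)
	ast.AppendChild(para, txt)

	// Visit is called twice per container: entering and exiting.
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		if entering {
			fmt.Printf("%T\n", node) // *ast.Document, *ast.Paragraph, *ast.Text
		}
		return ast.GoToNext
	})
}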

vendor/github.com/gomarkdown/markdown/ast/print.go generated vendored Normal file
@ -0,0 +1,165 @@
package ast
import (
"bytes"
"fmt"
"io"
"strings"
)
// Print is for debugging. It prints a string representation of parsed
// markdown doc (result of parser.Parse()) to dst.
//
// To make output readable, it shortens text output.
func Print(dst io.Writer, doc Node) {
PrintWithPrefix(dst, doc, " ")
}
// PrintWithPrefix is like Print but allows customizing prefix used for
// indentation. By default it's 2 spaces. You can change it to e.g. tab
// by passing "\t"
func PrintWithPrefix(w io.Writer, doc Node, prefix string) {
// for more compact output, don't print outer Document
if _, ok := doc.(*Document); ok {
for _, c := range doc.GetChildren() {
printRecur(w, c, prefix, 0)
}
} else {
printRecur(w, doc, prefix, 0)
}
}
// ToString is like Print but returns the result as a string
func ToString(doc Node) string {
var buf bytes.Buffer
Print(&buf, doc)
return buf.String()
}
func contentToString(d1 []byte, d2 []byte) string {
if d1 != nil {
return string(d1)
}
if d2 != nil {
return string(d2)
}
return ""
}
func getContent(node Node) string {
if c := node.AsContainer(); c != nil {
return contentToString(c.Literal, c.Content)
}
leaf := node.AsLeaf()
return contentToString(leaf.Literal, leaf.Content)
}
func shortenString(s string, maxLen int) string {
// for cleaner, one-line output, replace some white-space chars
// with their escaped version
s = strings.Replace(s, "\n", `\n`, -1)
s = strings.Replace(s, "\r", `\r`, -1)
s = strings.Replace(s, "\t", `\t`, -1)
if maxLen < 0 {
return s
}
if len(s) < maxLen {
return s
}
// add "..." to indicate truncation
return s[:maxLen-3] + "..."
}
// get a short name of the type of node which excludes the package name
// and strips "()" from the end
func getNodeType(node Node) string {
s := fmt.Sprintf("%T", node)
s = strings.TrimSuffix(s, "()")
if idx := strings.Index(s, "."); idx != -1 {
return s[idx+1:]
}
return s
}
func printDefault(w io.Writer, indent string, typeName string, content string) {
content = strings.TrimSpace(content)
if len(content) > 0 {
fmt.Fprintf(w, "%s%s '%s'\n", indent, typeName, content)
} else {
fmt.Fprintf(w, "%s%s\n", indent, typeName)
}
}
func getListFlags(f ListType) string {
var s string
if f&ListTypeOrdered != 0 {
s += "ordered "
}
if f&ListTypeDefinition != 0 {
s += "definition "
}
if f&ListTypeTerm != 0 {
s += "term "
}
if f&ListItemContainsBlock != 0 {
s += "has_block "
}
if f&ListItemBeginningOfList != 0 {
s += "start "
}
if f&ListItemEndOfList != 0 {
s += "end "
}
s = strings.TrimSpace(s)
return s
}
func printRecur(w io.Writer, node Node, prefix string, depth int) {
if node == nil {
return
}
indent := strings.Repeat(prefix, depth)
content := shortenString(getContent(node), 40)
typeName := getNodeType(node)
switch v := node.(type) {
case *Link:
content := "url=" + string(v.Destination)
printDefault(w, indent, typeName, content)
case *Image:
content := "url=" + string(v.Destination)
printDefault(w, indent, typeName, content)
case *List:
if v.Start > 1 {
content += fmt.Sprintf("start=%d ", v.Start)
}
if v.Tight {
content += "tight "
}
if v.IsFootnotesList {
content += "footnotes "
}
flags := getListFlags(v.ListFlags)
if len(flags) > 0 {
content += "flags=" + flags + " "
}
printDefault(w, indent, typeName, content)
case *ListItem:
if v.Tight {
content += "tight "
}
if v.IsFootnotesList {
content += "footnotes "
}
flags := getListFlags(v.ListFlags)
if len(flags) > 0 {
content += "flags=" + flags + " "
}
printDefault(w, indent, typeName, content)
default:
printDefault(w, indent, typeName, content)
}
for _, child := range node.GetChildren() {
printRecur(w, child, prefix, depth+1)
}
}
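A sketch of what the debug dump looks like for a tiny tree; the output shape follows directly from printRecur above:

package main

import (
	"os"

	"github.com/gomarkdown/markdown/ast"
)

func main() {
	doc := &ast.Document{}
	para := &ast.Paragraph{}
	txt := &ast.Text{}
	txt.Literal = []byte("hi")
	ast.AppendChild(doc, para)
	ast.AppendChild(para, txt)
	ast.Print(os.Stdout, doc)
	// Prints (outer Document omitted):
	// Paragraph
	//   Text 'hi'
}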

vendor/github.com/gomarkdown/markdown/html/callouts.go generated vendored Normal file
@ -0,0 +1,42 @@
package html
import (
"bytes"
"io"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/parser"
)
// EscapeHTMLCallouts writes html-escaped d to w. It escapes &, <, > and " characters, *but*
// expands callouts <<N>> with the callout HTML, i.e. by calling r.callout() with a newly created
// ast.Callout node.
func (r *Renderer) EscapeHTMLCallouts(w io.Writer, d []byte) {
ld := len(d)
Parse:
for i := 0; i < ld; i++ {
for _, comment := range r.opts.Comments {
if !bytes.HasPrefix(d[i:], comment) {
break
}
lc := len(comment)
if i+lc < ld {
if id, consumed := parser.IsCallout(d[i+lc:]); consumed > 0 {
// We have seen a callout
callout := &ast.Callout{ID: id}
r.callout(w, callout)
i += consumed + lc - 1
continue Parse
}
}
}
escSeq := Escaper[d[i]]
if escSeq != nil {
w.Write(escSeq)
} else {
w.Write([]byte{d[i]})
}
}
}

vendor/github.com/gomarkdown/markdown/html/doc.go generated vendored Normal file
@ -0,0 +1,43 @@
/*
Package html implements HTML renderer of parsed markdown document.
Configuring and customizing a renderer
A renderer can be configured with multiple options:
import "github.com/gomarkdown/markdown/html"
flags := html.CommonFlags | html.CompletePage | html.HrefTargetBlank
opts := html.RendererOptions{
Title: "A custom title",
Flags: flags,
}
renderer := html.NewRenderer(opts)
You can also re-use most of the logic and customize rendering of selected nodes
by providing a node render hook.
This is most useful for rendering nodes that allow for design choices, like
links or code blocks.
import (
"github.com/gomarkdown/markdown/html"
"github.com/gomarkdown/markdown/ast"
)
// a very simple render hook that will output "code_replacement" instead of
// the <code>${content}</code> emitted by html.Renderer
func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
_, ok := node.(*ast.CodeBlock)
if !ok {
return ast.GoToNext, false
}
io.WriteString(w, "code_replacement")
return ast.GoToNext, true
}
opts := html.RendererOptions{
RenderNodeHook: renderHookCodeBlock,
}
renderer := html.NewRenderer(opts)
*/
package html
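Tying these options together end to end; this sketch assumes the top-level markdown.ToHTML helper from the root package, which is not part of this diff:

package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/html"
)

func main() {
	opts := html.RendererOptions{Flags: html.CommonFlags}
	renderer := html.NewRenderer(opts)
	// nil parser means ToHTML builds a default one.
	out := markdown.ToHTML([]byte("*hi*"), nil, renderer)
	fmt.Print(string(out)) // typically prints: <p><em>hi</em></p>
}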

vendor/github.com/gomarkdown/markdown/html/esc.go generated vendored Normal file
@ -0,0 +1,50 @@
package html
import (
"html"
"io"
)
// Escaper maps a byte to its HTML escape sequence, or nil if the byte
// needs no escaping.
var Escaper = [256][]byte{
'&': []byte("&amp;"),
'<': []byte("&lt;"),
'>': []byte("&gt;"),
'"': []byte("&quot;"),
}
// EscapeHTML writes html-escaped d to w. It escapes &, <, > and " characters.
func EscapeHTML(w io.Writer, d []byte) {
var start, end int
n := len(d)
for end < n {
escSeq := Escaper[d[end]]
if escSeq != nil {
w.Write(d[start:end])
w.Write(escSeq)
start = end + 1
}
end++
}
if start < n && end <= n {
w.Write(d[start:end])
}
}
func escLink(w io.Writer, text []byte) {
unesc := html.UnescapeString(string(text))
EscapeHTML(w, []byte(unesc))
}
// Escape writes the text to w, but skips the escape character.
func Escape(w io.Writer, text []byte) {
esc := false
for i := 0; i < len(text); i++ {
if text[i] == '\\' {
esc = !esc
}
if esc && text[i] == '\\' {
continue
}
w.Write([]byte{text[i]})
}
}
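EscapeHTML batches writes between escapable bytes instead of writing byte by byte. For example:

package main

import (
	"bytes"
	"fmt"

	"github.com/gomarkdown/markdown/html"
)

func main() {
	var buf bytes.Buffer
	html.EscapeHTML(&buf, []byte(`<a href="x">&</a>`))
	fmt.Println(buf.String()) // &lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;
}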

vendor/github.com/gomarkdown/markdown/html/renderer.go generated vendored Normal file
File diff suppressed because it is too large
@ -0,0 +1,444 @@
package html
import (
"bytes"
"io"
)
// SmartyPants rendering
// SPRenderer is a struct containing state of a Smartypants renderer.
type SPRenderer struct {
inSingleQuote bool
inDoubleQuote bool
callbacks [256]smartCallback
}
func wordBoundary(c byte) bool {
return c == 0 || isSpace(c) || isPunctuation(c)
}
func tolower(c byte) byte {
if c >= 'A' && c <= 'Z' {
return c - 'A' + 'a'
}
return c
}
func isdigit(c byte) bool {
return c >= '0' && c <= '9'
}
func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
// edge of the buffer is likely to be a tag that we don't get to see,
// so we treat it like text sometimes
// enumerate all sixteen possibilities for (previousChar, nextChar)
// each can be one of {0, space, punct, other}
switch {
case previousChar == 0 && nextChar == 0:
// context is not any help here, so toggle
*isOpen = !*isOpen
case isSpace(previousChar) && nextChar == 0:
// [ "] might be [ "<code>foo...]
*isOpen = true
case isPunctuation(previousChar) && nextChar == 0:
// [!"] hmm... could be [Run!"] or [("<code>...]
*isOpen = false
case /* isnormal(previousChar) && */ nextChar == 0:
// [a"] is probably a close
*isOpen = false
case previousChar == 0 && isSpace(nextChar):
// [" ] might be [...foo</code>" ]
*isOpen = false
case isSpace(previousChar) && isSpace(nextChar):
// [ " ] context is not any help here, so toggle
*isOpen = !*isOpen
case isPunctuation(previousChar) && isSpace(nextChar):
// [!" ] is probably a close
*isOpen = false
case /* isnormal(previousChar) && */ isSpace(nextChar):
// [a" ] this is one of the easy cases
*isOpen = false
case previousChar == 0 && isPunctuation(nextChar):
// ["!] hmm... could be ["$1.95] or [</code>"!...]
*isOpen = false
case isSpace(previousChar) && isPunctuation(nextChar):
// [ "!] looks more like [ "$1.95]
*isOpen = true
case isPunctuation(previousChar) && isPunctuation(nextChar):
// [!"!] context is not any help here, so toggle
*isOpen = !*isOpen
case /* isnormal(previousChar) && */ isPunctuation(nextChar):
// [a"!] is probably a close
*isOpen = false
case previousChar == 0 /* && isnormal(nextChar) */ :
// ["a] is probably an open
*isOpen = true
case isSpace(previousChar) /* && isnormal(nextChar) */ :
// [ "a] this is one of the easy cases
*isOpen = true
case isPunctuation(previousChar) /* && isnormal(nextChar) */ :
// [!"a] is probably an open
*isOpen = true
default:
// [a'b] maybe a contraction?
*isOpen = false
}
// Note that with the limited lookahead, this non-breaking
// space will also be appended to single double quotes.
if addNBSP && !*isOpen {
out.WriteString("&nbsp;")
}
out.WriteByte('&')
if *isOpen {
out.WriteByte('l')
} else {
out.WriteByte('r')
}
out.WriteByte(quote)
out.WriteString("quo;")
if addNBSP && *isOpen {
out.WriteString("&nbsp;")
}
return true
}
func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 2 {
t1 := tolower(text[1])
if t1 == '\'' {
nextChar := byte(0)
if len(text) >= 3 {
nextChar = text[2]
}
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
return 1
}
}
if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
out.WriteString("&rsquo;")
return 0
}
if len(text) >= 3 {
t2 := tolower(text[2])
if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
(len(text) < 4 || wordBoundary(text[3])) {
out.WriteString("&rsquo;")
return 0
}
}
}
nextChar := byte(0)
if len(text) > 1 {
nextChar = text[1]
}
if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
return 0
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 3 {
t1 := tolower(text[1])
t2 := tolower(text[2])
if t1 == 'c' && t2 == ')' {
out.WriteString("&copy;")
return 2
}
if t1 == 'r' && t2 == ')' {
out.WriteString("&reg;")
return 2
}
if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
out.WriteString("&trade;")
return 3
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 2 {
if text[1] == '-' {
out.WriteString("&mdash;")
return 1
}
if wordBoundary(previousChar) && wordBoundary(text[1]) {
out.WriteString("&ndash;")
return 0
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
out.WriteString("&mdash;")
return 2
}
if len(text) >= 2 && text[1] == '-' {
out.WriteString("&ndash;")
return 1
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
if bytes.HasPrefix(text, []byte("&quot;")) {
nextChar := byte(0)
if len(text) >= 7 {
nextChar = text[6]
}
if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
return 5
}
}
if bytes.HasPrefix(text, []byte("&#0;")) {
return 3
}
out.WriteByte('&')
return 0
}
func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
var quote byte = 'd'
if angledQuotes {
quote = 'a'
}
return func(out *bytes.Buffer, previousChar byte, text []byte) int {
return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
}
}
func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
out.WriteString("&hellip;")
return 2
}
if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
out.WriteString("&hellip;")
return 4
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 2 && text[1] == '`' {
nextChar := byte(0)
if len(text) >= 3 {
nextChar = text[2]
}
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
return 1
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
// note: check for regular slash (/) or fraction slash (⁄, U+2044, or 0xe2 81 84 in utf-8)
// and avoid changing dates like 1/23/2005 into fractions.
numEnd := 0
for len(text) > numEnd && isdigit(text[numEnd]) {
numEnd++
}
if numEnd == 0 {
out.WriteByte(text[0])
return 0
}
denStart := numEnd + 1
if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
denStart = numEnd + 3
} else if len(text) < numEnd+2 || text[numEnd] != '/' {
out.WriteByte(text[0])
return 0
}
denEnd := denStart
for len(text) > denEnd && isdigit(text[denEnd]) {
denEnd++
}
if denEnd == denStart {
out.WriteByte(text[0])
return 0
}
if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
out.WriteString("<sup>")
out.Write(text[:numEnd])
out.WriteString("</sup>&frasl;<sub>")
out.Write(text[denStart:denEnd])
out.WriteString("</sub>")
return denEnd - 1
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
if text[0] == '1' && text[1] == '/' && text[2] == '2' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
out.WriteString("&frac12;")
return 2
}
}
if text[0] == '1' && text[1] == '/' && text[2] == '4' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
out.WriteString("&frac14;")
return 2
}
}
if text[0] == '3' && text[1] == '/' && text[2] == '4' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
out.WriteString("&frac34;")
return 2
}
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
nextChar := byte(0)
if len(text) > 1 {
nextChar = text[1]
}
if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
out.WriteString("&quot;")
}
return 0
}
func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
}
func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
}
func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
i := 0
for i < len(text) && text[i] != '>' {
i++
}
out.Write(text[:i+1])
return i
}
type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
// NewSmartypantsRenderer constructs a Smartypants renderer object.
func NewSmartypantsRenderer(flags Flags) *SPRenderer {
var (
r SPRenderer
smartAmpAngled = r.smartAmp(true, false)
smartAmpAngledNBSP = r.smartAmp(true, true)
smartAmpRegular = r.smartAmp(false, false)
smartAmpRegularNBSP = r.smartAmp(false, true)
addNBSP = flags&SmartypantsQuotesNBSP != 0
)
if flags&SmartypantsAngledQuotes == 0 {
r.callbacks['"'] = r.smartDoubleQuote
if !addNBSP {
r.callbacks['&'] = smartAmpRegular
} else {
r.callbacks['&'] = smartAmpRegularNBSP
}
} else {
r.callbacks['"'] = r.smartAngledDoubleQuote
if !addNBSP {
r.callbacks['&'] = smartAmpAngled
} else {
r.callbacks['&'] = smartAmpAngledNBSP
}
}
r.callbacks['\''] = r.smartSingleQuote
r.callbacks['('] = r.smartParens
if flags&SmartypantsDashes != 0 {
if flags&SmartypantsLatexDashes == 0 {
r.callbacks['-'] = r.smartDash
} else {
r.callbacks['-'] = r.smartDashLatex
}
}
r.callbacks['.'] = r.smartPeriod
if flags&SmartypantsFractions == 0 {
r.callbacks['1'] = r.smartNumber
r.callbacks['3'] = r.smartNumber
} else {
for ch := '1'; ch <= '9'; ch++ {
r.callbacks[ch] = r.smartNumberGeneric
}
}
r.callbacks['<'] = r.smartLeftAngle
r.callbacks['`'] = r.smartBacktick
return &r
}
// Process is the entry point of the Smartypants renderer.
func (r *SPRenderer) Process(w io.Writer, text []byte) {
mark := 0
for i := 0; i < len(text); i++ {
if action := r.callbacks[text[i]]; action != nil {
if i > mark {
w.Write(text[mark:i])
}
previousChar := byte(0)
if i > 0 {
previousChar = text[i-1]
}
var tmp bytes.Buffer
i += action(&tmp, previousChar, text[i:])
w.Write(tmp.Bytes())
mark = i + 1
}
}
if mark < len(text) {
w.Write(text[mark:])
}
}
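// Illustrative usage (a sketch, not part of the vendored file):
//
//	sp := NewSmartypantsRenderer(SmartypantsDashes)
//	var buf bytes.Buffer
//	sp.Process(&buf, []byte(`"hello" -- 1/2 world...`))
//	// buf now holds: &ldquo;hello&rdquo; &mdash; &frac12; world&hellip;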

73
vendor/github.com/gomarkdown/markdown/parser/aside.go generated vendored Normal file
View file

@ -0,0 +1,73 @@
package parser
import (
"bytes"
"github.com/gomarkdown/markdown/ast"
)
// asidePrefix returns the aside prefix length
func (p *Parser) asidePrefix(data []byte) int {
i := 0
n := len(data)
for i < 3 && i < n && data[i] == ' ' {
i++
}
if i+1 < n && data[i] == 'A' && data[i+1] == '>' {
if i+2 < n && data[i+2] == ' ' {
return i + 3
}
return i + 2
}
return 0
}
// an aside ends with at least one blank line
// followed by something without an aside prefix
func (p *Parser) terminateAside(data []byte, beg, end int) bool {
if p.isEmpty(data[beg:]) <= 0 {
return false
}
if end >= len(data) {
return true
}
return p.asidePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
}
// aside parses an aside fragment
func (p *Parser) aside(data []byte) int {
var raw bytes.Buffer
beg, end := 0, 0
// identical to quote
for beg < len(data) {
end = beg
// Step over whole lines, collecting them. While doing that, check for
// fenced code and if one's found, incorporate it altogether,
// regardless of any contents inside it
for end < len(data) && data[end] != '\n' {
if p.extensions&FencedCode != 0 {
if i := p.fencedCodeBlock(data[end:], false); i > 0 {
// -1 to compensate for the extra end++ after the loop:
end += i - 1
break
}
}
end++
}
end = skipCharN(data, end, '\n', 1)
if pre := p.asidePrefix(data[beg:]); pre > 0 {
// skip the prefix
beg += pre
} else if p.terminateAside(data, beg, end) {
break
}
// this line is part of the aside
raw.Write(data[beg:end])
beg = end
}
block := p.addBlock(&ast.Aside{})
p.block(raw.Bytes())
p.finalize(block)
return end
}
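// For reference, a sketch of the aside syntax this file parses:
//
//	A> Lines prefixed with A> are collected
//	A> into a single ast.Aside block, much
//	A> like a blockquote uses the > prefix.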

View file

@ -0,0 +1,116 @@
package parser
import (
"bytes"
"github.com/gomarkdown/markdown/ast"
)
// attribute parses a (potential) block attribute and adds it to p.
func (p *Parser) attribute(data []byte) []byte {
if len(data) < 3 {
return data
}
i := 0
if data[i] != '{' {
return data
}
i++
// last character must be a } otherwise it's not an attribute
end := skipUntilChar(data, i, '\n')
if data[end-1] != '}' {
return data
}
i = skipSpace(data, i)
b := &ast.Attribute{Attrs: make(map[string][]byte)}
esc := false
quote := false
trail := 0
Loop:
for ; i < len(data); i++ {
switch data[i] {
case ' ', '\t', '\f', '\v':
if quote {
continue
}
chunk := data[trail+1 : i]
if len(chunk) == 0 {
trail = i
continue
}
switch {
case chunk[0] == '.':
b.Classes = append(b.Classes, chunk[1:])
case chunk[0] == '#':
b.ID = chunk[1:]
default:
k, v := keyValue(chunk)
if k != nil && v != nil {
b.Attrs[string(k)] = v
} else {
// this is illegal in an attribute
return data
}
}
trail = i
case '"':
if esc {
esc = !esc
continue
}
quote = !quote
case '\\':
esc = !esc
case '}':
if esc {
esc = !esc
continue
}
chunk := data[trail+1 : i]
if len(chunk) == 0 {
return data
}
switch {
case chunk[0] == '.':
b.Classes = append(b.Classes, chunk[1:])
case chunk[0] == '#':
b.ID = chunk[1:]
default:
k, v := keyValue(chunk)
if k != nil && v != nil {
b.Attrs[string(k)] = v
} else {
return data
}
}
i++
break Loop
default:
esc = false
}
}
p.attr = b
return data[i:]
}
// key="value" quotes are mandatory.
func keyValue(data []byte) ([]byte, []byte) {
chunk := bytes.SplitN(data, []byte{'='}, 2)
if len(chunk) != 2 {
return nil, nil
}
key := chunk[0]
value := chunk[1]
if len(value) < 3 || len(key) == 0 {
return nil, nil
}
if value[0] != '"' || value[len(value)-1] != '"' {
return key, nil
}
return key, value[1 : len(value)-1]
}
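// For reference, a sketch of the block attribute syntax parsed above:
//
//	{.go .numbered #example key="value"}
//
// which attaches ID "example", classes "go" and "numbered", and the
// key/value pair to the following block.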

1978
vendor/github.com/gomarkdown/markdown/parser/block.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,29 @@
package parser
import (
"bytes"
"strconv"
)
// IsCallout detects a callout in the following format: <<N>>, where N is an integer > 0.
func IsCallout(data []byte) (id []byte, consumed int) {
if !bytes.HasPrefix(data, []byte("<<")) {
return nil, 0
}
start := 2
end := bytes.Index(data[start:], []byte(">>"))
if end < 0 {
return nil, 0
}
b := data[start : start+end]
b = bytes.TrimSpace(b)
i, err := strconv.Atoi(string(b))
if err != nil {
return nil, 0
}
if i <= 0 {
return nil, 0
}
return b, start + end + 2 // 2 for >>
}
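// Illustrative behavior (a sketch, not part of the vendored file):
//
//	id, consumed := IsCallout([]byte("<<2>>")) // id == []byte("2"), consumed == 5
//	id, consumed = IsCallout([]byte("<<0>>"))  // id == nil, consumed == 0; N must be > 0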

View file

@ -0,0 +1,70 @@
package parser
import (
"bytes"
)
// caption checks for a caption; it returns the caption data and a potential "headingID".
func (p *Parser) caption(data, caption []byte) ([]byte, string, int) {
if !bytes.HasPrefix(data, caption) {
return nil, "", 0
}
j := len(caption)
data = data[j:]
end := p.linesUntilEmpty(data)
data = data[:end]
id, start := captionID(data)
if id != "" {
return data[:start], id, end + j
}
return data, "", end + j
}
// linesUntilEmpty scans lines up to the first empty line.
func (p *Parser) linesUntilEmpty(data []byte) int {
line, i := 0, 0
for line < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
if p.isEmpty(data[line:i]) == 0 {
line = i
continue
}
break
}
return i
}
// captionID checks if the caption *ends* in {#....}. If so the text after {# is taken to be
// the ID/anchor of the entire figure block.
func captionID(data []byte) (string, int) {
end := len(data)
j, k := 0, 0
// find start/end of heading id
for j = 0; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
}
for k = j + 1; k < end && data[k] != '}'; k++ {
}
// the remainder must be whitespace.
for l := k + 1; l < end; l++ {
if !isSpace(data[l]) {
return "", 0
}
}
if j > 0 && k > 0 && j+2 < k {
return string(data[j+2 : k]), j
}
return "", 0
}
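// For reference, a sketch of a caption as handled above:
//
//	Figure: A short caption for the preceding block. {#fig:example}
//
// The text up to the first empty line becomes the caption; the trailing
// {#...}, if present, becomes the heading ID of the enclosing figure.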

View file

@ -0,0 +1,86 @@
package parser
import (
"bytes"
"github.com/gomarkdown/markdown/ast"
)
// citation parses a citation. In its simplest form [@ref], we allow multiple
// citations separated by semicolons and a sub reference inside, ala pandoc: [@ref, p. 23].
// Each citation can have a modifier: !, ? or - which mean:
//
// ! - normative
// ? - informative
// - - suppressed
//
// The suffix starts after a comma, we strip any whitespace before and after. If the output
// allows for it, this can be rendered.
func citation(p *Parser, data []byte, offset int) (int, ast.Node) {
// look for the matching closing bracket
i := offset + 1
for level := 1; level > 0 && i < len(data); i++ {
switch {
case data[i] == '\n':
// no newlines allowed.
return 0, nil
case data[i-1] == '\\':
continue
case data[i] == '[':
level++
case data[i] == ']':
level--
if level <= 0 {
i-- // compensate for extra i++ in for loop
}
}
}
if i >= len(data) {
return 0, nil
}
node := &ast.Citation{}
citations := bytes.Split(data[1:i], []byte(";"))
for _, citation := range citations {
var suffix []byte
citation = bytes.TrimSpace(citation)
j := 0
if citation[j] != '@' {
// not a citation, drop out entirely.
return 0, nil
}
if c := bytes.Index(citation, []byte(",")); c > 0 {
part := citation[:c]
suff := citation[c+1:]
part = bytes.TrimSpace(part)
suff = bytes.TrimSpace(suff)
citation = part
suffix = suff
}
citeType := ast.CitationTypeInformative
j = 1
switch citation[j] {
case '!':
citeType = ast.CitationTypeNormative
j++
case '?':
citeType = ast.CitationTypeInformative
j++
case '-':
citeType = ast.CitationTypeSuppressed
j++
}
node.Destination = append(node.Destination, citation[j:])
node.Type = append(node.Type, citeType)
node.Suffix = append(node.Suffix, suffix)
}
return i + 1, node
}
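// For reference, a sketch of the citation forms parsed above:
//
//	[@RFC1925]           informative (the default)
//	[@!RFC2535]          normative
//	[@?RFC1925]          informative, explicit
//	[@-RFC1925]          suppressed
//	[@RFC2535, p. 23]    with a suffix
//	[@RFC1925; @RFC2535] multiple citations in one set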

20
vendor/github.com/gomarkdown/markdown/parser/esc.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
package parser
// isEscape returns true if byte i is prefixed by an odd number of backslahses.
func isEscape(data []byte, i int) bool {
if i == 0 {
return false
}
if i == 1 {
return data[0] == '\\'
}
j := i - 1
for ; j >= 0; j-- {
if data[j] != '\\' {
break
}
}
j++
// odd number of backslahes means escape
return (i-j)%2 != 0
}
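// Illustrative behavior (a sketch, not part of the vendored file):
//
//	isEscape([]byte(`a\*`), 2)  // true: one backslash precedes the *
//	isEscape([]byte(`a\\*`), 3) // false: two backslashes, so the * is not escaped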

119
vendor/github.com/gomarkdown/markdown/parser/figures.go generated vendored Normal file
View file

@ -0,0 +1,119 @@
package parser
import (
"bytes"
"github.com/gomarkdown/markdown/ast"
)
// sFigureLine checks if there's a figure line (e.g., !--- ) at the beginning of data,
// and returns the end index if so, or 0 otherwise.
func sFigureLine(data []byte, oldmarker string) (end int, marker string) {
i, size := 0, 0
n := len(data)
// skip up to three spaces
for i < n && i < 3 && data[i] == ' ' {
i++
}
// check for the marker characters: !
if i+1 >= n {
return 0, ""
}
if data[i] != '!' || data[i+1] != '-' {
return 0, ""
}
i++
c := data[i] // i.e. the -
// the whole line must be the same char or whitespace
for i < n && data[i] == c {
size++
i++
}
// the marker char must occur at least 3 times
if size < 3 {
return 0, ""
}
marker = string(data[i-size : i])
// if this is the end marker, it must match the beginning marker
if oldmarker != "" && marker != oldmarker {
return 0, ""
}
// there is no syntax modifier although it might be an idea to re-use this space for something?
i = skipChar(data, i, ' ')
if i >= n || data[i] != '\n' {
if i == n {
return i, marker
}
return 0, ""
}
return i + 1, marker // Take newline into account.
}
// figureBlock returns the end index if data contains a figure block at the beginning,
// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
// If doRender is true, a final newline is mandatory to recognize the figure block.
func (p *Parser) figureBlock(data []byte, doRender bool) int {
beg, marker := sFigureLine(data, "")
if beg == 0 || beg >= len(data) {
return 0
}
var raw bytes.Buffer
for {
// safe to assume beg < len(data)
// check for the end of the code block
figEnd, _ := sFigureLine(data[beg:], marker)
if figEnd != 0 {
beg += figEnd
break
}
// copy the current line
end := skipUntilChar(data, beg, '\n') + 1
// did we reach the end of the buffer without a closing marker?
if end >= len(data) {
return 0
}
// verbatim copy to the working buffer
if doRender {
raw.Write(data[beg:end])
}
beg = end
}
if !doRender {
return beg
}
figure := &ast.CaptionFigure{}
p.addBlock(figure)
p.block(raw.Bytes())
defer p.finalize(figure)
if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
caption := &ast.Caption{}
p.Inline(caption, captionContent)
figure.HeadingID = id
p.addChild(caption)
beg += consumed
}
p.finalize(figure)
return beg
}
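// For reference, a sketch of a figure block as parsed above:
//
//	!---
//	![a kitten](kitten.png)
//	!---
//	Figure: A caption for the whole figure. {#fig-kitten}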

129
vendor/github.com/gomarkdown/markdown/parser/include.go generated vendored Normal file
View file

@ -0,0 +1,129 @@
package parser
import (
"bytes"
"path"
"path/filepath"
)
// isInclude parses {{...}}[...], that contains a path between the {{, the [...] syntax contains
// an address to select which lines to include. It is treated as an opaque string and just given
// to readInclude.
func (p *Parser) isInclude(data []byte) (filename string, address []byte, consumed int) {
i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
if len(data[i:]) < 3 {
return "", nil, 0
}
if data[i] != '{' || data[i+1] != '{' {
return "", nil, 0
}
start := i + 2
// find the end delimiter
i = skipUntilChar(data, i, '}')
if i+1 >= len(data) {
return "", nil, 0
}
end := i
i++
if data[i] != '}' {
return "", nil, 0
}
filename = string(data[start:end])
if i+1 < len(data) && data[i+1] == '[' { // potential address specification
start := i + 2
end = skipUntilChar(data, start, ']')
if end >= len(data) {
return "", nil, 0
}
address = data[start:end]
return filename, address, end + 1
}
return filename, address, i + 1
}
func (p *Parser) readInclude(from, file string, address []byte) []byte {
if p.Opts.ReadIncludeFn != nil {
return p.Opts.ReadIncludeFn(from, file, address)
}
return nil
}
// isCodeInclude parses <{{...}}, which is similar to isInclude; the returned bytes are, however, wrapped in a code block.
func (p *Parser) isCodeInclude(data []byte) (filename string, address []byte, consumed int) {
i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
if len(data[i:]) < 3 {
return "", nil, 0
}
if data[i] != '<' {
return "", nil, 0
}
start := i
filename, address, consumed = p.isInclude(data[i+1:])
if consumed == 0 {
return "", nil, 0
}
return filename, address, start + consumed + 1
}
// readCodeInclude acts like readInclude except the returned bytes are wrapped in a fenced code block.
func (p *Parser) readCodeInclude(from, file string, address []byte) []byte {
data := p.readInclude(from, file, address)
if data == nil {
return nil
}
ext := path.Ext(file)
buf := &bytes.Buffer{}
buf.Write([]byte("```"))
if ext != "" { // starts with a dot
buf.WriteString(" " + ext[1:] + "\n")
} else {
buf.WriteByte('\n')
}
buf.Write(data)
buf.WriteString("```\n")
return buf.Bytes()
}
// incStack holds the current stack of chained includes. Each value is the containing
// path of the file being parsed.
type incStack struct {
stack []string
}
func newIncStack() *incStack {
return &incStack{stack: []string{}}
}
// Push pushes the directory of new onto the stack; a relative path is resolved against the current top.
func (i *incStack) Push(new string) {
if path.IsAbs(new) {
i.stack = append(i.stack, path.Dir(new))
return
}
last := ""
if len(i.stack) > 0 {
last = i.stack[len(i.stack)-1]
}
i.stack = append(i.stack, path.Dir(filepath.Join(last, new)))
}
// Pop pops the last value.
func (i *incStack) Pop() {
if len(i.stack) == 0 {
return
}
i.stack = i.stack[:len(i.stack)-1]
}
func (i *incStack) Last() string {
if len(i.stack) == 0 {
return ""
}
return i.stack[len(i.stack)-1]
}
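// For reference, a sketch of the include forms parsed above (the address part
// is treated as an opaque string and handed to ReadIncludeFn):
//
//	{{includes/chapter1.md}}  include a file
//	{{src/main.go}}[3,10]     include with an address specifier
//	<{{src/main.go}}          include wrapped in a fenced code block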

1280
vendor/github.com/gomarkdown/markdown/parser/inline.go generated vendored Normal file

File diff suppressed because it is too large

36
vendor/github.com/gomarkdown/markdown/parser/matter.go generated vendored Normal file
View file

@ -0,0 +1,36 @@
package parser
import (
"bytes"
"github.com/gomarkdown/markdown/ast"
)
func (p *Parser) documentMatter(data []byte) int {
if data[0] != '{' {
return 0
}
consumed := 0
matter := ast.DocumentMatterNone
if bytes.HasPrefix(data, []byte("{frontmatter}")) {
consumed = len("{frontmatter}")
matter = ast.DocumentMatterFront
}
if bytes.HasPrefix(data, []byte("{mainmatter}")) {
consumed = len("{mainmatter}")
matter = ast.DocumentMatterMain
}
if bytes.HasPrefix(data, []byte("{backmatter}")) {
consumed = len("{backmatter}")
matter = ast.DocumentMatterBack
}
if consumed == 0 {
return 0
}
node := &ast.DocumentMatter{Matter: matter}
p.addBlock(node)
p.finalize(node)
return consumed
}
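// For reference, the matter markers recognized above, each at the start of a block:
//
//	{frontmatter}
//	{mainmatter}
//	{backmatter}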

View file

@ -0,0 +1,32 @@
package parser
import (
"github.com/gomarkdown/markdown/ast"
)
// Flags control optional behavior of parser.
type Flags int
// Options is a collection of supplementary parameters tweaking the behavior of various parts of the parser.
type Options struct {
ParserHook BlockFunc
ReadIncludeFn ReadIncludeFunc
Flags Flags // Flags allow customizing parser's behavior
}
// Parser configuration options.
const (
FlagsNone Flags = 0
SkipFootnoteList Flags = 1 << iota // Skip adding the footnote list (regardless if they are parsed)
)
// BlockFunc allows registration of a parser function. If successful it
// returns an ast.Node, a buffer that should be parsed as a block and the number of bytes consumed.
type BlockFunc func(data []byte) (ast.Node, []byte, int)
// ReadIncludeFunc should read the file under path and return the read bytes;
// from will be set to the name of the current file being parsed. Initially
// this will be empty. address is the optional address specifier of which lines
// of the file to return. If this function is not set, no data will be read.
type ReadIncludeFunc func(from, path string, address []byte) []byte
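// A minimal sketch of wiring these options up (assumed usage, not part of the
// vendored file); the address specifier is ignored for brevity:
//
//	p := NewWithExtensions(CommonExtensions | Includes)
//	p.Opts = Options{
//		ReadIncludeFn: func(from, path string, address []byte) []byte {
//			data, err := ioutil.ReadFile(path) // io/ioutil
//			if err != nil {
//				return nil
//			}
//			return data
//		},
//	}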

811
vendor/github.com/gomarkdown/markdown/parser/parser.go generated vendored Normal file
View file

@ -0,0 +1,811 @@
/*
Package parser implements a parser for markdown text that generates an AST (abstract syntax tree).
*/
package parser
import (
"bytes"
"fmt"
"strings"
"unicode/utf8"
"github.com/gomarkdown/markdown/ast"
)
// Extensions is a bitmask of enabled parser extensions.
type Extensions int
// Bit flags representing markdown parsing extensions.
// Use | (or) to specify multiple extensions.
const (
NoExtensions Extensions = 0
NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
Tables // Parse tables
FencedCode // Parse fenced code blocks
Autolink // Detect embedded URLs that are not explicitly marked
Strikethrough // Strikethrough text using ~~test~~
LaxHTMLBlocks // Loosen up HTML block parsing rules
SpaceHeadings // Be strict about prefix heading rules
HardLineBreak // Translate newlines into line breaks
TabSizeEight // Expand tabs to eight spaces instead of four
Footnotes // Pandoc-style footnotes
NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
HeadingIDs // specify heading IDs with {#id}
Titleblock // Titleblock ala pandoc
AutoHeadingIDs // Create the heading ID from the text
BackslashLineBreak // Translate trailing backslashes into line breaks
DefinitionLists // Parse definition lists
MathJax // Parse MathJax
OrderedListStart // Keep track of the first number used when starting an ordered list.
Attributes // Block Attributes
SuperSubscript // Super- and subscript support: 2^10^, H~2~O.
EmptyLinesBreakList // 2 empty lines break out of list
Includes // Support including other files.
Mmark // Support Mmark syntax, see https://mmark.nl/syntax
CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
BackslashLineBreak | DefinitionLists | MathJax
)
// The size of a tab stop.
const (
tabSizeDefault = 4
tabSizeDouble = 8
)
// inlineParser defines a handler invoked for each character that triggers a response when parsing inline data.
type inlineParser func(p *Parser, data []byte, offset int) (int, ast.Node)
// ReferenceOverrideFunc is expected to be called with a reference string and
// return either a valid Reference type that the reference string maps to or
// nil. If overridden is false, the default reference logic will be executed.
// See the documentation in Options for more details on use-case.
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
// Parser is a type that holds extensions and the runtime state used by
// Parse, and the renderer. You cannot use it directly; construct it with New.
type Parser struct {
// ReferenceOverride is an optional function callback that is called every
// time a reference is resolved. It can be set before starting parsing.
//
// In Markdown, the link reference syntax can be made to resolve a link to
// a reference instead of an inline URL, in one of the following ways:
//
// * [link text][refid]
// * [refid][]
//
// Usually, the refid is defined at the bottom of the Markdown document. If
// this override function is provided, the refid is passed to the override
// function first, before consulting the defined refids at the bottom. If
// the override function indicates an override did not occur, the refids at
// the bottom will be used to fill in the link details.
ReferenceOverride ReferenceOverrideFunc
Opts Options
// after parsing, this is AST root of parsed markdown text
Doc ast.Node
extensions Extensions
refs map[string]*reference
refsRecord map[string]struct{}
inlineCallback [256]inlineParser
nesting int
maxNesting int
insideLink bool
indexCnt int // incremented after every index
// Footnotes need to be ordered as well as available to quickly check for
// presence. If a ref is also a footnote, it's stored both in refs and here
// in notes. Slice is nil if footnotes not enabled.
notes []*reference
tip ast.Node // = doc
oldTip ast.Node
lastMatchedContainer ast.Node // = doc
allClosed bool
// Attributes are attached to block level elements.
attr *ast.Attribute
includeStack *incStack
}
// New creates a markdown parser with CommonExtensions.
//
// You can then call `doc := p.Parse(markdown)` to parse a markdown document
// and `markdown.Render(doc, renderer)` to convert it to another format with
// a renderer.
func New() *Parser {
return NewWithExtensions(CommonExtensions)
}
// NewWithExtensions creates a markdown parser with given extensions.
func NewWithExtensions(extension Extensions) *Parser {
p := Parser{
refs: make(map[string]*reference),
refsRecord: make(map[string]struct{}),
maxNesting: 16,
insideLink: false,
Doc: &ast.Document{},
extensions: extension,
allClosed: true,
includeStack: newIncStack(),
}
p.tip = p.Doc
p.oldTip = p.Doc
p.lastMatchedContainer = p.Doc
p.inlineCallback[' '] = maybeLineBreak
p.inlineCallback['*'] = emphasis
p.inlineCallback['_'] = emphasis
if p.extensions&Strikethrough != 0 {
p.inlineCallback['~'] = emphasis
}
p.inlineCallback['`'] = codeSpan
p.inlineCallback['\n'] = lineBreak
p.inlineCallback['['] = link
p.inlineCallback['<'] = leftAngle
p.inlineCallback['\\'] = escape
p.inlineCallback['&'] = entity
p.inlineCallback['!'] = maybeImage
if p.extensions&Mmark != 0 {
p.inlineCallback['('] = maybeShortRefOrIndex
}
p.inlineCallback['^'] = maybeInlineFootnoteOrSuper
if p.extensions&Autolink != 0 {
p.inlineCallback['h'] = maybeAutoLink
p.inlineCallback['m'] = maybeAutoLink
p.inlineCallback['f'] = maybeAutoLink
p.inlineCallback['H'] = maybeAutoLink
p.inlineCallback['M'] = maybeAutoLink
p.inlineCallback['F'] = maybeAutoLink
}
if p.extensions&MathJax != 0 {
p.inlineCallback['$'] = math
}
return &p
}
func (p *Parser) getRef(refid string) (ref *reference, found bool) {
if p.ReferenceOverride != nil {
r, overridden := p.ReferenceOverride(refid)
if overridden {
if r == nil {
return nil, false
}
return &reference{
link: []byte(r.Link),
title: []byte(r.Title),
noteID: 0,
hasBlock: false,
text: []byte(r.Text)}, true
}
}
// refs are case insensitive
ref, found = p.refs[strings.ToLower(refid)]
return ref, found
}
func (p *Parser) isFootnote(ref *reference) bool {
_, ok := p.refsRecord[string(ref.link)]
return ok
}
func (p *Parser) finalize(block ast.Node) {
p.tip = block.GetParent()
}
func (p *Parser) addChild(node ast.Node) ast.Node {
for !canNodeContain(p.tip, node) {
p.finalize(p.tip)
}
ast.AppendChild(p.tip, node)
p.tip = node
return node
}
func canNodeContain(n ast.Node, v ast.Node) bool {
switch n.(type) {
case *ast.List:
return isListItem(v)
case *ast.Document, *ast.BlockQuote, *ast.Aside, *ast.ListItem, *ast.CaptionFigure:
return !isListItem(v)
case *ast.Table:
switch v.(type) {
case *ast.TableHeader, *ast.TableBody, *ast.TableFooter:
return true
default:
return false
}
case *ast.TableHeader, *ast.TableBody, *ast.TableFooter:
_, ok := v.(*ast.TableRow)
return ok
case *ast.TableRow:
_, ok := v.(*ast.TableCell)
return ok
}
return false
}
func (p *Parser) closeUnmatchedBlocks() {
if p.allClosed {
return
}
for p.oldTip != p.lastMatchedContainer {
parent := p.oldTip.GetParent()
p.finalize(p.oldTip)
p.oldTip = parent
}
p.allClosed = true
}
// Reference represents the details of a link.
// See the documentation in Options for more details on use-case.
type Reference struct {
// Link is usually the URL the reference points to.
Link string
// Title is the alternate text describing the link in more detail.
Title string
// Text is the optional text to override the ref with if the syntax used was
// [refid][]
Text string
}
// Parse generates an AST (abstract syntax tree) representing the markdown document.
//
// The result is a root of the tree whose underlying type is *ast.Document
//
// You can then convert the AST to HTML using html.Renderer, to some other format
// using a custom renderer, or transform the tree.
func (p *Parser) Parse(input []byte) ast.Node {
p.block(input)
// Walk the tree and finish up some of unfinished blocks
for p.tip != nil {
p.finalize(p.tip)
}
// Walk the tree again and process inline markdown in each block
ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus {
switch node.(type) {
case *ast.Paragraph, *ast.Heading, *ast.TableCell:
p.Inline(node, node.AsContainer().Content)
node.AsContainer().Content = nil
}
return ast.GoToNext
})
if p.Opts.Flags&SkipFootnoteList == 0 {
p.parseRefsToAST()
}
return p.Doc
}
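// Illustrative usage (a sketch, not part of the vendored file):
//
//	p := New()
//	doc := p.Parse([]byte("# Title\n\nSome *markdown* text.\n"))
//	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
//		if h, ok := node.(*ast.Heading); ok && entering {
//			_ = h // inspect each heading here
//		}
//		return ast.GoToNext
//	})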
func (p *Parser) parseRefsToAST() {
if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
return
}
p.tip = p.Doc
list := &ast.List{
IsFootnotesList: true,
ListFlags: ast.ListTypeOrdered,
}
p.addBlock(&ast.Footnotes{})
block := p.addBlock(list)
flags := ast.ListItemBeginningOfList
// Note: this loop is intentionally explicit, not range-form. This is
// because the body of the loop will append nested footnotes to p.notes and
// we need to process those late additions. Range form would only walk over
// the fixed initial set.
for i := 0; i < len(p.notes); i++ {
ref := p.notes[i]
p.addChild(ref.footnote)
block := ref.footnote
listItem := block.(*ast.ListItem)
listItem.ListFlags = flags | ast.ListTypeOrdered
listItem.RefLink = ref.link
if ref.hasBlock {
flags |= ast.ListItemContainsBlock
p.block(ref.title)
} else {
p.Inline(block, ref.title)
}
flags &^= ast.ListItemBeginningOfList | ast.ListItemContainsBlock
}
above := list.Parent
finalizeList(list)
p.tip = above
ast.WalkFunc(block, func(node ast.Node, entering bool) ast.WalkStatus {
switch node.(type) {
case *ast.Paragraph, *ast.Heading:
p.Inline(node, node.AsContainer().Content)
node.AsContainer().Content = nil
}
return ast.GoToNext
})
}
//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
// [1]: http://www.google.com/ "Google"
// [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
// This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
// This sentence needs a bit of further explanation.[^note]
//
// [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
// Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.
// reference holds all information necessary for a reference-style links or
// footnotes.
//
// Consider this markdown with reference-style links:
//
// [link][ref]
//
// [ref]: /url/ "tooltip title"
//
// It will be ultimately converted to this HTML:
//
// <p><a href=\"/url/\" title=\"title\">link</a></p>
//
// And a reference structure will be populated as follows:
//
// p.refs["ref"] = &reference{
// link: "/url/",
// title: "tooltip title",
// }
//
// Alternatively, reference can contain information about a footnote. Consider
// this markdown:
//
// Text needing a footnote.[^a]
//
// [^a]: This is the note
//
// A reference structure will be populated as follows:
//
// p.refs["a"] = &reference{
// link: "a",
// title: "This is the note",
// noteID: <some positive int>,
// }
//
// TODO: As you can see, it begs for splitting into two dedicated structures
// for refs and for footnotes.
type reference struct {
link []byte
title []byte
noteID int // 0 if not a footnote ref
hasBlock bool
footnote ast.Node // a link to the Item node within a list of footnotes
text []byte // only gets populated by refOverride feature with Reference.Text
}
func (r *reference) String() string {
return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
r.link, r.title, r.text, r.noteID, r.hasBlock)
}
// isReference checks whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the Parser struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *Parser, data []byte, tabSize int) int {
// up to 3 optional leading spaces
if len(data) < 4 {
return 0
}
i := 0
for i < 3 && data[i] == ' ' {
i++
}
noteID := 0
// id part: anything but a newline between brackets
if data[i] != '[' {
return 0
}
i++
if p.extensions&Footnotes != 0 {
if i < len(data) && data[i] == '^' {
// we can set it to anything here because the proper noteIds will
// be assigned later during the second pass. It just has to be != 0
noteID = 1
i++
}
}
idOffset := i
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
i++
}
if i >= len(data) || data[i] != ']' {
return 0
}
idEnd := i
// footnotes can have an empty ID, like this: [^], but a reference cannot be
// empty like this: []. Break early if it's not a footnote and there's no ID
if noteID == 0 && idOffset == idEnd {
return 0
}
// spacer: colon (space | tab)* newline? (space | tab)*
i++
if i >= len(data) || data[i] != ':' {
return 0
}
i++
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
i++
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
i++
}
}
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i >= len(data) {
return 0
}
var (
linkOffset, linkEnd int
titleOffset, titleEnd int
lineEnd int
raw []byte
hasBlock bool
)
if p.extensions&Footnotes != 0 && noteID != 0 {
linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
lineEnd = linkEnd
} else {
linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
}
if lineEnd == 0 {
return 0
}
// a valid ref has been found
ref := &reference{
noteID: noteID,
hasBlock: hasBlock,
}
if noteID > 0 {
// reusing the link field for the id since footnotes don't have links
ref.link = data[idOffset:idEnd]
// if footnote, it's not really a title, it's the contained text
ref.title = raw
} else {
ref.link = data[linkOffset:linkEnd]
ref.title = data[titleOffset:titleEnd]
}
// id matches are case-insensitive
id := string(bytes.ToLower(data[idOffset:idEnd]))
p.refs[id] = ref
return lineEnd
}
func scanLinkRef(p *Parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
// link: whitespace-free sequence, optionally between angle brackets
if data[i] == '<' {
i++
}
linkOffset = i
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
i++
}
linkEnd = i
if linkEnd < len(data) && data[linkOffset] == '<' && data[linkEnd-1] == '>' {
linkOffset++
linkEnd--
}
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
return
}
// compute end-of-line
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
lineEnd = i
}
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
lineEnd++
}
// optional (space|tab)* spacer after a newline
if lineEnd > 0 {
i = lineEnd + 1
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
}
// optional title: any non-newline sequence enclosed in '"() alone on its line
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
i++
titleOffset = i
// look for EOL
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
i++
}
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
titleEnd = i + 1
} else {
titleEnd = i
}
// step back
i--
for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
i--
}
if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
lineEnd = titleEnd
titleEnd = i
}
}
return
}
// The first bit of this logic is the same as Parser.listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *Parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
if i == 0 || len(data) == 0 {
return
}
// skip leading whitespace on first line
for i < len(data) && data[i] == ' ' {
i++
}
blockStart = i
// find the end of the line
blockEnd = i
for i < len(data) && data[i-1] != '\n' {
i++
}
// get working buffer
var raw bytes.Buffer
// put the first line into the working buffer
raw.Write(data[blockEnd:i])
blockEnd = i
// process the following lines
containsBlankLine := false
gatherLines:
for blockEnd < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
// if it is an empty line, guess that it is part of this item
// and move on to the next line
if p.isEmpty(data[blockEnd:i]) > 0 {
containsBlankLine = true
blockEnd = i
continue
}
n := 0
if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
// this is the end of the block.
// we don't want to include this last line in the index.
break gatherLines
}
// if there were blank lines before this one, insert a new one now
if containsBlankLine {
raw.WriteByte('\n')
containsBlankLine = false
}
// get rid of that first tab, write to buffer
raw.Write(data[blockEnd+n : i])
hasBlock = true
blockEnd = i
}
if data[blockEnd-1] != '\n' {
raw.WriteByte('\n')
}
contents = raw.Bytes()
return
}
// isPunctuation returns true if c is a punctuation symbol.
func isPunctuation(c byte) bool {
for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
if c == r {
return true
}
}
return false
}
// isSpace returns true if c is a white-space character
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}
// isLetter returns true if c is an ASCII letter
func isLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// isAlnum returns true if c is a digit or letter
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isAlnum(c byte) bool {
return (c >= '0' && c <= '9') || isLetter(c)
}
// TODO: this is not used
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// always ends output with a newline
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
// first, check for common cases: no tabs, or only tabs at beginning of line
i, prefix := 0, 0
slowcase := false
for i = 0; i < len(line); i++ {
if line[i] == '\t' {
if prefix == i {
prefix++
} else {
slowcase = true
break
}
}
}
// no need to decode runes if all tabs are at the beginning of the line
if !slowcase {
for i = 0; i < prefix*tabSize; i++ {
out.WriteByte(' ')
}
out.Write(line[prefix:])
return
}
// the slow case: we need to count runes to figure out how
// many spaces to insert for each tab
column := 0
i = 0
for i < len(line) {
start := i
for i < len(line) && line[i] != '\t' {
_, size := utf8.DecodeRune(line[i:])
i += size
column++
}
if i > start {
out.Write(line[start:i])
}
if i >= len(line) {
break
}
for {
out.WriteByte(' ')
column++
if column%tabSize == 0 {
break
}
}
i++
}
}
// isIndented finds if a line counts as indented or not.
// Returns the number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
if len(data) == 0 {
return 0
}
if data[0] == '\t' {
return 1
}
if len(data) < indentSize {
return 0
}
for i := 0; i < indentSize; i++ {
if data[i] != ' ' {
return 0
}
}
return indentSize
}
// slugify creates a URL-safe slug for fragments
func slugify(in []byte) []byte {
if len(in) == 0 {
return in
}
out := make([]byte, 0, len(in))
sym := false
for _, ch := range in {
if isAlnum(ch) {
sym = false
out = append(out, ch)
} else if sym {
continue
} else {
out = append(out, '-')
sym = true
}
}
var a, b int
var ch byte
for a, ch = range out {
if ch != '-' {
break
}
}
for b = len(out) - 1; b > 0; b-- {
if out[b] != '-' {
break
}
}
return out[a : b+1]
}
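// Illustrative behavior (a sketch, not part of the vendored file):
//
//	slugify([]byte("Hello, World!")) // -> "Hello-World"
//	slugify([]byte("--a--b--"))      // -> "a-b"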
func isListItem(d ast.Node) bool {
_, ok := d.(*ast.ListItem)
return ok
}

89
vendor/github.com/gomarkdown/markdown/parser/ref.go generated vendored Normal file
View file

@ -0,0 +1,89 @@
package parser
import (
"bytes"
"fmt"
"github.com/gomarkdown/markdown/ast"
)
// maybeShortRefOrIndex parses '(#r)', where r does not contain spaces, or
// (!item) and (!item, subitem) for an index; (!!item) signals primary.
func maybeShortRefOrIndex(p *Parser, data []byte, offset int) (int, ast.Node) {
if len(data[offset:]) < 4 {
return 0, nil
}
// short ref first
data = data[offset:]
i := 1
switch data[i] {
case '#': // cross ref
i++
Loop:
for i < len(data) {
c := data[i]
switch {
case c == ')':
break Loop
case !isAlnum(c):
if c == '_' || c == '-' || c == ':' {
i++
continue
}
i = 0
break Loop
}
i++
}
if i >= len(data) {
return 0, nil
}
if data[i] != ')' {
return 0, nil
}
id := data[2:i]
node := &ast.CrossReference{}
node.Destination = id
return i + 1, node
case '!': // index
i++
start := i
i = skipUntilChar(data, start, ')')
// did we reach the end of the buffer without a closing marker?
if i >= len(data) {
return 0, nil
}
if len(data[start:i]) < 1 {
return 0, nil
}
idx := &ast.Index{}
idx.ID = fmt.Sprintf("idxref:%d", p.indexCnt)
p.indexCnt++
idx.Primary = data[start] == '!'
buf := data[start:i]
if idx.Primary {
buf = buf[1:]
}
items := bytes.Split(buf, []byte(","))
switch len(items) {
case 1:
idx.Item = bytes.TrimSpace(items[0])
return i + 1, idx
case 2:
idx.Item = bytes.TrimSpace(items[0])
idx.Subitem = bytes.TrimSpace(items[1])
return i + 1, idx
}
}
return 0, nil
}
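// For reference, a sketch of the short reference and index forms parsed above
// (these are only wired up when the Mmark extension is enabled):
//
//	(#sec-intro)        cross reference to a heading ID
//	(!gopher)           index entry
//	(!gopher, habitat)  index entry with a subitem
//	(!!gopher)          primary index entry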

31
vendor/github.com/mmarkdown/mmark/LICENSE.txt generated vendored Normal file
View file

@ -0,0 +1,31 @@
Markdown is distributed under the Simplified BSD License:
Copyright © 2011 Russ Ross
Copyright © 2018 Krzysztof Kowalczyk
Copyright © 2018 Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

23
vendor/github.com/mmarkdown/mmark/mast/bibliography.go generated vendored Normal file
View file

@ -0,0 +1,23 @@
package mast
import (
"github.com/gomarkdown/markdown/ast"
"github.com/mmarkdown/mmark/mast/reference"
)
// Bibliography represents a markdown bibliography node.
type Bibliography struct {
ast.Container
Type ast.CitationTypes
}
// BibliographyItem contains a single bibliography item.
type BibliographyItem struct {
ast.Leaf
Anchor []byte
Type ast.CitationTypes
Reference *reference.Reference // parsed reference XML
}

34
vendor/github.com/mmarkdown/mmark/mast/index.go generated vendored Normal file
View file

@ -0,0 +1,34 @@
package mast
import "github.com/gomarkdown/markdown/ast"
// DocumentIndex represents a markdown document index node.
type DocumentIndex struct {
ast.Container
}
// IndexItem contains an index for the indices section.
type IndexItem struct {
ast.Container
*ast.Index
}
// IndexSubItem contains a sub item index for the indices section.
type IndexSubItem struct {
ast.Container
*ast.Index
}
// IndexLetter has the Letter of this index item.
type IndexLetter struct {
ast.Container
}
// IndexLink links to the index in the document.
type IndexLink struct {
*ast.Link
Primary bool
}

171
vendor/github.com/mmarkdown/mmark/mast/nodes.go generated vendored Normal file
View file

@ -0,0 +1,171 @@
package mast
import (
"bytes"
"sort"
"github.com/gomarkdown/markdown/ast"
)
// some extra functions for manipulating the AST
// MoveChildren moves the children from b to a *and* makes the parent of each point to a.
// Any children a had are obliterated; b is left without children.
func MoveChildren(a, b ast.Node) {
a.SetChildren(b.GetChildren())
b.SetChildren(nil)
for _, child := range a.GetChildren() {
child.SetParent(a)
}
}
// Some attribute helper functions.
// AttributeFromNode returns the attribute from the node, if there was one.
func AttributeFromNode(node ast.Node) *ast.Attribute {
if c := node.AsContainer(); c != nil && c.Attribute != nil {
return c.Attribute
}
if l := node.AsLeaf(); l != nil && l.Attribute != nil {
return l.Attribute
}
return nil
}
// AttributeInit will initialize an *Attribute on node if there wasn't one.
func AttributeInit(node ast.Node) {
if l := node.AsLeaf(); l != nil && l.Attribute == nil {
l.Attribute = &ast.Attribute{Attrs: make(map[string][]byte)}
return
}
if c := node.AsContainer(); c != nil && c.Attribute == nil {
c.Attribute = &ast.Attribute{Attrs: make(map[string][]byte)}
return
}
}
// DeleteAttribute deletes the attribute under key from node.
func DeleteAttribute(node ast.Node, key string) {
a := AttributeFromNode(node)
if a == nil {
return
}
switch key {
case "id":
a.ID = nil
case "class":
// TODO
default:
delete(a.Attrs, key)
}
}
// SetAttribute sets the attribute under key to value.
func SetAttribute(node ast.Node, key string, value []byte) {
a := AttributeFromNode(node)
if a == nil {
return
}
switch key {
case "id":
a.ID = value
case "class":
// TODO
default:
a.Attrs[key] = value
}
}
// Attribute returns the attribute value under key. Use AttributeClass to retrieve
// a class.
func Attribute(node ast.Node, key string) []byte {
a := AttributeFromNode(node)
if a == nil {
return nil
}
switch key {
case "id":
return a.ID
case "class":
// use AttributeClass.
}
return a.Attrs[key]
}
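// AttributeBytes renders attr back into its source form, e.g. {#id .class key="value"}.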
func AttributeBytes(attr *ast.Attribute) []byte {
ret := &bytes.Buffer{}
ret.WriteByte('{')
if len(attr.ID) != 0 {
ret.WriteByte('#')
ret.Write(attr.ID)
}
for _, c := range attr.Classes {
if ret.Len() > 1 {
ret.WriteByte(' ')
}
ret.WriteByte('.')
ret.Write(c)
}
keys := []string{}
for k := range attr.Attrs {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
if ret.Len() > 1 {
ret.WriteByte(' ')
}
ret.WriteString(k)
ret.WriteString(`="`)
ret.Write(attr.Attrs[k])
ret.WriteByte('"')
}
ret.WriteByte('}')
return ret.Bytes()
}
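// Illustrative behavior (a sketch, not part of the vendored file):
//
//	attr := &ast.Attribute{
//		ID:      []byte("my-id"),
//		Classes: [][]byte{[]byte("wide")},
//		Attrs:   map[string][]byte{"lang": []byte("go")},
//	}
//	AttributeBytes(attr) // -> {#my-id .wide lang="go"}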
// AttributeClass returns true if the class key is set.
func AttributeClass(node ast.Node, key string) bool {
a := AttributeFromNode(node)
if a == nil {
return false
}
for _, c := range a.Classes {
if string(c) == key {
return true
}
}
return false
}
// AttributeFilter runs the attribute on node through filter and only allows elements for which filter returns true.
func AttributeFilter(node ast.Node, filter func(key string) bool) {
a := AttributeFromNode(node)
if a == nil {
return
}
if !filter("id") {
a.ID = nil
}
if !filter("class") {
a.Classes = nil
}
for k := range a.Attrs {
if !filter(k) {
delete(a.Attrs, k)
}
}
}
// FilterFunc checks if s is an allowed key in an attribute.
// If s is:
// "id" the ID should be checked
// "class" the classes should be allowed or disallowed
// any other string means checking the individual attributes.
// It returns true for elements that are allowed, false otherwise.
type FilterFunc func(s string) bool
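// Illustrative usage (a sketch, not part of the vendored file): keep only the
// ID and classes on a node, dropping all other attributes:
//
//	AttributeFilter(node, func(key string) bool {
//		return key == "id" || key == "class"
//	})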

View file

@ -0,0 +1,71 @@
// Package reference defines the elements of a <reference> block.
package reference
import "encoding/xml"
// Author is the reference author.
type Author struct {
Fullname string `xml:"fullname,attr,omitempty"`
Initials string `xml:"initials,attr,omitempty"`
Surname string `xml:"surname,attr,omitempty"`
Role string `xml:"role,attr,omitempty"`
Organization *Organization `xml:"organization,omitempty"`
Address *Address `xml:"address,omitempty"`
}
type Organization struct {
Abbrev string `xml:"abbrev,attr,omitempty"`
Value string `xml:",chardata"`
}
// this is copied from ../title.go; it might make sense to unify them, especially if we
// want to allow a reference to be given in TOML as well. See #55.
// Address denotes the address of an RFC author.
type Address struct {
Phone string `xml:"phone,omitempty"`
Email string `xml:"email,omitempty"`
URI string `xml:"uri,omitempty"`
Postal AddressPostal `xml:"postal,omitempty"`
}
// AddressPostal denotes the postal address of an RFC author.
type AddressPostal struct {
PostalLine []string `xml:"postalline,omitempty"`
Streets []string `xml:"street,omitempty"`
Cities []string `xml:"city,omitempty"`
Codes []string `xml:"code,omitempty"`
Countries []string `xml:"country,omitempty"`
Regions []string `xml:"region,omitempty"`
}
// Date is the reference date.
type Date struct {
Year string `xml:"year,attr,omitempty"`
Month string `xml:"month,attr,omitempty"`
Day string `xml:"day,attr,omitempty"`
}
// Front is the reference <front>.
type Front struct {
Title string `xml:"title"`
Authors []Author `xml:"author,omitempty"`
Date Date `xml:"date"`
}
// Format is the reference <format>. This is deprecated in RFC 7991, see Section 3.3.
type Format struct {
Type string `xml:"type,attr,omitempty"`
Target string `xml:"target,attr"`
}
// Reference is the entire <reference> structure.
type Reference struct {
XMLName xml.Name `xml:"reference"`
Anchor string `xml:"anchor,attr"`
Front Front `xml:"front"`
Format *Format `xml:"format,omitempty"`
Target string `xml:"target,attr"`
}
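// For reference, a sketch of the XML a Reference maps to (values are illustrative):
//
//	<reference anchor="RFC1925" target="https://www.rfc-editor.org/info/rfc1925">
//	  <front>
//	    <title>The Twelve Networking Truths</title>
//	    <author initials="R." surname="Callon" fullname="R. Callon"></author>
//	    <date year="1996" month="April"></date>
//	  </front>
//	</reference>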

95
vendor/github.com/mmarkdown/mmark/mast/title.go generated vendored Normal file
View file

@ -0,0 +1,95 @@
package mast
import (
"time"
"github.com/gomarkdown/markdown/ast"
)
// Title represents the TOML encoded title block.
type Title struct {
ast.Leaf
*TitleData
Trigger string // either triggered by %%% or ---
}
// NewTitle returns a pointer to a Title with some defaults set.
func NewTitle(trigger byte) *Title {
t := &Title{
TitleData: &TitleData{
Area: "Internet",
Ipr: "trust200902",
Consensus: true,
},
}
t.Trigger = string([]byte{trigger, trigger, trigger})
return t
}
const triggerDash = "---"
func (t *Title) IsTriggerDash() bool { return t.Trigger == triggerDash }
// TitleData holds all the elements of the title.
type TitleData struct {
Title string
Abbrev string
SeriesInfo SeriesInfo
Consensus bool
Ipr string // See https://tools.ietf.org/html/rfc7991#appendix-A.1
Obsoletes []int
Updates []int
SubmissionType string // IETF, IAB, IRTF or independent
Date time.Time
Area string
Workgroup string
Keyword []string
Author []Author
}
// SeriesInfo holds details on the Internet-Draft or RFC, see https://tools.ietf.org/html/rfc7991#section-2.47
type SeriesInfo struct {
Name string // name of the document, values are "RFC", "Internet-Draft", and "DOI"
Value string // either draft name, or number
Status string // The status of this document, values: "standard", "informational", "experimental", "bcp", "fyi", and "full-standard"
Stream string // "IETF" (default),"IAB", "IRTF" or "independent"
}
// Author denotes an RFC author.
type Author struct {
Initials string
Surname string
Fullname string
Organization string
OrganizationAbbrev string `toml:"abbrev"`
Role string
ASCII string
Address Address
}
// Address denotes the address of an RFC author.
type Address struct {
Phone string
Email string
URI string
Postal AddressPostal
}
// AddressPostal denotes the postal address of an RFC author.
type AddressPostal struct {
Street string
City string
Code string
Country string
Region string
PostalLine []string
// Plurals when these need to be specified multiple times.
Streets []string
Cities []string
Codes []string
Countries []string
Regions []string
}
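// For reference, a sketch of a TOML title block that decodes into TitleData
// (key spelling is matched case-insensitively by the TOML decoder; the values
// here are illustrative):
//
//	%%%
//	title = "An Example Document"
//	area = "Internet"
//	workgroup = "Example Working Group"
//
//	[seriesInfo]
//	name = "Internet-Draft"
//	value = "draft-example-00"
//
//	[[author]]
//	initials = "A."
//	surname = "Author"
//	fullname = "An Author"
//	%%%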

View file

@ -0,0 +1,184 @@
package mparser
import (
"bytes"
"encoding/xml"
"log"
"github.com/gomarkdown/markdown/ast"
"github.com/mmarkdown/mmark/mast"
"github.com/mmarkdown/mmark/mast/reference"
)
// CitationToBibliography walks the AST, gathers all citations and reference HTML blocks, and groups them into
// normative and informative references.
func CitationToBibliography(doc ast.Node) (normative ast.Node, informative ast.Node) {
seen := map[string]*mast.BibliographyItem{}
raw := map[string][]byte{}
// Gather all citations.
// Gather all reference HTML Blocks to see if we have XML we can output.
ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
switch c := node.(type) {
case *ast.Citation:
for i, d := range c.Destination {
if _, ok := seen[string(bytes.ToLower(d))]; ok {
continue
}
ref := &mast.BibliographyItem{}
ref.Anchor = d
ref.Type = c.Type[i]
seen[string(bytes.ToLower(d))] = ref // lowercased to match the lookup above
}
case *ast.HTMLBlock:
anchor := anchorFromReference(c.Literal)
if anchor != nil {
raw[string(bytes.ToLower(anchor))] = c.Literal
}
}
return ast.GoToNext
})
for _, r := range seen {
// If we have a reference anchor and the raw XML add that here.
if raw, ok := raw[string(bytes.ToLower(r.Anchor))]; ok {
var x reference.Reference
if e := xml.Unmarshal(raw, &x); e != nil {
log.Printf("Failed to unmarshal reference: %q: %s", r.Anchor, e)
continue
}
r.Reference = &x
}
switch r.Type {
case ast.CitationTypeInformative:
if informative == nil {
informative = &mast.Bibliography{Type: ast.CitationTypeInformative}
}
ast.AppendChild(informative, r)
case ast.CitationTypeSuppressed:
fallthrough
case ast.CitationTypeNormative:
if normative == nil {
normative = &mast.Bibliography{Type: ast.CitationTypeNormative}
}
ast.AppendChild(normative, r)
}
}
return normative, informative
}
// NodeBackMatter returns the back matter node, the place where the bibliography should be injected.
func NodeBackMatter(doc ast.Node) ast.Node {
var matter ast.Node
ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
if mat, ok := node.(*ast.DocumentMatter); ok {
if mat.Matter == ast.DocumentMatterBack {
matter = mat
return ast.Terminate
}
}
return ast.GoToNext
})
return matter
}
// anchorFromReference parses '<reference anchor='CBR03' target=''>' and returns the value after anchor=, which is the ID for the reference.
func anchorFromReference(data []byte) []byte {
if !bytes.HasPrefix(data, []byte("<reference ")) {
return nil
}
anchor := bytes.Index(data, []byte("anchor="))
if anchor < 0 {
return nil
}
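// 7 == len("anchor="); beg indexes the quote character that follows it.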
beg := anchor + 7
if beg >= len(data) {
return nil
}
quote := data[beg]
i := beg + 1
// scan for the closing quote
for i < len(data) && data[i] != quote {
i++
}
// no closing quote found
if i >= len(data) {
return nil
}
return data[beg+1 : i]
}
// ReferenceHook is the hook used to parse reference nodes.
func ReferenceHook(data []byte) (ast.Node, []byte, int) {
ref, ok := IsReference(data)
if !ok {
return nil, nil, 0
}
node := &ast.HTMLBlock{}
node.Literal = fmtReference(ref)
return node, nil, len(ref)
}
// IsReference returns whether data contains a reference.
func IsReference(data []byte) ([]byte, bool) {
if !bytes.HasPrefix(data, []byte("<reference ")) {
return nil, false
}
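// 12 == len("</reference>"): the loop below keeps a 12-byte lookbehind for it.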
i := 12
// scan for an end-of-reference marker, across lines if necessary
for i < len(data) &&
!(data[i-12] == '<' && data[i-11] == '/' && data[i-10] == 'r' && data[i-9] == 'e' && data[i-8] == 'f' &&
data[i-7] == 'e' && data[i-6] == 'r' && data[i-5] == 'e' &&
data[i-4] == 'n' && data[i-3] == 'c' && data[i-2] == 'e' &&
data[i-1] == '>') {
i++
}
// no end-of-reference marker
if i > len(data) {
return nil, false
}
return data[:i], true
}
func fmtReference(data []byte) []byte {
var x reference.Reference
if e := xml.Unmarshal(data, &x); e != nil {
return data
}
out, e := xml.MarshalIndent(x, "", " ")
if e != nil {
return data
}
return out
}
// AddBibliography adds the bibliography to the document. It will be
// added just after the backmatter node. If that node can't be found this
// function returns false and does nothing.
func AddBibliography(doc ast.Node) bool {
where := NodeBackMatter(doc)
if where == nil {
return false
}
norm, inform := CitationToBibliography(doc)
if norm != nil {
ast.AppendChild(where, norm)
}
if inform != nil {
ast.AppendChild(where, inform)
}
return (norm != nil) || (inform != nil)
}

11
vendor/github.com/mmarkdown/mmark/mparser/extensions.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
package mparser
import (
"github.com/gomarkdown/markdown/parser"
)
// Extensions is the default set of extensions mmark requires.
var Extensions = parser.Tables | parser.FencedCode | parser.Autolink | parser.Strikethrough |
parser.SpaceHeadings | parser.HeadingIDs | parser.BackslashLineBreak | parser.SuperSubscript |
parser.DefinitionLists | parser.MathJax | parser.AutoHeadingIDs | parser.Footnotes |
parser.OrderedListStart | parser.Attributes | parser.Mmark | parser.Includes
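// A minimal usage sketch (not part of this file): the flag set is handed to
// the gomarkdown parser when constructing it:
//
// p := parser.NewWithExtensions(Extensions)
// doc := p.Parse(input)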

59
vendor/github.com/mmarkdown/mmark/mparser/hook.go generated vendored Normal file
View file

@ -0,0 +1,59 @@
package mparser
import (
"io/ioutil"
"log"
"path/filepath"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/parser"
)
var UnsafeInclude parser.Flags = 1 << 3
// Hook will call both TitleHook and ReferenceHook.
func Hook(data []byte) (ast.Node, []byte, int) {
n, b, i := TitleHook(data)
if n != nil {
return n, b, i
}
return ReferenceHook(data)
}
// ReadInclude is the hook to read includes.
// It supports the following options for address:
//
// 4,5 - line numbers separated by commas
// N, - line numbers, end not specified, read until the end
// /start/,/end/ - regexps separated by commas
//
// and optionally a prefix="" string.
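//
// Illustrative address strings (the include syntax around them is omitted):
//
// 4,10             a range of line numbers
// 15,              from line 15, reading until the end of the file
// /^func/,/^}/     from a match of ^func to the next match of ^}
// prefix="> ";4,10 the same range, with every output line prefixed by "> "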
func (i Initial) ReadInclude(from, file string, address []byte) []byte {
path := i.path(from, file)
if i.Flags&UnsafeInclude == 0 {
if ok := i.pathAllowed(path); !ok {
log.Printf("Failure to read: %q: path is not on or below %q", path, i.i)
return nil
}
}
data, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("Failure to read: %q (from %q)", err, filepath.Join(from, "*"))
return nil
}
data, err = parseAddress(address, data)
if err != nil {
log.Printf("Failure to parse address for %q: %q (from %q)", path, err, filepath.Join(from, "*"))
return nil
}
if len(data) == 0 {
return data
}
if data[len(data)-1] != '\n' {
data = append(data, '\n')
}
return data
}

251
vendor/github.com/mmarkdown/mmark/mparser/include.go generated vendored Normal file
View file

@ -0,0 +1,251 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted for mmark, by Miek Gieben, 2015.
// Adapted for mmark2 (vastly simplified and features removed), 2018.
package mparser
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/gomarkdown/markdown/parser"
)
// Initial is the initial file we are working on; empty for stdin, and adjusted if we have an absolute or relative file.
type Initial struct {
Flags parser.Flags
i string
}
// NewInitial returns an initialized Initial.
func NewInitial(s string) Initial {
if path.IsAbs(s) {
return Initial{i: path.Dir(s)}
}
cwd, _ := os.Getwd()
if s == "" {
return Initial{i: cwd}
}
return Initial{i: path.Dir(filepath.Join(cwd, s))}
}
// path returns the full path we should use according to from, file and initial.
func (i Initial) path(from, file string) string {
if path.IsAbs(file) {
return file
}
if path.IsAbs(from) {
return filepath.Join(from, file)
}
f1 := filepath.Join(i.i, from)
return filepath.Join(f1, file)
}
// pathAllowed returns true if file is on the same level or below the initial file.
func (i Initial) pathAllowed(file string) bool {
x, err := filepath.Rel(i.i, file)
if err != nil {
return false
}
return !strings.Contains(x, "..")
}
// parseAddress parses a code address directive and returns the bytes or an error.
func parseAddress(addr []byte, data []byte) ([]byte, error) {
addr = bytes.TrimSpace(addr)
if len(addr) == 0 {
return data, nil
}
// check for prefix, either as ;prefix, prefix; or just standalone prefix.
var prefix []byte
if x := bytes.Index(addr, []byte("prefix=")); x >= 0 {
if x+len("prefix=") >= len(addr) {
return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
}
start := x + len("prefix=")
quote := addr[start]
if quote != '\'' && quote != '"' {
return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
}
end := SkipUntilChar(addr, start+1, quote)
if end == len(addr) {
return nil, fmt.Errorf("unterminated prefix in address specification: %s", addr)
}
prefix = addr[start+1 : end]
if len(prefix) == 0 {
return nil, fmt.Errorf("invalid prefix in address specification: %s", addr)
}
addr = append(addr[:x], addr[end+1:]...)
addr = bytes.Replace(addr, []byte(";"), []byte(""), 1)
if len(addr) == 0 {
data = addPrefix(data, prefix)
return data, nil
}
}
lo, hi, err := addrToByteRange(addr, data)
if err != nil {
return nil, err
}
// Acme pattern matches can stop mid-line,
// so run to end of line in both directions if not at line start/end.
for lo > 0 && data[lo-1] != '\n' {
lo--
}
if hi > 0 {
for hi < len(data) && data[hi-1] != '\n' {
hi++
}
}
data = data[lo:hi]
if prefix != nil {
data = addPrefix(data, prefix)
}
return data, nil
}
// addrToByteRange evaluates the given address. It returns the start and end index of the data we should return.
// Supported syntax: N, M or /start/, /end/ .
func addrToByteRange(addr, data []byte) (lo, hi int, err error) {
chunk := bytes.Split(addr, []byte(","))
if len(chunk) != 2 {
return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
}
left := bytes.TrimSpace(chunk[0])
right := bytes.TrimSpace(chunk[1])
if len(left) == 0 {
return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
}
if len(right) == 0 {
// open-ended right term; handled in the numeric branch below
}
if left[0] == '/' { //regular expression
if left[len(left)-1] != '/' {
return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
}
if right[0] != '/' {
return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
}
if right[len(right)-1] != '/' {
return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
}
lo, hi, err = addrRegexp(data, string(left[1:len(left)-1]), string(right[1:len(right)-1]))
if err != nil {
return 0, 0, err
}
} else {
lo, err = strconv.Atoi(string(left))
if err != nil {
return 0, 0, err
}
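// Translate the left line number into a byte offset by counting newlines.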
i, j := 0, 0
for i < len(data) {
if data[i] == '\n' {
j++
if j >= lo {
break
}
}
i++
}
lo = i
if len(right) == 0 {
hi = len(data)
goto End
}
hi, err = strconv.Atoi(string(right))
if err != nil {
return 0, 0, err
}
i, j = 0, 0
for i < len(data) {
if data[i] == '\n' {
j++
if j+1 >= hi {
break
}
}
i++
}
hi = i
}
End:
if lo > hi {
return 0, 0, fmt.Errorf("invalid address specification: %s", addr)
}
return lo, hi, nil
}
// addrRegexp searches for pattern start and pattern end
func addrRegexp(data []byte, start, end string) (int, int, error) {
start = "(?m:" + start + ")" // match through newlines
reStart, err := regexp.Compile(start)
if err != nil {
return 0, 0, err
}
end = "(?m:" + end + ")"
reEnd, err := regexp.Compile(end)
if err != nil {
return 0, 0, err
}
m := reStart.FindIndex(data)
if len(m) == 0 {
return 0, 0, errors.New("no match for " + start)
}
lo := m[0]
m = reEnd.FindIndex(data[lo:]) // start *from* lo
if len(m) == 0 {
return 0, 0, errors.New("no match for " + end)
}
hi := lo + m[0] // m indexes into data[lo:], so translate back to an absolute offset
return lo, hi, nil
}
func SkipUntilChar(data []byte, i int, c byte) int {
n := len(data)
for i < n && data[i] != c {
i++
}
return i
}
func addPrefix(data, prefix []byte) []byte {
b := &bytes.Buffer{}
b.Write(prefix)
// assured that data ends in newline
i := 0
for i < len(data)-1 {
b.WriteByte(data[i])
if data[i] == '\n' {
b.Write(prefix)
}
i++
}
return b.Bytes()
}

111
vendor/github.com/mmarkdown/mmark/mparser/index.go generated vendored Normal file
View file

@ -0,0 +1,111 @@
package mparser
import (
"bytes"
"fmt"
"sort"
"github.com/gomarkdown/markdown/ast"
"github.com/mmarkdown/mmark/mast"
)
// IndexToDocumentIndex crawls the entire doc searching for indices; it then
// returns an mast.DocumentIndex that contains a tree:
//
// IndexLetter
// - IndexItem
//   - IndexLink
//   - IndexSubItem
//     - IndexLink
//     - IndexLink
//
// Which can then be rendered by the renderer.
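//
// In mmark markdown an index entry is written inline as (!item) or
// (!item, subitem) (syntax as assumed from mmark's docs); each occurrence
// parses into one of the *ast.Index nodes walked below.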
func IndexToDocumentIndex(doc ast.Node) *mast.DocumentIndex {
main := map[string]*mast.IndexItem{}
subitem := map[string][]*mast.IndexSubItem{} // gather these so we can add them in one swoop at the end
// Gather all indexes.
ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
switch i := node.(type) {
case *ast.Index:
item := string(i.Item)
if _, ok := main[item]; !ok {
main[item] = &mast.IndexItem{Index: i}
}
// only the main item
if i.Subitem == nil {
ast.AppendChild(main[item], newLink(i.ID, len(main[item].GetChildren()), i.Primary))
return ast.GoToNext
}
// check if we already have a child with the subitem and then just add the link
for _, sub := range subitem[item] {
if bytes.Equal(sub.Subitem, i.Subitem) {
ast.AppendChild(sub, newLink(i.ID, len(sub.GetChildren()), i.Primary))
return ast.GoToNext
}
}
sub := &mast.IndexSubItem{Index: i}
ast.AppendChild(sub, newLink(i.ID, len(subitem[item]), i.Primary))
subitem[item] = append(subitem[item], sub)
}
return ast.GoToNext
})
if len(main) == 0 {
return nil
}
// Now add the subitem children to the correct main items.
for k, sub := range subitem {
// sort sub here ideally
for j := range sub {
ast.AppendChild(main[k], sub[j])
}
}
keys := []string{}
for k := range main {
keys = append(keys, k)
}
sort.Strings(keys)
letters := []*mast.IndexLetter{}
var prevLetter byte
var il *mast.IndexLetter
for _, k := range keys {
letter := k[0]
if letter != prevLetter {
il = &mast.IndexLetter{}
il.Literal = []byte{letter}
letters = append(letters, il)
}
ast.AppendChild(il, main[k])
prevLetter = letter
}
docIndex := &mast.DocumentIndex{}
for i := range letters {
ast.AppendChild(docIndex, letters[i])
}
return docIndex
}
func newLink(id string, number int, primary bool) *mast.IndexLink {
link := &ast.Link{Destination: []byte(id)}
il := &mast.IndexLink{Link: link, Primary: primary}
il.Literal = []byte(fmt.Sprintf("%d", number))
return il
}
// AddIndex adds an index to the end of the current document. If no indices
// can be found this returns false and no index will be added.
func AddIndex(doc ast.Node) bool {
idx := IndexToDocumentIndex(doc)
if idx == nil {
return false
}
ast.AppendChild(doc, idx)
return true
}

57
vendor/github.com/mmarkdown/mmark/mparser/title.go generated vendored Normal file
View file

@ -0,0 +1,57 @@
package mparser
import (
"log"
"github.com/BurntSushi/toml"
"github.com/gomarkdown/markdown/ast"
"github.com/mmarkdown/mmark/mast"
)
// TitleHook parses a title block and returns it. The start and end can be
// signalled with %%% or --- (the latter to be more in line with Hugo and
// other markdown dialects).
func TitleHook(data []byte) (ast.Node, []byte, int) {
i := 0
if len(data) < 3 {
return nil, nil, 0
}
c := data[i] // first char can either be % or -
if c != '%' && c != '-' {
return nil, nil, 0
}
if data[i] != c || data[i+1] != c || data[i+2] != c {
return nil, nil, 0
}
i += 3
beg := i
found := false
// search for the end marker; bound the lookahead so it stays within data.
for i+2 < len(data) {
if data[i] == c && data[i+1] == c && data[i+2] == c {
found = true
break
}
i++
}
if !found {
return nil, nil, 0
}
node := mast.NewTitle(c)
buf := data[beg:i]
if c == '-' {
node.Content = buf
return node, nil, i + 3
}
if _, err := toml.Decode(string(buf), node.TitleData); err != nil {
log.Printf("Failure parsing title block: %s", err)
}
node.Content = buf
return node, nil, i + 3
}

1
vendor/github.com/tallclair/mdtoc/.gitignore generated vendored Normal file
View file

@ -0,0 +1 @@
mdtoc

28
vendor/github.com/tallclair/mdtoc/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,28 @@
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Community Guidelines
This project follows
[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).

202
vendor/github.com/tallclair/mdtoc/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

53
vendor/github.com/tallclair/mdtoc/README.md generated vendored Normal file
View file

@ -0,0 +1,53 @@
# Markdown Table of Contents Generator
`mdtoc` is a utility for generating a table-of-contents for markdown files.
Only github-flavored markdown is currently supported, but I am open to accepting patches to add
other formats.
# Table of Contents
Generated with `mdtoc --inplace README.md`
<!-- toc -->
- [Usage](#usage)
- [Installation](#installation)
<!-- /toc -->
## Usage
Usage: `mdtoc [OPTIONS] [FILE]...`
Generate a table of contents for a markdown file (github flavor).
TOC may be wrapped in a pair of tags to allow in-place updates:
```
<!-- toc -->
generated TOC goes here
<!-- /toc -->
```
TOC indentation is normalized, so the shallowest header has indentation 0.
**Options:**
`--dryrun` - Whether to check for changes to TOC, rather than overwriting.
Requires `--inplace` flag. Exit code 1 if there are changes.
`--inplace` - Whether to edit the file in-place, or output to STDOUT. Requires
toc tags to be present.
`--skip-prefix` - Whether to ignore any headers before the opening toc
tag. (default true)
For example, with `--skip-prefix=false` the TOC for this file becomes:
```
- [Markdown Table of Contents Generator](#markdown-table-of-contents-generator)
- [Table of Contents](#table-of-contents)
- [Usage](#usage)
- [Installation](#installation)
```
## Installation
```
go get github.com/tallclair/mdtoc
```

11
vendor/github.com/tallclair/mdtoc/go.mod generated vendored Normal file
View file

@ -0,0 +1,11 @@
module github.com/tallclair/mdtoc
go 1.12
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1 // indirect
github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4
github.com/mmarkdown/mmark v2.0.40+incompatible
github.com/stretchr/testify v1.3.0
)

15
vendor/github.com/tallclair/mdtoc/go.sum generated vendored Normal file
View file

@ -0,0 +1,15 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1 h1:E42haoFVyge+y3G2q/lwksiZPu6bMmim1xLxjOjPeEw=
github.com/ekalinin/github-markdown-toc v0.0.0-20190514155158-83fadb60a7f1/go.mod h1:XfZS1iyC28CnllR54Ou2Ero6qs4Rmn7GpVumNSj1DZo=
github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4 h1:vELsocEzlhM4lk2nhxolEaQrMp25u7/i9IX8s9uLads=
github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4/go.mod h1:gmFANS06wAVmF0B9yi65QKsRmPQ97tze7FRLswua+OY=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

301
vendor/github.com/tallclair/mdtoc/mdtoc.go generated vendored Normal file
View file

@ -0,0 +1,301 @@
/*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"regexp"
"strings"
"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/html"
"github.com/gomarkdown/markdown/parser"
"github.com/mmarkdown/mmark/mparser"
)
const (
startTOC = "<!-- toc -->"
endTOC = "<!-- /toc -->"
)
type options struct {
dryrun bool
inplace bool
skipPrefix bool
}
var defaultOptions options
func init() {
flag.BoolVar(&defaultOptions.dryrun, "dryrun", false, "Whether to check for changes to TOC, rather than overwriting. Requires --inplace flag.")
flag.BoolVar(&defaultOptions.inplace, "inplace", false, "Whether to edit the file in-place, or output to STDOUT. Requires toc tags to be present.")
flag.BoolVar(&defaultOptions.skipPrefix, "skip-prefix", true, "Whether to ignore any headers before the opening toc tag.")
flag.Usage = func() {
fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [OPTIONS] [FILE]...\n", os.Args[0])
fmt.Fprintf(flag.CommandLine.Output(), "Generate a table of contents for a markdown file (github flavor).\n")
fmt.Fprintf(flag.CommandLine.Output(), "TOC may be wrapped in a pair of tags to allow in-place updates: <!-- toc --><!-- /toc -->\n")
flag.PrintDefaults()
}
}
func main() {
flag.Parse()
if err := validateArgs(defaultOptions, flag.Args()); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
flag.Usage()
os.Exit(1)
}
hadError := false
for _, file := range flag.Args() {
toc, err := run(file, defaultOptions)
if err != nil {
log.Printf("%s: %v", file, err)
hadError = true
} else if !defaultOptions.inplace {
fmt.Println(toc)
}
}
if hadError {
os.Exit(1)
}
}
func validateArgs(opts options, args []string) error {
if len(args) < 1 {
return fmt.Errorf("must specify at least 1 file")
}
if !opts.inplace && len(args) > 1 {
return fmt.Errorf("non-inplace updates require exactly 1 file")
}
if opts.dryrun && !opts.inplace {
return fmt.Errorf("--dryrun requires --inplace")
}
return nil
}
// run the TOC generator on file with options.
// Returns the generated toc, and any error.
func run(file string, opts options) (string, error) {
raw, err := ioutil.ReadFile(file)
if err != nil {
return "", fmt.Errorf("unable to read %s: %v", file, err)
}
start := bytes.Index(raw, []byte(startTOC))
end := bytes.Index(raw, []byte(endTOC))
if tocTagRequired(opts) {
if start == -1 {
return "", fmt.Errorf("missing opening TOC tag")
}
if end == -1 {
return "", fmt.Errorf("missing closing TOC tag")
}
if end < start {
return "", fmt.Errorf("TOC closing tag before start tag")
}
}
var prefix, doc []byte
// skipPrefix is only used when toc tags are present.
if opts.skipPrefix && start != -1 && end != -1 {
prefix = raw[:start]
doc = raw[end:]
} else {
doc = raw
}
toc, err := generateTOC(prefix, doc)
if err != nil {
return toc, fmt.Errorf("failed to generate toc: %v", err)
}
if !opts.inplace {
return toc, err
}
realStart := start + len(startTOC)
oldTOC := string(raw[realStart:end])
if strings.TrimSpace(oldTOC) == strings.TrimSpace(toc) {
// No changes required.
return toc, nil
} else if opts.dryrun {
return toc, fmt.Errorf("changes found:\n%s", toc)
}
err = atomicWrite(file,
string(raw[:start]),
startTOC+"\n",
string(toc),
string(raw[end:]),
)
return toc, err
}
// atomicWrite writes the chunks sequentially to the filePath.
// A temporary file is used so no changes are made to the original in the case of an error.
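// Opening with O_EXCL means a leftover temporary file from a previous (or
// concurrent) run causes an error instead of being silently clobbered.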
func atomicWrite(filePath string, chunks ...string) error {
tmpPath := filePath + "_tmp"
tmp, err := os.OpenFile(tmpPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return fmt.Errorf("unable to open tepmorary file %s: %v", tmpPath, err)
}
// Cleanup
defer func() {
tmp.Close()
os.Remove(tmpPath)
}()
for _, chunk := range chunks {
if _, err := tmp.WriteString(chunk); err != nil {
return err
}
}
if err := tmp.Close(); err != nil {
return err
}
return os.Rename(tmp.Name(), filePath)
}
// parse parses a raw markdown document to an AST.
func parse(b []byte) ast.Node {
p := parser.NewWithExtensions(parser.CommonExtensions)
p.Opts = parser.Options{
// mparser is required for parsing the --- title blocks
ParserHook: mparser.Hook,
}
return p.Parse(b)
}
func generateTOC(prefix []byte, doc []byte) (string, error) {
prefixMd := parse(prefix)
anchors := make(anchorGen)
// Start counting anchors from the beginning of the doc.
walkHeadings(prefixMd, func(heading *ast.Heading) {
anchors.mkAnchor(asText(heading))
})
md := parse(doc)
baseLvl := headingBase(md)
toc := &bytes.Buffer{}
htmlRenderer := html.NewRenderer(html.RendererOptions{})
walkHeadings(md, func(heading *ast.Heading) {
anchor := anchors.mkAnchor(asText(heading))
content := headingBody(htmlRenderer, heading)
fmt.Fprintf(toc, "%s- [%s](#%s)\n", strings.Repeat(" ", heading.Level-baseLvl), content, anchor)
})
return toc.String(), nil
}
func tocTagRequired(opts options) bool {
return opts.inplace
}
type headingFn func(heading *ast.Heading)
// walkHeadings runs the heading function on each heading in the parsed markdown document.
func walkHeadings(doc ast.Node, headingFn headingFn) error {
var err error
ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
if !entering {
return ast.GoToNext // Don't care about closing the heading section.
}
heading, ok := node.(*ast.Heading)
if !ok {
return ast.GoToNext // Ignore non-heading nodes.
}
if heading.IsTitleblock {
return ast.GoToNext // Ignore title blocks (the --- section)
}
headingFn(heading)
return ast.GoToNext
})
return err
}
func asText(node ast.Node) string {
var text string
ast.WalkFunc(node, func(node ast.Node, entering bool) ast.WalkStatus {
if !entering {
return ast.GoToNext // Don't care about closing the heading section.
}
t, ok := node.(*ast.Text)
if !ok {
return ast.GoToNext // Ignore non-text nodes.
}
text += string(t.AsLeaf().Literal)
return ast.GoToNext
})
return text
}
// headingBody renders the heading body as HTML.
func headingBody(renderer *html.Renderer, heading *ast.Heading) string {
var buf bytes.Buffer
for _, child := range heading.Children {
ast.WalkFunc(child, func(node ast.Node, entering bool) ast.WalkStatus {
return renderer.RenderNode(&buf, node, entering)
})
}
return strings.TrimSpace(buf.String())
}
// headingBase finds the minimum heading level. This is useful for normalizing indentation, such as
// when a top-level heading is skipped in the prefix.
func headingBase(doc ast.Node) int {
baseLvl := math.MaxInt32
walkHeadings(doc, func(heading *ast.Heading) {
if baseLvl > heading.Level {
baseLvl = heading.Level
}
})
return baseLvl
}
// Match punctuation that is filtered out from anchor IDs.
var punctuation = regexp.MustCompile(`[^\w\- ]`)
// anchorGen is used to generate heading anchor IDs, using the github-flavored markdown syntax.
type anchorGen map[string]int
func (a anchorGen) mkAnchor(text string) string {
text = strings.ToLower(text)
text = punctuation.ReplaceAllString(text, "")
text = strings.ReplaceAll(text, " ", "-")
idx := a[text]
a[text] = idx + 1
if idx > 0 {
return fmt.Sprintf("%s-%d", text, idx)
}
return text
}
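// For example, two headings both titled "Usage" yield the anchors "usage" and
// "usage-1", mirroring github's handling of duplicate heading names.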

12
vendor/modules.txt vendored
View file

@ -7,6 +7,8 @@ github.com/Azure/go-autorest/autorest/azure
github.com/Azure/go-autorest/logger
github.com/Azure/go-autorest/version
github.com/Azure/go-autorest/autorest/date
# github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml
# github.com/PuerkitoBio/purell v1.1.1
github.com/PuerkitoBio/purell
# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
@ -66,6 +68,10 @@ github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
# github.com/gomarkdown/markdown v0.0.0-20190222000725-ee6a7931a1e4
github.com/gomarkdown/markdown/ast
github.com/gomarkdown/markdown/html
github.com/gomarkdown/markdown/parser
# github.com/google/btree v0.0.0-20160524151835-7d79101e329e
github.com/google/btree
# github.com/google/go-cmp v0.3.0
@ -125,6 +131,10 @@ github.com/mitchellh/go-ps
github.com/mitchellh/hashstructure
# github.com/mitchellh/mapstructure v1.1.2
github.com/mitchellh/mapstructure
# github.com/mmarkdown/mmark v2.0.40+incompatible
github.com/mmarkdown/mmark/mparser
github.com/mmarkdown/mmark/mast
github.com/mmarkdown/mmark/mast/reference
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
@ -207,6 +217,8 @@ github.com/spf13/afero/mem
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.3
github.com/spf13/pflag
# github.com/tallclair/mdtoc v0.0.0-20190627191617-4dc3d6f90813
github.com/tallclair/mdtoc
# github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926
github.com/tv42/httpunix
# github.com/zakjan/cert-chain-resolver v0.0.0-20180703112424-6076e1ded272