Use glamour and termenv to render/colorize content (#181)

Merge branch 'master' into use-glamour
select Glamour Theme based on BackgroundColor
Merge branch 'master' into use-glamour
Merge branch 'master' into use-glamour
update termenv
update go.mod
label color
colorate
use glamour for issue content
Vendor: Add glamour

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: 6543 <6543@obermui.de>
Reviewed-on: https://gitea.com/gitea/tea/pulls/181
Reviewed-by: techknowlogick <techknowlogick@gitea.io>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
19 vendor/github.com/yuin/goldmark/.gitignore (generated, vendored, new file)
@@ -0,0 +1,19 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test
*.pprof

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

.DS_Store
fuzz/corpus
fuzz/crashers
fuzz/suppressions
fuzz/fuzz-fuzz.zip
21 vendor/github.com/yuin/goldmark/LICENSE (generated, vendored, new file)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 Yusuke Inuzuka

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
16 vendor/github.com/yuin/goldmark/Makefile (generated, vendored, new file)
@@ -0,0 +1,16 @@
.PHONY: test fuzz

test:
	go test -coverprofile=profile.out -coverpkg=github.com/yuin/goldmark,github.com/yuin/goldmark/ast,github.com/yuin/goldmark/extension,github.com/yuin/goldmark/extension/ast,github.com/yuin/goldmark/parser,github.com/yuin/goldmark/renderer,github.com/yuin/goldmark/renderer/html,github.com/yuin/goldmark/text,github.com/yuin/goldmark/util ./...

cov: test
	go tool cover -html=profile.out

fuzz:
	which go-fuzz > /dev/null 2>&1 || (GO111MODULE=off go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build; GO111MODULE=off go get -d github.com/dvyukov/go-fuzz-corpus; true)
	rm -rf ./fuzz/corpus
	rm -rf ./fuzz/crashers
	rm -rf ./fuzz/suppressions
	rm -f ./fuzz/fuzz-fuzz.zip
	cd ./fuzz && go-fuzz-build
	cd ./fuzz && go-fuzz
403 vendor/github.com/yuin/goldmark/README.md (generated, vendored, new file)
@@ -0,0 +1,403 @@
goldmark
==========================================

[GoDoc](http://godoc.org/github.com/yuin/goldmark)
[Build status](https://github.com/yuin/goldmark/actions?query=workflow:test)
[Coverage](https://coveralls.io/github/yuin/goldmark)
[Go Report Card](https://goreportcard.com/report/github.com/yuin/goldmark)

> A Markdown parser written in Go. Easy to extend, standards-compliant, well-structured.

goldmark is compliant with CommonMark 0.29.

Motivation
----------------------
I needed a Markdown parser for Go that satisfies the following requirements:

- Easy to extend.
  - Markdown is poor in document expressions compared to other light markup languages such as reStructuredText.
  - We have extensions to the Markdown syntax, e.g. PHP Markdown Extra, GitHub Flavored Markdown.
- Standards-compliant.
  - Markdown has many dialects.
  - GitHub-Flavored Markdown is widely used and is based upon CommonMark, effectively mooting the question of whether or not CommonMark is an ideal specification.
    - CommonMark is complicated and hard to implement.
- Well-structured.
  - AST-based; preserves source position of nodes.
- Written in pure Go.

[golang-commonmark](https://gitlab.com/golang-commonmark/markdown) may be a good choice, but it seems to be a copy of [markdown-it](https://github.com/markdown-it).

[blackfriday.v2](https://github.com/russross/blackfriday/tree/v2) is a fast and widely-used implementation, but is not CommonMark-compliant and cannot be extended from outside of the package, since its AST uses structs instead of interfaces.

Furthermore, its behavior differs from other implementations in some cases, especially regarding lists: [Deep nested lists don't output correctly #329](https://github.com/russross/blackfriday/issues/329), [List block cannot have a second line #244](https://github.com/russross/blackfriday/issues/244), etc.

This behavior sometimes causes problems. If you migrate your Markdown text from GitHub to blackfriday-based wikis, many lists will immediately be broken.

As mentioned above, CommonMark is complicated and hard to implement, so Markdown parsers based on CommonMark are few and far between.

Features
----------------------

- **Standards-compliant.** goldmark is fully compliant with the latest [CommonMark](https://commonmark.org/) specification.
- **Extensible.** Do you want to add a `@username` mention syntax to Markdown?
  You can easily do so in goldmark. You can add your AST nodes,
  parsers for block-level elements, parsers for inline-level elements,
  transformers for paragraphs, transformers for the whole AST structure, and
  renderers.
- **Performance.** goldmark's performance is on par with that of cmark,
  the CommonMark reference implementation written in C.
- **Robust.** goldmark is tested with [go-fuzz](https://github.com/dvyukov/go-fuzz), a fuzz testing tool.
- **Built-in extensions.** goldmark ships with common extensions like tables, strikethrough,
  task lists, and definition lists.
- **Depends only on standard libraries.**

Installation
----------------------
```bash
$ go get github.com/yuin/goldmark
```
Usage
----------------------
Import packages:

```go
import (
    "bytes"
    "github.com/yuin/goldmark"
)
```

Convert Markdown documents with the CommonMark-compliant mode:

```go
var buf bytes.Buffer
if err := goldmark.Convert(source, &buf); err != nil {
    panic(err)
}
```

With options
------------------------------

```go
var buf bytes.Buffer
if err := goldmark.Convert(source, &buf, parser.WithContext(ctx)); err != nil {
    panic(err)
}
```

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `parser.WithContext` | A `parser.Context` | Context for the parsing phase. |

Context options
----------------------

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `parser.WithIDs` | A `parser.IDs` | `IDs` allows you to change the logic used to generate element IDs (e.g. automatic heading IDs). |
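As a rough sketch of how such a custom ID generator might be wired in, assuming a `parser.IDs` interface with `Generate`/`Put` methods and the `parser.WithIDs` context option listed above; the `prefixedIDs` type and the `doc-` prefix are purely illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/parser"
)

// prefixedIDs prepends a fixed prefix to every generated element ID.
// This sketch does not deduplicate IDs; a real implementation should.
type prefixedIDs struct {
	used map[string]bool
}

func (p *prefixedIDs) Generate(value []byte, kind ast.NodeKind) []byte {
	id := fmt.Sprintf("doc-%s",
		bytes.ToLower(bytes.ReplaceAll(value, []byte(" "), []byte("-"))))
	p.Put([]byte(id))
	return []byte(id)
}

func (p *prefixedIDs) Put(value []byte) {
	p.used[string(value)] = true
}

func main() {
	md := goldmark.New(goldmark.WithParserOptions(parser.WithAutoHeadingID()))
	ctx := parser.NewContext(parser.WithIDs(&prefixedIDs{used: map[string]bool{}}))

	var buf bytes.Buffer
	if err := md.Convert([]byte("# Hello World"), &buf, parser.WithContext(ctx)); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // the heading should carry id="doc-hello-world"
}
```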
Custom parser and renderer
--------------------------
```go
import (
    "bytes"
    "github.com/yuin/goldmark"
    "github.com/yuin/goldmark/extension"
    "github.com/yuin/goldmark/parser"
    "github.com/yuin/goldmark/renderer/html"
)

md := goldmark.New(
    goldmark.WithExtensions(extension.GFM),
    goldmark.WithParserOptions(
        parser.WithAutoHeadingID(),
    ),
    goldmark.WithRendererOptions(
        html.WithHardWraps(),
        html.WithXHTML(),
    ),
)
var buf bytes.Buffer
if err := md.Convert(source, &buf); err != nil {
    panic(err)
}
```

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `goldmark.WithParser` | `parser.Parser` | This option must be passed before `goldmark.WithParserOptions` and `goldmark.WithExtensions` |
| `goldmark.WithRenderer` | `renderer.Renderer` | This option must be passed before `goldmark.WithRendererOptions` and `goldmark.WithExtensions` |
| `goldmark.WithParserOptions` | `...parser.Option` | |
| `goldmark.WithRendererOptions` | `...renderer.Option` | |
| `goldmark.WithExtensions` | `...goldmark.Extender` | |

Parser and Renderer options
------------------------------

### Parser options

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `parser.WithBlockParsers` | A `util.PrioritizedSlice` whose elements are `parser.BlockParser` | Parsers for parsing block level elements. |
| `parser.WithInlineParsers` | A `util.PrioritizedSlice` whose elements are `parser.InlineParser` | Parsers for parsing inline level elements. |
| `parser.WithParagraphTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ParagraphTransformer` | Transformers for transforming paragraph nodes. |
| `parser.WithASTTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ASTTransformer` | Transformers for transforming an AST. |
| `parser.WithAutoHeadingID` | `-` | Enables auto heading ids. |
| `parser.WithAttribute` | `-` | Enables custom attributes. Currently only headings support attributes. |

### HTML Renderer options

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `html.WithWriter` | `html.Writer` | `html.Writer` for writing contents to an `io.Writer`. |
| `html.WithHardWraps` | `-` | Render newlines as `<br>`. |
| `html.WithXHTML` | `-` | Render as XHTML. |
| `html.WithUnsafe` | `-` | By default, goldmark does not render raw HTML or potentially dangerous links. With this option, goldmark renders such content as written. |

### Built-in extensions

- `extension.Table`
  - [GitHub Flavored Markdown: Tables](https://github.github.com/gfm/#tables-extension-)
- `extension.Strikethrough`
  - [GitHub Flavored Markdown: Strikethrough](https://github.github.com/gfm/#strikethrough-extension-)
- `extension.Linkify`
  - [GitHub Flavored Markdown: Autolinks](https://github.github.com/gfm/#autolinks-extension-)
- `extension.TaskList`
  - [GitHub Flavored Markdown: Task list items](https://github.github.com/gfm/#task-list-items-extension-)
- `extension.GFM`
  - This extension enables Table, Strikethrough, Linkify and TaskList.
  - This extension does not filter tags defined in [6.11: Disallowed Raw HTML (extension)](https://github.github.com/gfm/#disallowed-raw-html-extension-).
    If you need to filter HTML tags, see [Security](#security).
- `extension.DefinitionList`
  - [PHP Markdown Extra: Definition lists](https://michelf.ca/projects/php-markdown/extra/#def-list)
- `extension.Footnote`
  - [PHP Markdown Extra: Footnotes](https://michelf.ca/projects/php-markdown/extra/#footnotes)
- `extension.Typographer`
  - This extension substitutes punctuation with typographic entities like [smartypants](https://daringfireball.net/projects/smartypants/).
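Several of the built-in extensions listed above can be combined on a single converter. A minimal sketch (the sample Markdown is made up):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
)

func main() {
	// Enable the GFM bundle plus the PHP Markdown Extra style extensions.
	md := goldmark.New(
		goldmark.WithExtensions(
			extension.GFM,
			extension.DefinitionList,
			extension.Footnote,
			extension.Typographer,
		),
	)

	source := []byte("~~old~~ new[^1]\n\n[^1]: a footnote\n")
	var buf bytes.Buffer
	if err := md.Convert(source, &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
```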
### Attributes
The `parser.WithAttribute` option allows you to define attributes on some elements.

Currently only headings support attributes.

**Attributes are being discussed in the
[CommonMark forum](https://talk.commonmark.org/t/consistent-attribute-syntax/272).
This syntax may possibly change in the future.**

#### Headings

```
## heading ## {#id .className attrName=attrValue class="class1 class2"}

## heading {#id .className attrName=attrValue class="class1 class2"}
```

```
heading {#id .className attrName=attrValue}
============
```
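A minimal sketch of enabling this option (the heading text and attribute values below are illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/parser"
)

func main() {
	// parser.WithAttribute enables the heading attribute syntax shown above.
	md := goldmark.New(
		goldmark.WithParserOptions(parser.WithAttribute()),
	)

	var buf bytes.Buffer
	if err := md.Convert([]byte(`## heading {#intro .title}`), &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // e.g. <h2 id="intro" class="title">heading</h2>
}
```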
### Table extension
The Table extension implements [Table (extension)](https://github.github.com/gfm/#tables-extension-), as
defined in the [GitHub Flavored Markdown Spec](https://github.github.com/gfm/).

The spec is defined for XHTML, so it uses some attributes that are deprecated in HTML5.

You can override the alignment rendering method via options.

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `extension.WithTableCellAlignMethod` | `extension.TableCellAlignMethod` | Specifies how table cells are aligned. |

### Typographer extension

The Typographer extension translates plain ASCII punctuation characters into typographic-punctuation HTML entities.

Default substitutions are:

| Punctuation | Default entity |
| ----------- | -------------- |
| `'`         | `‘`, `’` |
| `"`         | `“`, `”` |
| `--`        | `–` |
| `---`       | `—` |
| `...`       | `…` |
| `<<`        | `«` |
| `>>`        | `»` |

You can override the default substitutions via `extension.WithTypographicSubstitutions`:

```go
markdown := goldmark.New(
    goldmark.WithExtensions(
        extension.NewTypographer(
            extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
                extension.LeftSingleQuote:  []byte("‚"),
                extension.RightSingleQuote: nil, // nil disables a substitution
            }),
        ),
    ),
)
```

### Linkify extension

The Linkify extension implements [Autolinks (extension)](https://github.github.com/gfm/#autolinks-extension-), as
defined in the [GitHub Flavored Markdown Spec](https://github.github.com/gfm/).

Since the spec does not define details about URLs, there are numerous ambiguous cases.

You can override autolinking patterns via options.

| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `extension.WithLinkifyAllowedProtocols` | `[][]byte` | List of allowed protocols such as `[][]byte{ []byte("http:") }` |
| `extension.WithLinkifyURLRegexp` | `*regexp.Regexp` | Regexp that defines URLs, including protocols |
| `extension.WithLinkifyWWWRegexp` | `*regexp.Regexp` | Regexp that defines URLs starting with `www.`. This pattern corresponds to [the extended www autolink](https://github.github.com/gfm/#extended-www-autolink) |
| `extension.WithLinkifyEmailRegexp` | `*regexp.Regexp` | Regexp that defines email addresses |

An example using [xurls](https://github.com/mvdan/xurls):

```go
import "mvdan.cc/xurls/v2"

markdown := goldmark.New(
    goldmark.WithRendererOptions(
        html.WithXHTML(),
        html.WithUnsafe(),
    ),
    goldmark.WithExtensions(
        extension.NewLinkify(
            extension.WithLinkifyAllowedProtocols([][]byte{
                []byte("http:"),
                []byte("https:"),
            }),
            extension.WithLinkifyURLRegexp(
                xurls.Strict(),
            ),
        ),
    ),
)
```

Security
--------------------
By default, goldmark does not render raw HTML or potentially-dangerous URLs.
If you need to gain more control over untrusted contents, it is recommended that you
use an HTML sanitizer such as [bluemonday](https://github.com/microcosm-cc/bluemonday).
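A minimal sketch of that workflow (the policy choice and input are illustrative, not a recommendation from the library itself):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/microcosm-cc/bluemonday"
	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/renderer/html"
)

func main() {
	// html.WithUnsafe lets raw HTML through, so the output must be sanitized
	// before it is served to users.
	md := goldmark.New(goldmark.WithRendererOptions(html.WithUnsafe()))

	source := []byte("Hello <script>alert('x')</script> *world*")
	var unsafeBuf bytes.Buffer
	if err := md.Convert(source, &unsafeBuf); err != nil {
		panic(err)
	}

	// UGCPolicy is bluemonday's policy for user-generated content.
	safe := bluemonday.UGCPolicy().SanitizeBytes(unsafeBuf.Bytes())
	fmt.Println(string(safe))
}
```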
Benchmark
--------------------
You can run this benchmark in the `_benchmark` directory.

### against other golang libraries

blackfriday v2 seems to be the fastest, but as it is not CommonMark compliant, its performance cannot be directly compared to that of the CommonMark-compliant libraries.

goldmark, meanwhile, builds a clean, extensible AST structure, achieves full compliance with
CommonMark, and consumes less memory, all while being reasonably fast.

```
goos: darwin
goarch: amd64
BenchmarkMarkdown/Blackfriday-v2-12    326    3465240 ns/op   3298861 B/op   20047 allocs/op
BenchmarkMarkdown/GoldMark-12          303    3927494 ns/op   2574809 B/op   13853 allocs/op
BenchmarkMarkdown/CommonMark-12        244    4900853 ns/op   2753851 B/op   20527 allocs/op
BenchmarkMarkdown/Lute-12              130    9195245 ns/op   9175030 B/op  123534 allocs/op
BenchmarkMarkdown/GoMarkdown-12          9  113541994 ns/op   2187472 B/op   22173 allocs/op
```

### against cmark (CommonMark reference implementation written in C)

```
----------- cmark -----------
file: _data.md
iteration: 50
average: 0.0037760639 sec
go run ./goldmark_benchmark.go
------- goldmark -------
file: _data.md
iteration: 50
average: 0.0040964230 sec
```

As you can see, goldmark's performance is on par with cmark's.

Extensions
--------------------

- [goldmark-meta](https://github.com/yuin/goldmark-meta): A YAML metadata
  extension for the goldmark Markdown parser.
- [goldmark-highlighting](https://github.com/yuin/goldmark-highlighting): A syntax-highlighting extension
  for the goldmark markdown parser.
- [goldmark-mathjax](https://github.com/litao91/goldmark-mathjax): MathJax support for the goldmark markdown parser

goldmark internals (for extension developers)
----------------------------------------------
### Overview
goldmark's Markdown processing is outlined in the diagram below.

```
<Markdown in []byte, parser.Context>
           |
           V
+-------- parser.Parser ---------------------------
| 1. Parse block elements into AST
|   1. If a parsed block is a paragraph, apply
|      ast.ParagraphTransformer
| 2. Traverse AST and parse blocks.
|   1. Process delimiters(emphasis) at the end of
|      block parsing
| 3. Apply parser.ASTTransformers to AST
           |
           V
        <ast.Node>
           |
           V
+------- renderer.Renderer ------------------------
| 1. Traverse AST and apply renderer.NodeRenderer
|    corresponding to the node type
           |
           V
        <Output>
```

### Parsing
Markdown documents are read through the `text.Reader` interface.

AST nodes do not hold concrete text; instead, they hold segment information pointing into the source document, represented by `text.Segment`.

`text.Segment` has 3 attributes: `Start`, `Stop`, `Padding`.
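A small sketch of what that means in practice (the offsets below are illustrative); a segment records byte offsets into the source and can be turned back into text with its `Value` method:

```go
package main

import (
	"fmt"

	"github.com/yuin/goldmark/text"
)

func main() {
	source := []byte("# Hello goldmark")

	// A segment records byte offsets into the source rather than copying text.
	seg := text.NewSegment(2, 7) // covers "Hello"
	fmt.Printf("%s\n", seg.Value(source))

	// WithStop returns a copy of the segment with a new stop offset.
	longer := seg.WithStop(len(source))
	fmt.Printf("%s\n", longer.Value(source))
}
```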
(TBC)

**TODO**

See the `extension` directory for examples of extensions.

Summary:

1. Define an AST node as a struct in which `ast.BaseBlock` or `ast.BaseInline` is embedded.
2. Write a parser that implements `parser.BlockParser` or `parser.InlineParser`.
3. Write a renderer that implements `renderer.NodeRenderer`.
4. Define your goldmark extension that implements `goldmark.Extender`.
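The sketch below is not the library's own example; it skips custom parsers and renderers and instead wires an AST transformer (see the `parser.WithASTTransformers` option above) through a `goldmark.Extender`. The `headingClasser` type and the `doc-heading` class are invented for illustration:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/parser"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

// headingClasser adds a CSS class to every heading node after parsing.
type headingClasser struct{}

func (t *headingClasser) Transform(doc *ast.Document, reader text.Reader, pc parser.Context) {
	_ = ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
		if entering && n.Kind() == ast.KindHeading {
			n.SetAttributeString("class", []byte("doc-heading"))
		}
		return ast.WalkContinue, nil
	})
}

// headingClassExtension wires the transformer into a goldmark.Markdown instance.
type headingClassExtension struct{}

func (e *headingClassExtension) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(
		parser.WithASTTransformers(util.Prioritized(&headingClasser{}, 100)),
	)
}

func main() {
	md := goldmark.New(goldmark.WithExtensions(&headingClassExtension{}))
	var buf bytes.Buffer
	if err := md.Convert([]byte("# Title"), &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // e.g. <h1 class="doc-heading">Title</h1>
}
```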
Donation
--------------------
BTC: 1NEDSyUmo4SMTDP83JJQSWi1MvQUGGNMZB

License
--------------------
MIT

Author
--------------------
Yusuke Inuzuka
492 vendor/github.com/yuin/goldmark/ast/ast.go (generated, vendored, new file)
@@ -0,0 +1,492 @@
|
||||
// Package ast defines AST nodes that represent markdown elements.
|
||||
package ast
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
textm "github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A NodeType indicates what type a node belongs to.
|
||||
type NodeType int
|
||||
|
||||
const (
|
||||
// TypeBlock indicates that a node is kind of block nodes.
|
||||
TypeBlock NodeType = iota + 1
|
||||
// TypeInline indicates that a node is kind of inline nodes.
|
||||
TypeInline
|
||||
// TypeDocument indicates that a node is kind of document nodes.
|
||||
TypeDocument
|
||||
)
|
||||
|
||||
// NodeKind indicates more specific type than NodeType.
|
||||
type NodeKind int
|
||||
|
||||
func (k NodeKind) String() string {
|
||||
return kindNames[k]
|
||||
}
|
||||
|
||||
var kindMax NodeKind
|
||||
var kindNames = []string{""}
|
||||
|
||||
// NewNodeKind returns a new Kind value.
|
||||
func NewNodeKind(name string) NodeKind {
|
||||
kindMax++
|
||||
kindNames = append(kindNames, name)
|
||||
return kindMax
|
||||
}
|
||||
|
||||
// An Attribute is an attribute of the Node
|
||||
type Attribute struct {
|
||||
Name []byte
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
var attrNameIDS = []byte("#")
|
||||
var attrNameID = []byte("id")
|
||||
var attrNameClassS = []byte(".")
|
||||
var attrNameClass = []byte("class")
|
||||
|
||||
// A Node interface defines basic AST node functionalities.
|
||||
type Node interface {
|
||||
// Type returns a type of this node.
|
||||
Type() NodeType
|
||||
|
||||
// Kind returns a kind of this node.
|
||||
Kind() NodeKind
|
||||
|
||||
// NextSibling returns a next sibling node of this node.
|
||||
NextSibling() Node
|
||||
|
||||
// PreviousSibling returns a previous sibling node of this node.
|
||||
PreviousSibling() Node
|
||||
|
||||
// Parent returns a parent node of this node.
|
||||
Parent() Node
|
||||
|
||||
// SetParent sets a parent node to this node.
|
||||
SetParent(Node)
|
||||
|
||||
// SetPreviousSibling sets a previous sibling node to this node.
|
||||
SetPreviousSibling(Node)
|
||||
|
||||
// SetNextSibling sets a next sibling node to this node.
|
||||
SetNextSibling(Node)
|
||||
|
||||
// HasChildren returns true if this node has any children, otherwise false.
|
||||
HasChildren() bool
|
||||
|
||||
// ChildCount returns a total number of children.
|
||||
ChildCount() int
|
||||
|
||||
// FirstChild returns a first child of this node.
|
||||
FirstChild() Node
|
||||
|
||||
// LastChild returns a last child of this node.
|
||||
LastChild() Node
|
||||
|
||||
// AppendChild appends a child node to the tail of the children.
|
||||
AppendChild(self, child Node)
|
||||
|
||||
// RemoveChild removes a child node from this node.
// If the given node is not a child of this node, RemoveChild does nothing.
|
||||
RemoveChild(self, child Node)
|
||||
|
||||
// RemoveChildren removes all children from this node.
|
||||
RemoveChildren(self Node)
|
||||
|
||||
// SortChildren sorts children by the given comparator.
|
||||
SortChildren(comparator func(n1, n2 Node) int)
|
||||
|
||||
// ReplaceChild replaces a node v1 with a node insertee.
// If v1 is not a child of this node, ReplaceChild appends insertee to the
// tail of the children.
|
||||
ReplaceChild(self, v1, insertee Node)
|
||||
|
||||
// InsertBefore inserts a node insertee before a node v1.
// If v1 is not a child of this node, InsertBefore appends insertee to the
// tail of the children.
|
||||
InsertBefore(self, v1, insertee Node)
|
||||
|
||||
// InsertAfter inserts a node insertee after a node v1.
// If v1 is not a child of this node, InsertAfter appends insertee to the
// tail of the children.
|
||||
InsertAfter(self, v1, insertee Node)
|
||||
|
||||
// Dump dumps an AST tree structure to stdout.
// This function is intended solely for debugging.
// level is an indent level. Implementers should indent information with
// 2 * level spaces.
|
||||
Dump(source []byte, level int)
|
||||
|
||||
// Text returns text values of this node.
|
||||
Text(source []byte) []byte
|
||||
|
||||
// HasBlankPreviousLines returns true if the row before this node is blank,
|
||||
// otherwise false.
|
||||
// This method is valid only for block nodes.
|
||||
HasBlankPreviousLines() bool
|
||||
|
||||
// SetBlankPreviousLines sets whether the row before this node is blank.
|
||||
// This method is valid only for block nodes.
|
||||
SetBlankPreviousLines(v bool)
|
||||
|
||||
// Lines returns text segments that hold positions in a source.
|
||||
// This method is valid only for block nodes.
|
||||
Lines() *textm.Segments
|
||||
|
||||
// SetLines sets text segments that hold positions in a source.
|
||||
// This method is valid only for block nodes.
|
||||
SetLines(*textm.Segments)
|
||||
|
||||
// IsRaw returns true if contents should be rendered as 'raw' contents.
|
||||
IsRaw() bool
|
||||
|
||||
// SetAttribute sets the given value to the attributes.
|
||||
SetAttribute(name []byte, value interface{})
|
||||
|
||||
// SetAttributeString sets the given value to the attributes.
|
||||
SetAttributeString(name string, value interface{})
|
||||
|
||||
// Attribute returns a (attribute value, true) if an attribute
|
||||
// associated with the given name is found, otherwise
|
||||
// (nil, false)
|
||||
Attribute(name []byte) (interface{}, bool)
|
||||
|
||||
// AttributeString returns a (attribute value, true) if an attribute
|
||||
// associated with the given name is found, otherwise
|
||||
// (nil, false)
|
||||
AttributeString(name string) (interface{}, bool)
|
||||
|
||||
// Attributes returns a list of attributes.
|
||||
// This may be nil if there are no attributes.
|
||||
Attributes() []Attribute
|
||||
|
||||
// RemoveAttributes removes all attributes from this node.
|
||||
RemoveAttributes()
|
||||
}
|
||||
|
||||
// A BaseNode struct implements the Node interface.
|
||||
type BaseNode struct {
|
||||
firstChild Node
|
||||
lastChild Node
|
||||
parent Node
|
||||
next Node
|
||||
prev Node
|
||||
childCount int
|
||||
attributes []Attribute
|
||||
}
|
||||
|
||||
func ensureIsolated(v Node) {
|
||||
if p := v.Parent(); p != nil {
|
||||
p.RemoveChild(p, v)
|
||||
}
|
||||
}
|
||||
|
||||
// HasChildren implements Node.HasChildren .
|
||||
func (n *BaseNode) HasChildren() bool {
|
||||
return n.firstChild != nil
|
||||
}
|
||||
|
||||
// SetPreviousSibling implements Node.SetPreviousSibling .
|
||||
func (n *BaseNode) SetPreviousSibling(v Node) {
|
||||
n.prev = v
|
||||
}
|
||||
|
||||
// SetNextSibling implements Node.SetNextSibling .
|
||||
func (n *BaseNode) SetNextSibling(v Node) {
|
||||
n.next = v
|
||||
}
|
||||
|
||||
// PreviousSibling implements Node.PreviousSibling .
|
||||
func (n *BaseNode) PreviousSibling() Node {
|
||||
return n.prev
|
||||
}
|
||||
|
||||
// NextSibling implements Node.NextSibling .
|
||||
func (n *BaseNode) NextSibling() Node {
|
||||
return n.next
|
||||
}
|
||||
|
||||
// RemoveChild implements Node.RemoveChild .
|
||||
func (n *BaseNode) RemoveChild(self, v Node) {
|
||||
if v.Parent() != self {
|
||||
return
|
||||
}
|
||||
n.childCount--
|
||||
prev := v.PreviousSibling()
|
||||
next := v.NextSibling()
|
||||
if prev != nil {
|
||||
prev.SetNextSibling(next)
|
||||
} else {
|
||||
n.firstChild = next
|
||||
}
|
||||
if next != nil {
|
||||
next.SetPreviousSibling(prev)
|
||||
} else {
|
||||
n.lastChild = prev
|
||||
}
|
||||
v.SetParent(nil)
|
||||
v.SetPreviousSibling(nil)
|
||||
v.SetNextSibling(nil)
|
||||
}
|
||||
|
||||
// RemoveChildren implements Node.RemoveChildren .
|
||||
func (n *BaseNode) RemoveChildren(self Node) {
|
||||
for c := n.firstChild; c != nil; {
|
||||
c.SetParent(nil)
|
||||
c.SetPreviousSibling(nil)
|
||||
next := c.NextSibling()
|
||||
c.SetNextSibling(nil)
|
||||
c = next
|
||||
}
|
||||
n.firstChild = nil
|
||||
n.lastChild = nil
|
||||
n.childCount = 0
|
||||
}
|
||||
|
||||
// SortChildren implements Node.SortChildren
|
||||
func (n *BaseNode) SortChildren(comparator func(n1, n2 Node) int) {
|
||||
var sorted Node
|
||||
current := n.firstChild
|
||||
for current != nil {
|
||||
next := current.NextSibling()
|
||||
if sorted == nil || comparator(sorted, current) >= 0 {
|
||||
current.SetNextSibling(sorted)
|
||||
if sorted != nil {
|
||||
sorted.SetPreviousSibling(current)
|
||||
}
|
||||
sorted = current
|
||||
sorted.SetPreviousSibling(nil)
|
||||
} else {
|
||||
c := sorted
|
||||
for c.NextSibling() != nil && comparator(c.NextSibling(), current) < 0 {
|
||||
c = c.NextSibling()
|
||||
}
|
||||
current.SetNextSibling(c.NextSibling())
|
||||
current.SetPreviousSibling(c)
|
||||
if c.NextSibling() != nil {
|
||||
c.NextSibling().SetPreviousSibling(current)
|
||||
}
|
||||
c.SetNextSibling(current)
|
||||
}
|
||||
current = next
|
||||
}
|
||||
n.firstChild = sorted
|
||||
for c := n.firstChild; c != nil; c = c.NextSibling() {
|
||||
n.lastChild = c
|
||||
}
|
||||
}
|
||||
|
||||
// FirstChild implements Node.FirstChild .
|
||||
func (n *BaseNode) FirstChild() Node {
|
||||
return n.firstChild
|
||||
}
|
||||
|
||||
// LastChild implements Node.LastChild .
|
||||
func (n *BaseNode) LastChild() Node {
|
||||
return n.lastChild
|
||||
}
|
||||
|
||||
// ChildCount implements Node.ChildCount .
|
||||
func (n *BaseNode) ChildCount() int {
|
||||
return n.childCount
|
||||
}
|
||||
|
||||
// Parent implements Node.Parent .
|
||||
func (n *BaseNode) Parent() Node {
|
||||
return n.parent
|
||||
}
|
||||
|
||||
// SetParent implements Node.SetParent .
|
||||
func (n *BaseNode) SetParent(v Node) {
|
||||
n.parent = v
|
||||
}
|
||||
|
||||
// AppendChild implements Node.AppendChild .
|
||||
func (n *BaseNode) AppendChild(self, v Node) {
|
||||
ensureIsolated(v)
|
||||
if n.firstChild == nil {
|
||||
n.firstChild = v
|
||||
v.SetNextSibling(nil)
|
||||
v.SetPreviousSibling(nil)
|
||||
} else {
|
||||
last := n.lastChild
|
||||
last.SetNextSibling(v)
|
||||
v.SetPreviousSibling(last)
|
||||
}
|
||||
v.SetParent(self)
|
||||
n.lastChild = v
|
||||
n.childCount++
|
||||
}
|
||||
|
||||
// ReplaceChild implements Node.ReplaceChild .
|
||||
func (n *BaseNode) ReplaceChild(self, v1, insertee Node) {
|
||||
n.InsertBefore(self, v1, insertee)
|
||||
n.RemoveChild(self, v1)
|
||||
}
|
||||
|
||||
// InsertAfter implements Node.InsertAfter .
|
||||
func (n *BaseNode) InsertAfter(self, v1, insertee Node) {
|
||||
n.InsertBefore(self, v1.NextSibling(), insertee)
|
||||
}
|
||||
|
||||
// InsertBefore implements Node.InsertBefore .
|
||||
func (n *BaseNode) InsertBefore(self, v1, insertee Node) {
|
||||
n.childCount++
|
||||
if v1 == nil {
|
||||
n.AppendChild(self, insertee)
|
||||
return
|
||||
}
|
||||
ensureIsolated(insertee)
|
||||
if v1.Parent() == self {
|
||||
c := v1
|
||||
prev := c.PreviousSibling()
|
||||
if prev != nil {
|
||||
prev.SetNextSibling(insertee)
|
||||
insertee.SetPreviousSibling(prev)
|
||||
} else {
|
||||
n.firstChild = insertee
|
||||
insertee.SetPreviousSibling(nil)
|
||||
}
|
||||
insertee.SetNextSibling(c)
|
||||
c.SetPreviousSibling(insertee)
|
||||
insertee.SetParent(self)
|
||||
}
|
||||
}
|
||||
|
||||
// Text implements Node.Text .
|
||||
func (n *BaseNode) Text(source []byte) []byte {
|
||||
var buf bytes.Buffer
|
||||
for c := n.firstChild; c != nil; c = c.NextSibling() {
|
||||
buf.Write(c.Text(source))
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// SetAttribute implements Node.SetAttribute.
|
||||
func (n *BaseNode) SetAttribute(name []byte, value interface{}) {
|
||||
if n.attributes == nil {
|
||||
n.attributes = make([]Attribute, 0, 10)
|
||||
} else {
|
||||
for i, a := range n.attributes {
|
||||
if bytes.Equal(a.Name, name) {
|
||||
n.attributes[i].Name = name
|
||||
n.attributes[i].Value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
n.attributes = append(n.attributes, Attribute{name, value})
|
||||
}
|
||||
|
||||
// SetAttributeString implements Node.SetAttributeString
|
||||
func (n *BaseNode) SetAttributeString(name string, value interface{}) {
|
||||
n.SetAttribute(util.StringToReadOnlyBytes(name), value)
|
||||
}
|
||||
|
||||
// Attribute implements Node.Attribute.
|
||||
func (n *BaseNode) Attribute(name []byte) (interface{}, bool) {
|
||||
if n.attributes == nil {
|
||||
return nil, false
|
||||
}
|
||||
for i, a := range n.attributes {
|
||||
if bytes.Equal(a.Name, name) {
|
||||
return n.attributes[i].Value, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// AttributeString implements Node.AttributeString.
|
||||
func (n *BaseNode) AttributeString(s string) (interface{}, bool) {
|
||||
return n.Attribute(util.StringToReadOnlyBytes(s))
|
||||
}
|
||||
|
||||
// Attributes implements Node.Attributes
|
||||
func (n *BaseNode) Attributes() []Attribute {
|
||||
return n.attributes
|
||||
}
|
||||
|
||||
// RemoveAttributes implements Node.RemoveAttributes
|
||||
func (n *BaseNode) RemoveAttributes() {
|
||||
n.attributes = nil
|
||||
}
|
||||
|
||||
// DumpHelper is a helper function to implement Node.Dump.
// kv is a set of attribute name/value pairs.
// cb is a function called after the name and attributes have been written.
|
||||
func DumpHelper(v Node, source []byte, level int, kv map[string]string, cb func(int)) {
|
||||
name := v.Kind().String()
|
||||
indent := strings.Repeat(" ", level)
|
||||
fmt.Printf("%s%s {\n", indent, name)
|
||||
indent2 := strings.Repeat(" ", level+1)
|
||||
if v.Type() == TypeBlock {
|
||||
fmt.Printf("%sRawText: \"", indent2)
|
||||
for i := 0; i < v.Lines().Len(); i++ {
|
||||
line := v.Lines().At(i)
|
||||
fmt.Printf("%s", line.Value(source))
|
||||
}
|
||||
fmt.Printf("\"\n")
|
||||
fmt.Printf("%sHasBlankPreviousLines: %v\n", indent2, v.HasBlankPreviousLines())
|
||||
}
|
||||
for name, value := range kv {
|
||||
fmt.Printf("%s%s: %s\n", indent2, name, value)
|
||||
}
|
||||
if cb != nil {
|
||||
cb(level + 1)
|
||||
}
|
||||
for c := v.FirstChild(); c != nil; c = c.NextSibling() {
|
||||
c.Dump(source, level+1)
|
||||
}
|
||||
fmt.Printf("%s}\n", indent)
|
||||
}
|
||||
|
||||
// WalkStatus represents a current status of the Walk function.
|
||||
type WalkStatus int
|
||||
|
||||
const (
|
||||
// WalkStop indicates no more walking needed.
|
||||
WalkStop WalkStatus = iota + 1
|
||||
|
||||
// WalkSkipChildren indicates that Walk won't walk on children of the current
// node.
|
||||
WalkSkipChildren
|
||||
|
||||
// WalkContinue indicates that Walk can continue to walk.
|
||||
WalkContinue
|
||||
)
|
||||
|
||||
// Walker is a function that will be called when Walk finds a new node.
// entering is set to true before the children are walked, and to false after
// they have been walked.
// If Walker returns an error, Walk stops walking immediately.
|
||||
type Walker func(n Node, entering bool) (WalkStatus, error)
|
||||
|
||||
// Walk walks an AST tree using a depth-first search algorithm.
|
||||
func Walk(n Node, walker Walker) error {
|
||||
_, err := walkHelper(n, walker)
|
||||
return err
|
||||
}
|
||||
|
||||
func walkHelper(n Node, walker Walker) (WalkStatus, error) {
|
||||
status, err := walker(n, true)
|
||||
if err != nil || status == WalkStop {
|
||||
return status, err
|
||||
}
|
||||
if status != WalkSkipChildren {
|
||||
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||
if st, err := walkHelper(c, walker); err != nil || st == WalkStop {
|
||||
return WalkStop, err
|
||||
}
|
||||
}
|
||||
}
|
||||
status, err = walker(n, false)
|
||||
if err != nil || status == WalkStop {
|
||||
return WalkStop, err
|
||||
}
|
||||
return WalkContinue, nil
|
||||
}
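To make the `Walk` API above concrete, here is a brief, hypothetical sketch (the input document and the counting logic are illustrative) that parses a document with goldmark and walks the resulting AST:

```go
package main

import (
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
)

func main() {
	source := []byte("# One\n\nsome text\n\n## Two\n")

	// Parse into an AST without rendering, then walk it.
	doc := goldmark.New().Parser().Parse(text.NewReader(source))

	headings := 0
	_ = ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
		if entering && n.Kind() == ast.KindHeading {
			headings++
		}
		return ast.WalkContinue, nil
	})
	fmt.Println("headings:", headings) // 2
}
```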
474 vendor/github.com/yuin/goldmark/ast/block.go (generated, vendored, new file)
@@ -0,0 +1,474 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
textm "github.com/yuin/goldmark/text"
|
||||
)
|
||||
|
||||
// A BaseBlock struct implements the Node interface.
|
||||
type BaseBlock struct {
|
||||
BaseNode
|
||||
blankPreviousLines bool
|
||||
lines *textm.Segments
|
||||
}
|
||||
|
||||
// Type implements Node.Type
|
||||
func (b *BaseBlock) Type() NodeType {
|
||||
return TypeBlock
|
||||
}
|
||||
|
||||
// IsRaw implements Node.IsRaw
|
||||
func (b *BaseBlock) IsRaw() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// HasBlankPreviousLines implements Node.HasBlankPreviousLines.
|
||||
func (b *BaseBlock) HasBlankPreviousLines() bool {
|
||||
return b.blankPreviousLines
|
||||
}
|
||||
|
||||
// SetBlankPreviousLines implements Node.SetBlankPreviousLines.
|
||||
func (b *BaseBlock) SetBlankPreviousLines(v bool) {
|
||||
b.blankPreviousLines = v
|
||||
}
|
||||
|
||||
// Lines implements Node.Lines
|
||||
func (b *BaseBlock) Lines() *textm.Segments {
|
||||
if b.lines == nil {
|
||||
b.lines = textm.NewSegments()
|
||||
}
|
||||
return b.lines
|
||||
}
|
||||
|
||||
// SetLines implements Node.SetLines
|
||||
func (b *BaseBlock) SetLines(v *textm.Segments) {
|
||||
b.lines = v
|
||||
}
|
||||
|
||||
// A Document struct is a root node of Markdown text.
|
||||
type Document struct {
|
||||
BaseBlock
|
||||
}
|
||||
|
||||
// KindDocument is a NodeKind of the Document node.
|
||||
var KindDocument = NewNodeKind("Document")
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *Document) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// Type implements Node.Type .
|
||||
func (n *Document) Type() NodeType {
|
||||
return TypeDocument
|
||||
}
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Document) Kind() NodeKind {
|
||||
return KindDocument
|
||||
}
|
||||
|
||||
// NewDocument returns a new Document node.
|
||||
func NewDocument() *Document {
|
||||
return &Document{
|
||||
BaseBlock: BaseBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// A TextBlock struct is a node whose lines
|
||||
// should be rendered without any containers.
|
||||
type TextBlock struct {
|
||||
BaseBlock
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *TextBlock) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindTextBlock is a NodeKind of the TextBlock node.
|
||||
var KindTextBlock = NewNodeKind("TextBlock")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *TextBlock) Kind() NodeKind {
|
||||
return KindTextBlock
|
||||
}
|
||||
|
||||
// NewTextBlock returns a new TextBlock node.
|
||||
func NewTextBlock() *TextBlock {
|
||||
return &TextBlock{
|
||||
BaseBlock: BaseBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// A Paragraph struct represents a paragraph of Markdown text.
|
||||
type Paragraph struct {
|
||||
BaseBlock
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *Paragraph) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindParagraph is a NodeKind of the Paragraph node.
|
||||
var KindParagraph = NewNodeKind("Paragraph")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Paragraph) Kind() NodeKind {
|
||||
return KindParagraph
|
||||
}
|
||||
|
||||
// NewParagraph returns a new Paragraph node.
|
||||
func NewParagraph() *Paragraph {
|
||||
return &Paragraph{
|
||||
BaseBlock: BaseBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// IsParagraph returns true if the given node is a *Paragraph,
// otherwise false.
|
||||
func IsParagraph(node Node) bool {
|
||||
_, ok := node.(*Paragraph)
|
||||
return ok
|
||||
}
|
||||
|
||||
// A Heading struct represents headings like SetextHeading and ATXHeading.
|
||||
type Heading struct {
|
||||
BaseBlock
|
||||
// Level returns a level of this heading.
|
||||
// This value is between 1 and 6.
|
||||
Level int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *Heading) Dump(source []byte, level int) {
|
||||
m := map[string]string{
|
||||
"Level": fmt.Sprintf("%d", n.Level),
|
||||
}
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindHeading is a NodeKind of the Heading node.
|
||||
var KindHeading = NewNodeKind("Heading")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Heading) Kind() NodeKind {
|
||||
return KindHeading
|
||||
}
|
||||
|
||||
// NewHeading returns a new Heading node.
|
||||
func NewHeading(level int) *Heading {
|
||||
return &Heading{
|
||||
BaseBlock: BaseBlock{},
|
||||
Level: level,
|
||||
}
|
||||
}
|
||||
|
||||
// A ThematicBreak struct represents a thematic break of Markdown text.
|
||||
type ThematicBreak struct {
|
||||
BaseBlock
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *ThematicBreak) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindThematicBreak is a NodeKind of the ThematicBreak node.
|
||||
var KindThematicBreak = NewNodeKind("ThematicBreak")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *ThematicBreak) Kind() NodeKind {
|
||||
return KindThematicBreak
|
||||
}
|
||||
|
||||
// NewThematicBreak returns a new ThematicBreak node.
|
||||
func NewThematicBreak() *ThematicBreak {
|
||||
return &ThematicBreak{
|
||||
BaseBlock: BaseBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// A CodeBlock struct represents an indented code block of Markdown text.
|
||||
type CodeBlock struct {
|
||||
BaseBlock
|
||||
}
|
||||
|
||||
// IsRaw implements Node.IsRaw.
|
||||
func (n *CodeBlock) IsRaw() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *CodeBlock) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindCodeBlock is a NodeKind of the CodeBlock node.
|
||||
var KindCodeBlock = NewNodeKind("CodeBlock")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *CodeBlock) Kind() NodeKind {
|
||||
return KindCodeBlock
|
||||
}
|
||||
|
||||
// NewCodeBlock returns a new CodeBlock node.
|
||||
func NewCodeBlock() *CodeBlock {
|
||||
return &CodeBlock{
|
||||
BaseBlock: BaseBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// A FencedCodeBlock struct represents a fenced code block of Markdown text.
|
||||
type FencedCodeBlock struct {
|
||||
BaseBlock
|
||||
// Info is the info text of this fenced code block.
|
||||
Info *Text
|
||||
|
||||
language []byte
|
||||
}
|
||||
|
||||
// Language returns the language written in the info string.
// Language returns nil if this node does not have an info string.
|
||||
func (n *FencedCodeBlock) Language(source []byte) []byte {
|
||||
if n.language == nil && n.Info != nil {
|
||||
segment := n.Info.Segment
|
||||
info := segment.Value(source)
|
||||
i := 0
|
||||
for ; i < len(info); i++ {
|
||||
if info[i] == ' ' {
|
||||
break
|
||||
}
|
||||
}
|
||||
n.language = info[:i]
|
||||
}
|
||||
return n.language
|
||||
}
|
||||
|
||||
// IsRaw implements Node.IsRaw.
|
||||
func (n *FencedCodeBlock) IsRaw() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *FencedCodeBlock) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
if n.Info != nil {
|
||||
m["Info"] = fmt.Sprintf("\"%s\"", n.Info.Text(source))
|
||||
}
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindFencedCodeBlock is a NodeKind of the FencedCodeBlock node.
|
||||
var KindFencedCodeBlock = NewNodeKind("FencedCodeBlock")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *FencedCodeBlock) Kind() NodeKind {
|
||||
return KindFencedCodeBlock
|
||||
}
|
||||
|
||||
// NewFencedCodeBlock return a new FencedCodeBlock node.
|
||||
func NewFencedCodeBlock(info *Text) *FencedCodeBlock {
|
||||
return &FencedCodeBlock{
|
||||
BaseBlock: BaseBlock{},
|
||||
Info: info,
|
||||
}
|
||||
}
|
||||
|
||||
// A Blockquote struct represents a blockquote block of Markdown text.
|
||||
type Blockquote struct {
|
||||
BaseBlock
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump .
|
||||
func (n *Blockquote) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindBlockquote is a NodeKind of the Blockquote node.
|
||||
var KindBlockquote = NewNodeKind("Blockquote")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Blockquote) Kind() NodeKind {
|
||||
return KindBlockquote
|
||||
}
|
||||
|
||||
// NewBlockquote returns a new Blockquote node.
|
||||
func NewBlockquote() *Blockquote {
|
||||
return &Blockquote{
|
||||
BaseBlock: BaseBlock{},
|
||||
}
|
||||
}
|
||||
|
||||
// A List struct represents a list of Markdown text.
|
||||
type List struct {
|
||||
BaseBlock
|
||||
|
||||
// Marker is a marker character like '-', '+', ')' and '.'.
|
||||
Marker byte
|
||||
|
||||
// IsTight is true if this list is a 'tight' list.
|
||||
// See https://spec.commonmark.org/0.29/#loose for details.
|
||||
IsTight bool
|
||||
|
||||
// Start is the initial number of this ordered list.
|
||||
// If this list is not an ordered list, Start is 0.
|
||||
Start int
|
||||
}
|
||||
|
||||
// IsOrdered returns true if this list is an ordered list, otherwise false.
|
||||
func (l *List) IsOrdered() bool {
|
||||
return l.Marker == '.' || l.Marker == ')'
|
||||
}
|
||||
|
||||
// CanContinue returns true if this list can continue with
// the given marker and list type, otherwise false.
|
||||
func (l *List) CanContinue(marker byte, isOrdered bool) bool {
|
||||
return marker == l.Marker && isOrdered == l.IsOrdered()
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (l *List) Dump(source []byte, level int) {
|
||||
m := map[string]string{
|
||||
"Ordered": fmt.Sprintf("%v", l.IsOrdered()),
|
||||
"Marker": fmt.Sprintf("%c", l.Marker),
|
||||
"Tight": fmt.Sprintf("%v", l.IsTight),
|
||||
}
|
||||
if l.IsOrdered() {
|
||||
m["Start"] = fmt.Sprintf("%d", l.Start)
|
||||
}
|
||||
DumpHelper(l, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindList is a NodeKind of the List node.
|
||||
var KindList = NewNodeKind("List")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (l *List) Kind() NodeKind {
|
||||
return KindList
|
||||
}
|
||||
|
||||
// NewList returns a new List node.
|
||||
func NewList(marker byte) *List {
|
||||
return &List{
|
||||
BaseBlock: BaseBlock{},
|
||||
Marker: marker,
|
||||
IsTight: true,
|
||||
}
|
||||
}
|
||||
|
||||
// A ListItem struct represents a list item of Markdown text.
|
||||
type ListItem struct {
|
||||
BaseBlock
|
||||
|
||||
// Offset is an offset position of this item.
|
||||
Offset int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *ListItem) Dump(source []byte, level int) {
|
||||
m := map[string]string{
|
||||
"Offset": fmt.Sprintf("%d", n.Offset),
|
||||
}
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindListItem is a NodeKind of the ListItem node.
|
||||
var KindListItem = NewNodeKind("ListItem")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *ListItem) Kind() NodeKind {
|
||||
return KindListItem
|
||||
}
|
||||
|
||||
// NewListItem returns a new ListItem node.
|
||||
func NewListItem(offset int) *ListItem {
|
||||
return &ListItem{
|
||||
BaseBlock: BaseBlock{},
|
||||
Offset: offset,
|
||||
}
|
||||
}
|
||||
|
||||
// HTMLBlockType represents the kind of an HTML block.
// See https://spec.commonmark.org/0.29/#html-blocks
|
||||
type HTMLBlockType int
|
||||
|
||||
const (
|
||||
// HTMLBlockType1 represents type 1 html blocks
|
||||
HTMLBlockType1 HTMLBlockType = iota + 1
|
||||
// HTMLBlockType2 represents type 2 html blocks
|
||||
HTMLBlockType2
|
||||
// HTMLBlockType3 represents type 3 html blocks
|
||||
HTMLBlockType3
|
||||
// HTMLBlockType4 represents type 4 html blocks
|
||||
HTMLBlockType4
|
||||
// HTMLBlockType5 represents type 5 html blocks
|
||||
HTMLBlockType5
|
||||
// HTMLBlockType6 represents type 6 html blocks
|
||||
HTMLBlockType6
|
||||
// HTMLBlockType7 represents type 7 html blocks
|
||||
HTMLBlockType7
|
||||
)
|
||||
|
||||
// An HTMLBlock struct represents an html block of Markdown text.
|
||||
type HTMLBlock struct {
|
||||
BaseBlock
|
||||
|
||||
// HTMLBlockType is the type of this HTML block.
|
||||
HTMLBlockType HTMLBlockType
|
||||
|
||||
// ClosureLine is a line that closes this html block.
|
||||
ClosureLine textm.Segment
|
||||
}
|
||||
|
||||
// IsRaw implements Node.IsRaw.
|
||||
func (n *HTMLBlock) IsRaw() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// HasClosure returns true if this html block has a closure line,
|
||||
// otherwise false.
|
||||
func (n *HTMLBlock) HasClosure() bool {
|
||||
return n.ClosureLine.Start >= 0
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *HTMLBlock) Dump(source []byte, level int) {
|
||||
indent := strings.Repeat(" ", level)
|
||||
fmt.Printf("%s%s {\n", indent, "HTMLBlock")
|
||||
indent2 := strings.Repeat(" ", level+1)
|
||||
fmt.Printf("%sRawText: \"", indent2)
|
||||
for i := 0; i < n.Lines().Len(); i++ {
|
||||
s := n.Lines().At(i)
|
||||
fmt.Print(string(source[s.Start:s.Stop]))
|
||||
}
|
||||
fmt.Printf("\"\n")
|
||||
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||
c.Dump(source, level+1)
|
||||
}
|
||||
if n.HasClosure() {
|
||||
cl := n.ClosureLine
|
||||
fmt.Printf("%sClosure: \"%s\"\n", indent2, string(cl.Value(source)))
|
||||
}
|
||||
fmt.Printf("%s}\n", indent)
|
||||
}
|
||||
|
||||
// KindHTMLBlock is a NodeKind of the HTMLBlock node.
|
||||
var KindHTMLBlock = NewNodeKind("HTMLBlock")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *HTMLBlock) Kind() NodeKind {
|
||||
return KindHTMLBlock
|
||||
}
|
||||
|
||||
// NewHTMLBlock returns a new HTMLBlock node.
|
||||
func NewHTMLBlock(typ HTMLBlockType) *HTMLBlock {
|
||||
return &HTMLBlock{
|
||||
BaseBlock: BaseBlock{},
|
||||
HTMLBlockType: typ,
|
||||
ClosureLine: textm.NewSegment(-1, -1),
|
||||
}
|
||||
}
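As a rough illustration of the block-node constructors defined in this file (the sample text and byte offsets are made up), a tiny document can be assembled and dumped by hand:

```go
package main

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
)

func main() {
	source := []byte("hello block nodes")

	// Build a tiny document by hand using the constructors defined above.
	doc := ast.NewDocument()
	heading := ast.NewHeading(2)
	heading.AppendChild(heading, ast.NewTextSegment(text.NewSegment(0, 5))) // "hello"
	doc.AppendChild(doc, heading)

	para := ast.NewParagraph()
	para.AppendChild(para, ast.NewTextSegment(text.NewSegment(6, len(source))))
	doc.AppendChild(doc, para)

	// Dump prints the tree structure to stdout for debugging.
	doc.Dump(source, 0)
}
```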
548 vendor/github.com/yuin/goldmark/ast/inline.go (generated, vendored, new file)
@@ -0,0 +1,548 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
textm "github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A BaseInline struct implements the Node interface.
|
||||
type BaseInline struct {
|
||||
BaseNode
|
||||
}
|
||||
|
||||
// Type implements Node.Type
|
||||
func (b *BaseInline) Type() NodeType {
|
||||
return TypeInline
|
||||
}
|
||||
|
||||
// IsRaw implements Node.IsRaw
|
||||
func (b *BaseInline) IsRaw() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// HasBlankPreviousLines implements Node.HasBlankPreviousLines.
|
||||
func (b *BaseInline) HasBlankPreviousLines() bool {
|
||||
panic("can not call with inline nodes.")
|
||||
}
|
||||
|
||||
// SetBlankPreviousLines implements Node.SetBlankPreviousLines.
|
||||
func (b *BaseInline) SetBlankPreviousLines(v bool) {
|
||||
panic("can not call with inline nodes.")
|
||||
}
|
||||
|
||||
// Lines implements Node.Lines
|
||||
func (b *BaseInline) Lines() *textm.Segments {
|
||||
panic("can not call with inline nodes.")
|
||||
}
|
||||
|
||||
// SetLines implements Node.SetLines
|
||||
func (b *BaseInline) SetLines(v *textm.Segments) {
|
||||
panic("can not call with inline nodes.")
|
||||
}
|
||||
|
||||
// A Text struct represents a textual content of the Markdown text.
|
||||
type Text struct {
|
||||
BaseInline
|
||||
// Segment is a position in a source text.
|
||||
Segment textm.Segment
|
||||
|
||||
flags uint8
|
||||
}
|
||||
|
||||
const (
|
||||
textSoftLineBreak = 1 << iota
|
||||
textHardLineBreak
|
||||
textRaw
|
||||
textCode
|
||||
)
|
||||
|
||||
func textFlagsString(flags uint8) string {
|
||||
buf := []string{}
|
||||
if flags&textSoftLineBreak != 0 {
|
||||
buf = append(buf, "SoftLineBreak")
|
||||
}
|
||||
if flags&textHardLineBreak != 0 {
|
||||
buf = append(buf, "HardLineBreak")
|
||||
}
|
||||
if flags&textRaw != 0 {
|
||||
buf = append(buf, "Raw")
|
||||
}
|
||||
if flags&textCode != 0 {
|
||||
buf = append(buf, "Code")
|
||||
}
|
||||
return strings.Join(buf, ", ")
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (n *Text) Inline() {
|
||||
}
|
||||
|
||||
// SoftLineBreak returns true if this node ends with a new line,
|
||||
// otherwise false.
|
||||
func (n *Text) SoftLineBreak() bool {
|
||||
return n.flags&textSoftLineBreak != 0
|
||||
}
|
||||
|
||||
// SetSoftLineBreak sets whether this node ends with a new line.
|
||||
func (n *Text) SetSoftLineBreak(v bool) {
|
||||
if v {
|
||||
n.flags |= textSoftLineBreak
|
||||
} else {
|
||||
n.flags = n.flags &^ textSoftLineBreak
|
||||
}
|
||||
}
|
||||
|
||||
// IsRaw returns true if this text should be rendered without unescaping
|
||||
// back slash escapes and resolving references.
|
||||
func (n *Text) IsRaw() bool {
|
||||
return n.flags&textRaw != 0
|
||||
}
|
||||
|
||||
// SetRaw sets whether this text should be rendered as raw contents.
|
||||
func (n *Text) SetRaw(v bool) {
|
||||
if v {
|
||||
n.flags |= textRaw
|
||||
} else {
|
||||
n.flags = n.flags &^ textRaw
|
||||
}
|
||||
}
|
||||
|
||||
// HardLineBreak returns true if this node ends with a hard line break.
|
||||
// See https://spec.commonmark.org/0.29/#hard-line-breaks for details.
|
||||
func (n *Text) HardLineBreak() bool {
|
||||
return n.flags&textHardLineBreak != 0
|
||||
}
|
||||
|
||||
// SetHardLineBreak sets whether this node ends with a hard line break.
|
||||
func (n *Text) SetHardLineBreak(v bool) {
|
||||
if v {
|
||||
n.flags |= textHardLineBreak
|
||||
} else {
|
||||
n.flags = n.flags &^ textHardLineBreak
|
||||
}
|
||||
}
|
||||
|
||||
// Merge merges a Node n into this node.
|
||||
// Merge returns true if the given node has been merged, otherwise false.
|
||||
func (n *Text) Merge(node Node, source []byte) bool {
|
||||
t, ok := node.(*Text)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if n.Segment.Stop != t.Segment.Start || t.Segment.Padding != 0 || source[n.Segment.Stop-1] == '\n' || t.IsRaw() != n.IsRaw() {
|
||||
return false
|
||||
}
|
||||
n.Segment.Stop = t.Segment.Stop
|
||||
n.SetSoftLineBreak(t.SoftLineBreak())
|
||||
n.SetHardLineBreak(t.HardLineBreak())
|
||||
return true
|
||||
}
|
||||
|
||||
// Text implements Node.Text.
|
||||
func (n *Text) Text(source []byte) []byte {
|
||||
return n.Segment.Value(source)
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Text) Dump(source []byte, level int) {
|
||||
fs := textFlagsString(n.flags)
|
||||
if len(fs) != 0 {
|
||||
fs = "(" + fs + ")"
|
||||
}
|
||||
fmt.Printf("%sText%s: \"%s\"\n", strings.Repeat(" ", level), fs, strings.TrimRight(string(n.Text(source)), "\n"))
|
||||
}
|
||||
|
||||
// KindText is a NodeKind of the Text node.
|
||||
var KindText = NewNodeKind("Text")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Text) Kind() NodeKind {
|
||||
return KindText
|
||||
}
|
||||
|
||||
// NewText returns a new Text node.
|
||||
func NewText() *Text {
|
||||
return &Text{
|
||||
BaseInline: BaseInline{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewTextSegment returns a new Text node with the given source position.
|
||||
func NewTextSegment(v textm.Segment) *Text {
|
||||
return &Text{
|
||||
BaseInline: BaseInline{},
|
||||
Segment: v,
|
||||
}
|
||||
}
|
||||
|
||||
// NewRawTextSegment returns a new Text node with the given source position.
|
||||
// The new node should be rendered as raw contents.
|
||||
func NewRawTextSegment(v textm.Segment) *Text {
|
||||
t := &Text{
|
||||
BaseInline: BaseInline{},
|
||||
Segment: v,
|
||||
}
|
||||
t.SetRaw(true)
|
||||
return t
|
||||
}
|
||||
|
||||
// MergeOrAppendTextSegment merges a given s into the last child of the parent if
// it can be merged, otherwise creates a new Text node and appends it after the
// current last child.
|
||||
func MergeOrAppendTextSegment(parent Node, s textm.Segment) {
|
||||
last := parent.LastChild()
|
||||
t, ok := last.(*Text)
|
||||
if ok && t.Segment.Stop == s.Start && !t.SoftLineBreak() {
|
||||
t.Segment = t.Segment.WithStop(s.Stop)
|
||||
} else {
|
||||
parent.AppendChild(parent, NewTextSegment(s))
|
||||
}
|
||||
}
|
||||
|
||||
// MergeOrReplaceTextSegment merges a given s into a previous sibling of the node n
|
||||
// if a previous sibling of the node n is *Text, otherwise replaces Node n with s.
|
||||
func MergeOrReplaceTextSegment(parent Node, n Node, s textm.Segment) {
|
||||
prev := n.PreviousSibling()
|
||||
if t, ok := prev.(*Text); ok && t.Segment.Stop == s.Start && !t.SoftLineBreak() {
|
||||
t.Segment = t.Segment.WithStop(s.Stop)
|
||||
parent.RemoveChild(parent, n)
|
||||
} else {
|
||||
parent.ReplaceChild(parent, n, NewTextSegment(s))
|
||||
}
|
||||
}
|
||||
|
||||
// A String struct represents textual content that has a concrete value.
|
||||
type String struct {
|
||||
BaseInline
|
||||
|
||||
Value []byte
|
||||
flags uint8
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (n *String) Inline() {
|
||||
}
|
||||
|
||||
// IsRaw returns true if this text should be rendered without unescaping
|
||||
// backslash escapes and resolving references.
|
||||
func (n *String) IsRaw() bool {
|
||||
return n.flags&textRaw != 0
|
||||
}
|
||||
|
||||
// SetRaw sets whether this text should be rendered as raw contents.
|
||||
func (n *String) SetRaw(v bool) {
|
||||
if v {
|
||||
n.flags |= textRaw
|
||||
} else {
|
||||
n.flags = n.flags &^ textRaw
|
||||
}
|
||||
}
|
||||
|
||||
// IsCode returns true if this text should be rendered without any
|
||||
// modifications.
|
||||
func (n *String) IsCode() bool {
|
||||
return n.flags&textCode != 0
|
||||
}
|
||||
|
||||
// SetCode sets whether this text should be rendered without any modifications.
|
||||
func (n *String) SetCode(v bool) {
|
||||
if v {
|
||||
n.flags |= textCode
|
||||
} else {
|
||||
n.flags = n.flags &^ textCode
|
||||
}
|
||||
}
|
||||
|
||||
// Text implements Node.Text.
|
||||
func (n *String) Text(source []byte) []byte {
|
||||
return n.Value
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *String) Dump(source []byte, level int) {
|
||||
fs := textFlagsString(n.flags)
|
||||
if len(fs) != 0 {
|
||||
fs = "(" + fs + ")"
|
||||
}
|
||||
fmt.Printf("%sString%s: \"%s\"\n", strings.Repeat(" ", level), fs, strings.TrimRight(string(n.Value), "\n"))
|
||||
}
|
||||
|
||||
// KindString is a NodeKind of the String node.
|
||||
var KindString = NewNodeKind("String")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *String) Kind() NodeKind {
|
||||
return KindString
|
||||
}
|
||||
|
||||
// NewString returns a new String node.
|
||||
func NewString(v []byte) *String {
|
||||
return &String{
|
||||
Value: v,
|
||||
}
|
||||
}
|
||||
|
||||
// A CodeSpan struct represents a code span of Markdown text.
|
||||
type CodeSpan struct {
|
||||
BaseInline
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (n *CodeSpan) Inline() {
|
||||
}
|
||||
|
||||
// IsBlank returns true if this node consists of spaces, otherwise false.
|
||||
func (n *CodeSpan) IsBlank(source []byte) bool {
|
||||
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||
text := c.(*Text).Segment
|
||||
if !util.IsBlank(text.Value(source)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *CodeSpan) Dump(source []byte, level int) {
|
||||
DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindCodeSpan is a NodeKind of the CodeSpan node.
|
||||
var KindCodeSpan = NewNodeKind("CodeSpan")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *CodeSpan) Kind() NodeKind {
|
||||
return KindCodeSpan
|
||||
}
|
||||
|
||||
// NewCodeSpan returns a new CodeSpan node.
|
||||
func NewCodeSpan() *CodeSpan {
|
||||
return &CodeSpan{
|
||||
BaseInline: BaseInline{},
|
||||
}
|
||||
}
|
||||
|
||||
// An Emphasis struct represents an emphasis of Markdown text.
|
||||
type Emphasis struct {
|
||||
BaseInline
|
||||
|
||||
// Level is the level of the emphasis.
|
||||
Level int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Emphasis) Dump(source []byte, level int) {
|
||||
m := map[string]string{
|
||||
"Level": fmt.Sprintf("%v", n.Level),
|
||||
}
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindEmphasis is a NodeKind of the Emphasis node.
|
||||
var KindEmphasis = NewNodeKind("Emphasis")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Emphasis) Kind() NodeKind {
|
||||
return KindEmphasis
|
||||
}
|
||||
|
||||
// NewEmphasis returns a new Emphasis node with the given level.
|
||||
func NewEmphasis(level int) *Emphasis {
|
||||
return &Emphasis{
|
||||
BaseInline: BaseInline{},
|
||||
Level: level,
|
||||
}
|
||||
}
|
||||
|
||||
type baseLink struct {
|
||||
BaseInline
|
||||
|
||||
// Destination is the destination (URL) of this link.
|
||||
Destination []byte
|
||||
|
||||
// Title is a title of this link.
|
||||
Title []byte
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (n *baseLink) Inline() {
|
||||
}
|
||||
|
||||
// A Link struct represents a link of the Markdown text.
|
||||
type Link struct {
|
||||
baseLink
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Link) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
m["Destination"] = string(n.Destination)
|
||||
m["Title"] = string(n.Title)
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindLink is a NodeKind of the Link node.
|
||||
var KindLink = NewNodeKind("Link")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Link) Kind() NodeKind {
|
||||
return KindLink
|
||||
}
|
||||
|
||||
// NewLink returns a new Link node.
|
||||
func NewLink() *Link {
|
||||
c := &Link{
|
||||
baseLink: baseLink{
|
||||
BaseInline: BaseInline{},
|
||||
},
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// An Image struct represents an image of the Markdown text.
|
||||
type Image struct {
|
||||
baseLink
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Image) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
m["Destination"] = string(n.Destination)
|
||||
m["Title"] = string(n.Title)
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindImage is a NodeKind of the Image node.
|
||||
var KindImage = NewNodeKind("Image")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Image) Kind() NodeKind {
|
||||
return KindImage
|
||||
}
|
||||
|
||||
// NewImage returns a new Image node.
|
||||
func NewImage(link *Link) *Image {
|
||||
c := &Image{
|
||||
baseLink: baseLink{
|
||||
BaseInline: BaseInline{},
|
||||
},
|
||||
}
|
||||
c.Destination = link.Destination
|
||||
c.Title = link.Title
|
||||
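// Move the link's children (the label) over to the new image node.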
for n := link.FirstChild(); n != nil; {
|
||||
next := n.NextSibling()
|
||||
link.RemoveChild(link, n)
|
||||
c.AppendChild(c, n)
|
||||
n = next
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// AutoLinkType defines the kind of an autolink.
|
||||
type AutoLinkType int
|
||||
|
||||
const (
|
||||
// AutoLinkEmail indicates that an autolink is an email address.
|
||||
AutoLinkEmail AutoLinkType = iota + 1
|
||||
// AutoLinkURL indicates that an autolink is a generic URL.
|
||||
AutoLinkURL
|
||||
)
|
||||
|
||||
// An AutoLink struct represents an autolink of the Markdown text.
|
||||
type AutoLink struct {
|
||||
BaseInline
|
||||
// AutoLinkType is the type of this autolink.
|
||||
AutoLinkType AutoLinkType
|
||||
|
||||
// Protocol specifies the protocol of the link.
|
||||
Protocol []byte
|
||||
|
||||
value *Text
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (n *AutoLink) Inline() {}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *AutoLink) Dump(source []byte, level int) {
|
||||
segment := n.value.Segment
|
||||
m := map[string]string{
|
||||
"Value": string(segment.Value(source)),
|
||||
}
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindAutoLink is a NodeKind of the AutoLink node.
|
||||
var KindAutoLink = NewNodeKind("AutoLink")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *AutoLink) Kind() NodeKind {
|
||||
return KindAutoLink
|
||||
}
|
||||
|
||||
// URL returns the URL of this node.
|
||||
func (n *AutoLink) URL(source []byte) []byte {
|
||||
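// When Protocol is set (e.g. the linkify extension sets "http" for 'www.' autolinks),
// prepend it together with "://" to the link text.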
if n.Protocol != nil {
|
||||
s := n.value.Segment
|
||||
ret := make([]byte, 0, len(n.Protocol)+s.Len()+3)
|
||||
ret = append(ret, n.Protocol...)
|
||||
ret = append(ret, ':', '/', '/')
|
||||
ret = append(ret, n.value.Text(source)...)
|
||||
return ret
|
||||
}
|
||||
return n.value.Text(source)
|
||||
}
|
||||
|
||||
// Label returns a label of this node.
|
||||
func (n *AutoLink) Label(source []byte) []byte {
|
||||
return n.value.Text(source)
|
||||
}
|
||||
|
||||
// NewAutoLink returns a new AutoLink node.
|
||||
func NewAutoLink(typ AutoLinkType, value *Text) *AutoLink {
|
||||
return &AutoLink{
|
||||
BaseInline: BaseInline{},
|
||||
value: value,
|
||||
AutoLinkType: typ,
|
||||
}
|
||||
}
|
||||
|
||||
// A RawHTML struct represents an inline raw HTML of the Markdown text.
|
||||
type RawHTML struct {
|
||||
BaseInline
|
||||
Segments *textm.Segments
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (n *RawHTML) Inline() {}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *RawHTML) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
t := []string{}
|
||||
for i := 0; i < n.Segments.Len(); i++ {
|
||||
segment := n.Segments.At(i)
|
||||
t = append(t, string(segment.Value(source)))
|
||||
}
|
||||
m["RawText"] = strings.Join(t, "")
|
||||
DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindRawHTML is a NodeKind of the RawHTML node.
|
||||
var KindRawHTML = NewNodeKind("RawHTML")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *RawHTML) Kind() NodeKind {
|
||||
return KindRawHTML
|
||||
}
|
||||
|
||||
// NewRawHTML returns a new RawHTML node.
|
||||
func NewRawHTML() *RawHTML {
|
||||
return &RawHTML{
|
||||
Segments: textm.NewSegments(),
|
||||
}
|
||||
}
|
83
vendor/github.com/yuin/goldmark/extension/ast/definition_list.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
)
|
||||
|
||||
// A DefinitionList struct represents a definition list of Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type DefinitionList struct {
|
||||
gast.BaseBlock
|
||||
Offset int
|
||||
TemporaryParagraph *gast.Paragraph
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *DefinitionList) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindDefinitionList is a NodeKind of the DefinitionList node.
|
||||
var KindDefinitionList = gast.NewNodeKind("DefinitionList")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *DefinitionList) Kind() gast.NodeKind {
|
||||
return KindDefinitionList
|
||||
}
|
||||
|
||||
// NewDefinitionList returns a new DefinitionList node.
|
||||
func NewDefinitionList(offset int, para *gast.Paragraph) *DefinitionList {
|
||||
return &DefinitionList{
|
||||
Offset: offset,
|
||||
TemporaryParagraph: para,
|
||||
}
|
||||
}
|
||||
|
||||
// A DefinitionTerm struct represents a definition list term of Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type DefinitionTerm struct {
|
||||
gast.BaseBlock
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *DefinitionTerm) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindDefinitionTerm is a NodeKind of the DefinitionTerm node.
|
||||
var KindDefinitionTerm = gast.NewNodeKind("DefinitionTerm")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *DefinitionTerm) Kind() gast.NodeKind {
|
||||
return KindDefinitionTerm
|
||||
}
|
||||
|
||||
// NewDefinitionTerm returns a new DefinitionTerm node.
|
||||
func NewDefinitionTerm() *DefinitionTerm {
|
||||
return &DefinitionTerm{}
|
||||
}
|
||||
|
||||
// A DefinitionDescription struct represents a definition list description of Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type DefinitionDescription struct {
|
||||
gast.BaseBlock
|
||||
IsTight bool
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *DefinitionDescription) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindDefinitionDescription is a NodeKind of the DefinitionDescription node.
|
||||
var KindDefinitionDescription = gast.NewNodeKind("DefinitionDescription")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *DefinitionDescription) Kind() gast.NodeKind {
|
||||
return KindDefinitionDescription
|
||||
}
|
||||
|
||||
// NewDefinitionDescription returns a new DefinitionDescription node.
|
||||
func NewDefinitionDescription() *DefinitionDescription {
|
||||
return &DefinitionDescription{}
|
||||
}
|
125
vendor/github.com/yuin/goldmark/extension/ast/footnote.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
)
|
||||
|
||||
// A FootnoteLink struct represents a link to a footnote of Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type FootnoteLink struct {
|
||||
gast.BaseInline
|
||||
Index int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *FootnoteLink) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
m["Index"] = fmt.Sprintf("%v", n.Index)
|
||||
gast.DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindFootnoteLink is a NodeKind of the FootnoteLink node.
|
||||
var KindFootnoteLink = gast.NewNodeKind("FootnoteLink")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *FootnoteLink) Kind() gast.NodeKind {
|
||||
return KindFootnoteLink
|
||||
}
|
||||
|
||||
// NewFootnoteLink returns a new FootnoteLink node.
|
||||
func NewFootnoteLink(index int) *FootnoteLink {
|
||||
return &FootnoteLink{
|
||||
Index: index,
|
||||
}
|
||||
}
|
||||
|
||||
// A FootnoteBackLink struct represents a link from a footnote back to its reference in Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type FootnoteBackLink struct {
|
||||
gast.BaseInline
|
||||
Index int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *FootnoteBackLink) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
m["Index"] = fmt.Sprintf("%v", n.Index)
|
||||
gast.DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindFootnoteBackLink is a NodeKind of the FootnoteBackLink node.
|
||||
var KindFootnoteBackLink = gast.NewNodeKind("FootnoteBackLink")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *FootnoteBackLink) Kind() gast.NodeKind {
|
||||
return KindFootnoteBackLink
|
||||
}
|
||||
|
||||
// NewFootnoteBackLink returns a new FootnoteBackLink node.
|
||||
func NewFootnoteBackLink(index int) *FootnoteBackLink {
|
||||
return &FootnoteBackLink{
|
||||
Index: index,
|
||||
}
|
||||
}
|
||||
|
||||
// A Footnote struct represents a footnote of Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type Footnote struct {
|
||||
gast.BaseBlock
|
||||
Ref []byte
|
||||
Index int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Footnote) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
m["Index"] = fmt.Sprintf("%v", n.Index)
|
||||
m["Ref"] = fmt.Sprintf("%s", n.Ref)
|
||||
gast.DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindFootnote is a NodeKind of the Footnote node.
|
||||
var KindFootnote = gast.NewNodeKind("Footnote")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Footnote) Kind() gast.NodeKind {
|
||||
return KindFootnote
|
||||
}
|
||||
|
||||
// NewFootnote returns a new Footnote node.
|
||||
func NewFootnote(ref []byte) *Footnote {
|
||||
return &Footnote{
|
||||
Ref: ref,
|
||||
Index: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// A FootnoteList struct represents footnotes of Markdown
|
||||
// (PHP Markdown Extra) text.
|
||||
type FootnoteList struct {
|
||||
gast.BaseBlock
|
||||
Count int
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *FootnoteList) Dump(source []byte, level int) {
|
||||
m := map[string]string{}
|
||||
m["Count"] = fmt.Sprintf("%v", n.Count)
|
||||
gast.DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindFootnoteList is a NodeKind of the FootnoteList node.
|
||||
var KindFootnoteList = gast.NewNodeKind("FootnoteList")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *FootnoteList) Kind() gast.NodeKind {
|
||||
return KindFootnoteList
|
||||
}
|
||||
|
||||
// NewFootnoteList returns a new FootnoteList node.
|
||||
func NewFootnoteList() *FootnoteList {
|
||||
return &FootnoteList{
|
||||
Count: 0,
|
||||
}
|
||||
}
|
29
vendor/github.com/yuin/goldmark/extension/ast/strikethrough.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Package ast defines AST nodes that represent the extension's elements
|
||||
package ast
|
||||
|
||||
import (
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
)
|
||||
|
||||
// A Strikethrough struct represents a strikethrough of GFM text.
|
||||
type Strikethrough struct {
|
||||
gast.BaseInline
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Strikethrough) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindStrikethrough is a NodeKind of the Strikethrough node.
|
||||
var KindStrikethrough = gast.NewNodeKind("Strikethrough")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Strikethrough) Kind() gast.NodeKind {
|
||||
return KindStrikethrough
|
||||
}
|
||||
|
||||
// NewStrikethrough returns a new Strikethrough node.
|
||||
func NewStrikethrough() *Strikethrough {
|
||||
return &Strikethrough{}
|
||||
}
|
157
vendor/github.com/yuin/goldmark/extension/ast/table.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Alignment is a text alignment of table cells.
|
||||
type Alignment int
|
||||
|
||||
const (
|
||||
// AlignLeft indicates text should be left justified.
|
||||
AlignLeft Alignment = iota + 1
|
||||
|
||||
// AlignRight indicates text should be right justified.
|
||||
AlignRight
|
||||
|
||||
// AlignCenter indicates text should be centered.
|
||||
AlignCenter
|
||||
|
||||
// AlignNone indicates text should be aligned in the default manner.
|
||||
AlignNone
|
||||
)
|
||||
|
||||
func (a Alignment) String() string {
|
||||
switch a {
|
||||
case AlignLeft:
|
||||
return "left"
|
||||
case AlignRight:
|
||||
return "right"
|
||||
case AlignCenter:
|
||||
return "center"
|
||||
case AlignNone:
|
||||
return "none"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// A Table struct represents a table of Markdown(GFM) text.
|
||||
type Table struct {
|
||||
gast.BaseBlock
|
||||
|
||||
// Alignments holds the alignments of the columns.
|
||||
Alignments []Alignment
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *Table) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, func(level int) {
|
||||
indent := strings.Repeat(" ", level)
|
||||
fmt.Printf("%sAlignments {\n", indent)
|
||||
for i, alignment := range n.Alignments {
|
||||
indent2 := strings.Repeat(" ", level+1)
|
||||
fmt.Printf("%s%s", indent2, alignment.String())
|
||||
if i != len(n.Alignments)-1 {
|
||||
fmt.Println("")
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n%s}\n", indent)
|
||||
})
|
||||
}
|
||||
|
||||
// KindTable is a NodeKind of the Table node.
|
||||
var KindTable = gast.NewNodeKind("Table")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *Table) Kind() gast.NodeKind {
|
||||
return KindTable
|
||||
}
|
||||
|
||||
// NewTable returns a new Table node.
|
||||
func NewTable() *Table {
|
||||
return &Table{
|
||||
Alignments: []Alignment{},
|
||||
}
|
||||
}
|
||||
|
||||
// A TableRow struct represents a table row of Markdown(GFM) text.
|
||||
type TableRow struct {
|
||||
gast.BaseBlock
|
||||
Alignments []Alignment
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *TableRow) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindTableRow is a NodeKind of the TableRow node.
|
||||
var KindTableRow = gast.NewNodeKind("TableRow")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *TableRow) Kind() gast.NodeKind {
|
||||
return KindTableRow
|
||||
}
|
||||
|
||||
// NewTableRow returns a new TableRow node.
|
||||
func NewTableRow(alignments []Alignment) *TableRow {
|
||||
return &TableRow{}
|
||||
}
|
||||
|
||||
// A TableHeader struct represents a table header of Markdown(GFM) text.
|
||||
type TableHeader struct {
|
||||
gast.BaseBlock
|
||||
Alignments []Alignment
|
||||
}
|
||||
|
||||
// KindTableHeader is a NodeKind of the TableHeader node.
|
||||
var KindTableHeader = gast.NewNodeKind("TableHeader")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *TableHeader) Kind() gast.NodeKind {
|
||||
return KindTableHeader
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *TableHeader) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// NewTableHeader returns a new TableHeader node.
|
||||
func NewTableHeader(row *TableRow) *TableHeader {
|
||||
n := &TableHeader{}
|
||||
for c := row.FirstChild(); c != nil; {
|
||||
next := c.NextSibling()
|
||||
n.AppendChild(n, c)
|
||||
c = next
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// A TableCell struct represents a table cell of a Markdown(GFM) text.
|
||||
type TableCell struct {
|
||||
gast.BaseBlock
|
||||
Alignment Alignment
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *TableCell) Dump(source []byte, level int) {
|
||||
gast.DumpHelper(n, source, level, nil, nil)
|
||||
}
|
||||
|
||||
// KindTableCell is a NodeKind of the TableCell node.
|
||||
var KindTableCell = gast.NewNodeKind("TableCell")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *TableCell) Kind() gast.NodeKind {
|
||||
return KindTableCell
|
||||
}
|
||||
|
||||
// NewTableCell returns a new TableCell node.
|
||||
func NewTableCell() *TableCell {
|
||||
return &TableCell{
|
||||
Alignment: AlignNone,
|
||||
}
|
||||
}
|
35
vendor/github.com/yuin/goldmark/extension/ast/tasklist.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
)
|
||||
|
||||
// A TaskCheckBox struct represents a checkbox of a task list.
|
||||
type TaskCheckBox struct {
|
||||
gast.BaseInline
|
||||
IsChecked bool
|
||||
}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (n *TaskCheckBox) Dump(source []byte, level int) {
|
||||
m := map[string]string{
|
||||
"Checked": fmt.Sprintf("%v", n.IsChecked),
|
||||
}
|
||||
gast.DumpHelper(n, source, level, m, nil)
|
||||
}
|
||||
|
||||
// KindTaskCheckBox is a NodeKind of the TaskCheckBox node.
|
||||
var KindTaskCheckBox = gast.NewNodeKind("TaskCheckBox")
|
||||
|
||||
// Kind implements Node.Kind.
|
||||
func (n *TaskCheckBox) Kind() gast.NodeKind {
|
||||
return KindTaskCheckBox
|
||||
}
|
||||
|
||||
// NewTaskCheckBox returns a new TaskCheckBox node.
|
||||
func NewTaskCheckBox(checked bool) *TaskCheckBox {
|
||||
return &TaskCheckBox{
|
||||
IsChecked: checked,
|
||||
}
|
||||
}
|
270
vendor/github.com/yuin/goldmark/extension/definition_list.go
generated
vendored
Normal file
@ -0,0 +1,270 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/extension/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/renderer/html"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type definitionListParser struct {
|
||||
}
|
||||
|
||||
var defaultDefinitionListParser = &definitionListParser{}
|
||||
|
||||
// NewDefinitionListParser returns a new parser.BlockParser that
|
||||
// can parse PHP Markdown Extra Definition lists.
|
||||
func NewDefinitionListParser() parser.BlockParser {
|
||||
return defaultDefinitionListParser
|
||||
}
|
||||
|
||||
func (b *definitionListParser) Trigger() []byte {
|
||||
return []byte{':'}
|
||||
}
|
||||
|
||||
func (b *definitionListParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
|
||||
if _, ok := parent.(*ast.DefinitionList); ok {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
line, _ := reader.PeekLine()
|
||||
pos := pc.BlockOffset()
|
||||
indent := pc.BlockIndent()
|
||||
if pos < 0 || line[pos] != ':' || indent != 0 {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
|
||||
last := parent.LastChild()
|
||||
// need 1 or more spaces after ':'
|
||||
w, _ := util.IndentWidth(line[pos+1:], pos+1)
|
||||
if w < 1 {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
if w >= 8 { // starts with indented code
|
||||
w = 5
|
||||
}
|
||||
w += pos + 1 /* 1 = ':' */
|
||||
|
||||
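// A description line must follow either a paragraph holding the term or an
// already-open definition list; anything else is not a definition list.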
para, lastIsParagraph := last.(*gast.Paragraph)
|
||||
var list *ast.DefinitionList
|
||||
status := parser.HasChildren
|
||||
var ok bool
|
||||
if lastIsParagraph {
|
||||
list, ok = last.PreviousSibling().(*ast.DefinitionList)
|
||||
if ok { // is not first item
|
||||
list.Offset = w
|
||||
list.TemporaryParagraph = para
|
||||
} else { // is first item
|
||||
list = ast.NewDefinitionList(w, para)
|
||||
status |= parser.RequireParagraph
|
||||
}
|
||||
} else if list, ok = last.(*ast.DefinitionList); ok { // multiple description
|
||||
list.Offset = w
|
||||
list.TemporaryParagraph = nil
|
||||
} else {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
|
||||
return list, status
|
||||
}
|
||||
|
||||
func (b *definitionListParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
|
||||
line, _ := reader.PeekLine()
|
||||
if util.IsBlank(line) {
|
||||
return parser.Continue | parser.HasChildren
|
||||
}
|
||||
list, _ := node.(*ast.DefinitionList)
|
||||
w, _ := util.IndentWidth(line, reader.LineOffset())
|
||||
if w < list.Offset {
|
||||
return parser.Close
|
||||
}
|
||||
pos, padding := util.IndentPosition(line, reader.LineOffset(), list.Offset)
|
||||
reader.AdvanceAndSetPadding(pos, padding)
|
||||
return parser.Continue | parser.HasChildren
|
||||
}
|
||||
|
||||
func (b *definitionListParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (b *definitionListParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *definitionListParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type definitionDescriptionParser struct {
|
||||
}
|
||||
|
||||
var defaultDefinitionDescriptionParser = &definitionDescriptionParser{}
|
||||
|
||||
// NewDefinitionDescriptionParser returns a new parser.BlockParser that
|
||||
// can parse a definition description that starts with ':'.
|
||||
func NewDefinitionDescriptionParser() parser.BlockParser {
|
||||
return defaultDefinitionDescriptionParser
|
||||
}
|
||||
|
||||
func (b *definitionDescriptionParser) Trigger() []byte {
|
||||
return []byte{':'}
|
||||
}
|
||||
|
||||
func (b *definitionDescriptionParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
|
||||
line, _ := reader.PeekLine()
|
||||
pos := pc.BlockOffset()
|
||||
indent := pc.BlockIndent()
|
||||
if pos < 0 || line[pos] != ':' || indent != 0 {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
list, _ := parent.(*ast.DefinitionList)
|
||||
if list == nil {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
para := list.TemporaryParagraph
|
||||
list.TemporaryParagraph = nil
|
||||
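// Turn each line of the pending paragraph into a DefinitionTerm and remove the paragraph.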
if para != nil {
|
||||
lines := para.Lines()
|
||||
l := lines.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
term := ast.NewDefinitionTerm()
|
||||
segment := lines.At(i)
|
||||
term.Lines().Append(segment.TrimRightSpace(reader.Source()))
|
||||
list.AppendChild(list, term)
|
||||
}
|
||||
para.Parent().RemoveChild(para.Parent(), para)
|
||||
}
|
||||
cpos, padding := util.IndentPosition(line[pos+1:], pos+1, list.Offset-pos-1)
|
||||
reader.AdvanceAndSetPadding(cpos, padding)
|
||||
|
||||
return ast.NewDefinitionDescription(), parser.HasChildren
|
||||
}
|
||||
|
||||
func (b *definitionDescriptionParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
|
||||
// definitionListParser detects the end of the description,
|
||||
// so this method will never be called.
|
||||
return parser.Continue | parser.HasChildren
|
||||
}
|
||||
|
||||
func (b *definitionDescriptionParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
|
||||
desc := node.(*ast.DefinitionDescription)
|
||||
desc.IsTight = !desc.HasBlankPreviousLines()
|
||||
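// For tight descriptions, replace paragraph children with text blocks so the
// renderer does not wrap their content in <p> tags.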
if desc.IsTight {
|
||||
for gc := desc.FirstChild(); gc != nil; gc = gc.NextSibling() {
|
||||
paragraph, ok := gc.(*gast.Paragraph)
|
||||
if ok {
|
||||
textBlock := gast.NewTextBlock()
|
||||
textBlock.SetLines(paragraph.Lines())
|
||||
desc.ReplaceChild(desc, paragraph, textBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *definitionDescriptionParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *definitionDescriptionParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// DefinitionListHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||
// renders DefinitionList nodes.
|
||||
type DefinitionListHTMLRenderer struct {
|
||||
html.Config
|
||||
}
|
||||
|
||||
// NewDefinitionListHTMLRenderer returns a new DefinitionListHTMLRenderer.
|
||||
func NewDefinitionListHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||
r := &DefinitionListHTMLRenderer{
|
||||
Config: html.NewConfig(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.SetHTMLOption(&r.Config)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||
func (r *DefinitionListHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||
reg.Register(ast.KindDefinitionList, r.renderDefinitionList)
|
||||
reg.Register(ast.KindDefinitionTerm, r.renderDefinitionTerm)
|
||||
reg.Register(ast.KindDefinitionDescription, r.renderDefinitionDescription)
|
||||
}
|
||||
|
||||
// DefinitionListAttributeFilter defines attribute names which dl elements can have.
|
||||
var DefinitionListAttributeFilter = html.GlobalAttributeFilter
|
||||
|
||||
func (r *DefinitionListHTMLRenderer) renderDefinitionList(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<dl")
|
||||
html.RenderAttributes(w, n, DefinitionListAttributeFilter)
|
||||
_, _ = w.WriteString(">\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("<dl>\n")
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</dl>\n")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// DefinitionTermAttributeFilter defines attribute names which dt elements can have.
|
||||
var DefinitionTermAttributeFilter = html.GlobalAttributeFilter
|
||||
|
||||
func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<dt")
|
||||
html.RenderAttributes(w, n, DefinitionTermAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("<dt>")
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</dt>\n")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// DefinitionDescriptionAttributeFilter defines attribute names which dd elements can have.
|
||||
var DefinitionDescriptionAttributeFilter = html.GlobalAttributeFilter
|
||||
|
||||
func (r *DefinitionListHTMLRenderer) renderDefinitionDescription(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
n := node.(*ast.DefinitionDescription)
|
||||
_, _ = w.WriteString("<dd")
|
||||
if n.Attributes() != nil {
|
||||
html.RenderAttributes(w, n, DefinitionDescriptionAttributeFilter)
|
||||
}
|
||||
if n.IsTight {
|
||||
_, _ = w.WriteString(">")
|
||||
} else {
|
||||
_, _ = w.WriteString(">\n")
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</dd>\n")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
type definitionList struct {
|
||||
}
|
||||
|
||||
// DefinitionList is an extension that allows you to use PHP Markdown Extra definition lists.
|
||||
var DefinitionList = &definitionList{}
|
||||
|
||||
func (e *definitionList) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(parser.WithBlockParsers(
|
||||
util.Prioritized(NewDefinitionListParser(), 101),
|
||||
util.Prioritized(NewDefinitionDescriptionParser(), 102),
|
||||
))
|
||||
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||
util.Prioritized(NewDefinitionListHTMLRenderer(), 500),
|
||||
))
|
||||
}
|
336
vendor/github.com/yuin/goldmark/extension/footnote.go
generated
vendored
Normal file
@ -0,0 +1,336 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/yuin/goldmark"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/extension/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/renderer/html"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var footnoteListKey = parser.NewContextKey()
|
||||
|
||||
type footnoteBlockParser struct {
|
||||
}
|
||||
|
||||
var defaultFootnoteBlockParser = &footnoteBlockParser{}
|
||||
|
||||
// NewFootnoteBlockParser returns a new parser.BlockParser that can parse
|
||||
// footnotes of the Markdown(PHP Markdown Extra) text.
|
||||
func NewFootnoteBlockParser() parser.BlockParser {
|
||||
return defaultFootnoteBlockParser
|
||||
}
|
||||
|
||||
func (b *footnoteBlockParser) Trigger() []byte {
|
||||
return []byte{'['}
|
||||
}
|
||||
|
||||
func (b *footnoteBlockParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
|
||||
line, segment := reader.PeekLine()
|
||||
pos := pc.BlockOffset()
|
||||
if pos < 0 || line[pos] != '[' {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
pos++
|
||||
if pos > len(line)-1 || line[pos] != '^' {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
open := pos + 1
|
||||
closes := 0
|
||||
closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
|
||||
closes = pos + 1 + closure
|
||||
next := closes + 1
|
||||
if closure > -1 {
|
||||
if next >= len(line) || line[next] != ':' {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
} else {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
padding := segment.Padding
|
||||
label := reader.Value(text.NewSegment(segment.Start+open-padding, segment.Start+closes-padding))
|
||||
if util.IsBlank(label) {
|
||||
return nil, parser.NoChildren
|
||||
}
|
||||
item := ast.NewFootnote(label)
|
||||
|
||||
pos = next + 1 - padding
|
||||
if pos >= len(line) {
|
||||
reader.Advance(pos)
|
||||
return item, parser.NoChildren
|
||||
}
|
||||
reader.AdvanceAndSetPadding(pos, padding)
|
||||
return item, parser.HasChildren
|
||||
}
|
||||
|
||||
func (b *footnoteBlockParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
|
||||
line, _ := reader.PeekLine()
|
||||
if util.IsBlank(line) {
|
||||
return parser.Continue | parser.HasChildren
|
||||
}
|
||||
childpos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
|
||||
if childpos < 0 {
|
||||
return parser.Close
|
||||
}
|
||||
reader.AdvanceAndSetPadding(childpos, padding)
|
||||
return parser.Continue | parser.HasChildren
|
||||
}
|
||||
|
||||
func (b *footnoteBlockParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
|
||||
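// Move this footnote definition into a shared FootnoteList that is kept in the
// parser context and inserted into the document the first time it is needed.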
var list *ast.FootnoteList
|
||||
if tlist := pc.Get(footnoteListKey); tlist != nil {
|
||||
list = tlist.(*ast.FootnoteList)
|
||||
} else {
|
||||
list = ast.NewFootnoteList()
|
||||
pc.Set(footnoteListKey, list)
|
||||
node.Parent().InsertBefore(node.Parent(), node, list)
|
||||
}
|
||||
node.Parent().RemoveChild(node.Parent(), node)
|
||||
list.AppendChild(list, node)
|
||||
}
|
||||
|
||||
func (b *footnoteBlockParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *footnoteBlockParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type footnoteParser struct {
|
||||
}
|
||||
|
||||
var defaultFootnoteParser = &footnoteParser{}
|
||||
|
||||
// NewFootnoteParser returns a new parser.InlineParser that can parse
|
||||
// footnote links of the Markdown(PHP Markdown Extra) text.
|
||||
func NewFootnoteParser() parser.InlineParser {
|
||||
return defaultFootnoteParser
|
||||
}
|
||||
|
||||
func (s *footnoteParser) Trigger() []byte {
|
||||
// Footnote syntax may conflict with the image syntax,
|
||||
// so we also need to trigger this parser with '!'.
|
||||
return []byte{'!', '['}
|
||||
}
|
||||
|
||||
func (s *footnoteParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
|
||||
line, segment := block.PeekLine()
|
||||
pos := 1
|
||||
if len(line) > 0 && line[0] == '!' {
|
||||
pos++
|
||||
}
|
||||
if pos >= len(line) || line[pos] != '^' {
|
||||
return nil
|
||||
}
|
||||
pos++
|
||||
if pos >= len(line) {
|
||||
return nil
|
||||
}
|
||||
open := pos
|
||||
closure := util.FindClosure(line[pos:], '[', ']', false, false)
|
||||
if closure < 0 {
|
||||
return nil
|
||||
}
|
||||
closes := pos + closure
|
||||
value := block.Value(text.NewSegment(segment.Start+open, segment.Start+closes))
|
||||
block.Advance(closes + 1)
|
||||
|
||||
var list *ast.FootnoteList
|
||||
if tlist := pc.Get(footnoteListKey); tlist != nil {
|
||||
list = tlist.(*ast.FootnoteList)
|
||||
}
|
||||
if list == nil {
|
||||
return nil
|
||||
}
|
||||
index := 0
|
||||
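// Resolve the label against the collected footnote definitions, assigning a
// 1-based index the first time a definition is referenced.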
for def := list.FirstChild(); def != nil; def = def.NextSibling() {
|
||||
d := def.(*ast.Footnote)
|
||||
if bytes.Equal(d.Ref, value) {
|
||||
if d.Index < 0 {
|
||||
list.Count += 1
|
||||
d.Index = list.Count
|
||||
}
|
||||
index = d.Index
|
||||
break
|
||||
}
|
||||
}
|
||||
if index == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ast.NewFootnoteLink(index)
|
||||
}
|
||||
|
||||
type footnoteASTTransformer struct {
|
||||
}
|
||||
|
||||
var defaultFootnoteASTTransformer = &footnoteASTTransformer{}
|
||||
|
||||
// NewFootnoteASTTransformer returns a new parser.ASTTransformer that
|
||||
// inserts a footnote list at the end of the document.
|
||||
func NewFootnoteASTTransformer() parser.ASTTransformer {
|
||||
return defaultFootnoteASTTransformer
|
||||
}
|
||||
|
||||
func (a *footnoteASTTransformer) Transform(node *gast.Document, reader text.Reader, pc parser.Context) {
|
||||
var list *ast.FootnoteList
|
||||
if tlist := pc.Get(footnoteListKey); tlist != nil {
|
||||
list = tlist.(*ast.FootnoteList)
|
||||
} else {
|
||||
return
|
||||
}
|
||||
pc.Set(footnoteListKey, nil)
|
||||
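// Drop definitions that were never referenced and append a back link to each
// referenced footnote (or to its last paragraph, if it has one).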
for footnote := list.FirstChild(); footnote != nil; {
|
||||
var container gast.Node = footnote
|
||||
next := footnote.NextSibling()
|
||||
if fc := container.LastChild(); fc != nil && gast.IsParagraph(fc) {
|
||||
container = fc
|
||||
}
|
||||
index := footnote.(*ast.Footnote).Index
|
||||
if index < 0 {
|
||||
list.RemoveChild(list, footnote)
|
||||
} else {
|
||||
container.AppendChild(container, ast.NewFootnoteBackLink(index))
|
||||
}
|
||||
footnote = next
|
||||
}
|
||||
list.SortChildren(func(n1, n2 gast.Node) int {
|
||||
if n1.(*ast.Footnote).Index < n2.(*ast.Footnote).Index {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
})
|
||||
if list.Count <= 0 {
|
||||
list.Parent().RemoveChild(list.Parent(), list)
|
||||
return
|
||||
}
|
||||
|
||||
node.AppendChild(node, list)
|
||||
}
|
||||
|
||||
// FootnoteHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||
// renders FootnoteLink nodes.
|
||||
type FootnoteHTMLRenderer struct {
|
||||
html.Config
|
||||
}
|
||||
|
||||
// NewFootnoteHTMLRenderer returns a new FootnoteHTMLRenderer.
|
||||
func NewFootnoteHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||
r := &FootnoteHTMLRenderer{
|
||||
Config: html.NewConfig(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.SetHTMLOption(&r.Config)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||
func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||
reg.Register(ast.KindFootnoteLink, r.renderFootnoteLink)
|
||||
reg.Register(ast.KindFootnoteBackLink, r.renderFootnoteBackLink)
|
||||
reg.Register(ast.KindFootnote, r.renderFootnote)
|
||||
reg.Register(ast.KindFootnoteList, r.renderFootnoteList)
|
||||
}
|
||||
|
||||
func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
n := node.(*ast.FootnoteLink)
|
||||
is := strconv.Itoa(n.Index)
|
||||
_, _ = w.WriteString(`<sup id="fnref:`)
|
||||
_, _ = w.WriteString(is)
|
||||
_, _ = w.WriteString(`"><a href="#fn:`)
|
||||
_, _ = w.WriteString(is)
|
||||
_, _ = w.WriteString(`" class="footnote-ref" role="doc-noteref">`)
|
||||
_, _ = w.WriteString(is)
|
||||
_, _ = w.WriteString(`</a></sup>`)
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *FootnoteHTMLRenderer) renderFootnoteBackLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
n := node.(*ast.FootnoteBackLink)
|
||||
is := strconv.Itoa(n.Index)
|
||||
_, _ = w.WriteString(` <a href="#fnref:`)
|
||||
_, _ = w.WriteString(is)
|
||||
_, _ = w.WriteString(`" class="footnote-backref" role="doc-backlink">`)
|
||||
_, _ = w.WriteString("↩︎")
|
||||
_, _ = w.WriteString(`</a>`)
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
n := node.(*ast.Footnote)
|
||||
is := strconv.Itoa(n.Index)
|
||||
if entering {
|
||||
_, _ = w.WriteString(`<li id="fn:`)
|
||||
_, _ = w.WriteString(is)
|
||||
_, _ = w.WriteString(`" role="doc-endnote"`)
|
||||
if node.Attributes() != nil {
|
||||
html.RenderAttributes(w, node, html.ListItemAttributeFilter)
|
||||
}
|
||||
_, _ = w.WriteString(">\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("</li>\n")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
tag := "section"
|
||||
if r.Config.XHTML {
|
||||
tag = "div"
|
||||
}
|
||||
if entering {
|
||||
_, _ = w.WriteString("<")
|
||||
_, _ = w.WriteString(tag)
|
||||
_, _ = w.WriteString(` class="footnotes" role="doc-endnotes"`)
|
||||
if node.Attributes() != nil {
|
||||
html.RenderAttributes(w, node, html.GlobalAttributeFilter)
|
||||
}
|
||||
_ = w.WriteByte('>')
|
||||
if r.Config.XHTML {
|
||||
_, _ = w.WriteString("\n<hr />\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("\n<hr>\n")
|
||||
}
|
||||
_, _ = w.WriteString("<ol>\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("</ol>\n")
|
||||
_, _ = w.WriteString("</")
|
||||
_, _ = w.WriteString(tag)
|
||||
_, _ = w.WriteString(">\n")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
type footnote struct {
|
||||
}
|
||||
|
||||
// Footnote is an extension that allows you to use PHP Markdown Extra footnotes.
|
||||
var Footnote = &footnote{}
|
||||
|
||||
func (e *footnote) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(
|
||||
parser.WithBlockParsers(
|
||||
util.Prioritized(NewFootnoteBlockParser(), 999),
|
||||
),
|
||||
parser.WithInlineParsers(
|
||||
util.Prioritized(NewFootnoteParser(), 101),
|
||||
),
|
||||
parser.WithASTTransformers(
|
||||
util.Prioritized(NewFootnoteASTTransformer(), 999),
|
||||
),
|
||||
)
|
||||
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||
util.Prioritized(NewFootnoteHTMLRenderer(), 500),
|
||||
))
|
||||
}
|
18
vendor/github.com/yuin/goldmark/extension/gfm.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark"
|
||||
)
|
||||
|
||||
type gfm struct {
|
||||
}
|
||||
|
||||
// GFM is an extension that provides GitHub Flavored Markdown functionality.
|
||||
var GFM = &gfm{}
|
||||
|
||||
func (e *gfm) Extend(m goldmark.Markdown) {
|
||||
Linkify.Extend(m)
|
||||
Table.Extend(m)
|
||||
Strikethrough.Extend(m)
|
||||
TaskList.Extend(m)
|
||||
}
|
303
vendor/github.com/yuin/goldmark/extension/linkify.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"regexp"
|
||||
|
||||
"github.com/yuin/goldmark"
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]+(?:(?:/|[#?])[-a-zA-Z0-9@:%_\+.~#!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
|
||||
|
||||
var urlRegexp = regexp.MustCompile(`^(?:http|https|ftp):\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]+(?:(?:/|[#?])[-a-zA-Z0-9@:%_+.~#$!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
|
||||
|
||||
// A LinkifyConfig struct is a data structure that holds the configuration of the
|
||||
// Linkify extension.
|
||||
type LinkifyConfig struct {
|
||||
AllowedProtocols [][]byte
|
||||
URLRegexp *regexp.Regexp
|
||||
WWWRegexp *regexp.Regexp
|
||||
EmailRegexp *regexp.Regexp
|
||||
}
|
||||
|
||||
const optLinkifyAllowedProtocols parser.OptionName = "LinkifyAllowedProtocols"
|
||||
const optLinkifyURLRegexp parser.OptionName = "LinkifyURLRegexp"
|
||||
const optLinkifyWWWRegexp parser.OptionName = "LinkifyWWWRegexp"
|
||||
const optLinkifyEmailRegexp parser.OptionName = "LinkifyEmailRegexp"
|
||||
|
||||
// SetOption implements SetOptioner.
|
||||
func (c *LinkifyConfig) SetOption(name parser.OptionName, value interface{}) {
|
||||
switch name {
|
||||
case optLinkifyAllowedProtocols:
|
||||
c.AllowedProtocols = value.([][]byte)
|
||||
case optLinkifyURLRegexp:
|
||||
c.URLRegexp = value.(*regexp.Regexp)
|
||||
case optLinkifyWWWRegexp:
|
||||
c.WWWRegexp = value.(*regexp.Regexp)
|
||||
case optLinkifyEmailRegexp:
|
||||
c.EmailRegexp = value.(*regexp.Regexp)
|
||||
}
|
||||
}
|
||||
|
||||
// A LinkifyOption interface sets options for the Linkify extension.
|
||||
type LinkifyOption interface {
|
||||
parser.Option
|
||||
SetLinkifyOption(*LinkifyConfig)
|
||||
}
|
||||
|
||||
type withLinkifyAllowedProtocols struct {
|
||||
value [][]byte
|
||||
}
|
||||
|
||||
func (o *withLinkifyAllowedProtocols) SetParserOption(c *parser.Config) {
|
||||
c.Options[optLinkifyAllowedProtocols] = o.value
|
||||
}
|
||||
|
||||
func (o *withLinkifyAllowedProtocols) SetLinkifyOption(p *LinkifyConfig) {
|
||||
p.AllowedProtocols = o.value
|
||||
}
|
||||
|
||||
// WithLinkifyAllowedProtocols is a functional option that specifies allowed
|
||||
// protocols in autolinks. Each protocol must end with ':' like
|
||||
// 'http:' .
|
||||
func WithLinkifyAllowedProtocols(value [][]byte) LinkifyOption {
|
||||
return &withLinkifyAllowedProtocols{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
type withLinkifyURLRegexp struct {
|
||||
value *regexp.Regexp
|
||||
}
|
||||
|
||||
func (o *withLinkifyURLRegexp) SetParserOption(c *parser.Config) {
|
||||
c.Options[optLinkifyURLRegexp] = o.value
|
||||
}
|
||||
|
||||
func (o *withLinkifyURLRegexp) SetLinkifyOption(p *LinkifyConfig) {
|
||||
p.URLRegexp = o.value
|
||||
}
|
||||
|
||||
// WithLinkifyURLRegexp is a functional option that specifies
|
||||
// a pattern of the URL including a protocol.
|
||||
func WithLinkifyURLRegexp(value *regexp.Regexp) LinkifyOption {
|
||||
return &withLinkifyURLRegexp{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
// WithLinkifyWWWRegexp is a functional option that specifies
|
||||
// a pattern of the URL without a protocol.
|
||||
// This pattern must start with 'www.'.
|
||||
type withLinkifyWWWRegexp struct {
|
||||
value *regexp.Regexp
|
||||
}
|
||||
|
||||
func (o *withLinkifyWWWRegexp) SetParserOption(c *parser.Config) {
|
||||
c.Options[optLinkifyWWWRegexp] = o.value
|
||||
}
|
||||
|
||||
func (o *withLinkifyWWWRegexp) SetLinkifyOption(p *LinkifyConfig) {
|
||||
p.WWWRegexp = o.value
|
||||
}
|
||||
|
||||
func WithLinkifyWWWRegexp(value *regexp.Regexp) LinkifyOption {
|
||||
return &withLinkifyWWWRegexp{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
// WithLinkifyEmailRegexp is a functional option that specifies
|
||||
// a pattern for the email address.
|
||||
type withLinkifyEmailRegexp struct {
|
||||
value *regexp.Regexp
|
||||
}
|
||||
|
||||
func (o *withLinkifyEmailRegexp) SetParserOption(c *parser.Config) {
|
||||
c.Options[optLinkifyEmailRegexp] = o.value
|
||||
}
|
||||
|
||||
func (o *withLinkifyEmailRegexp) SetLinkifyOption(p *LinkifyConfig) {
|
||||
p.EmailRegexp = o.value
|
||||
}
|
||||
|
||||
func WithLinkifyEmailRegexp(value *regexp.Regexp) LinkifyOption {
|
||||
return &withLinkifyEmailRegexp{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
type linkifyParser struct {
|
||||
LinkifyConfig
|
||||
}
|
||||
|
||||
// NewLinkifyParser returns a new InlineParser that can parse
|
||||
// text that seems like a URL.
|
||||
func NewLinkifyParser(opts ...LinkifyOption) parser.InlineParser {
|
||||
p := &linkifyParser{
|
||||
LinkifyConfig: LinkifyConfig{
|
||||
AllowedProtocols: nil,
|
||||
URLRegexp: urlRegexp,
|
||||
WWWRegexp: wwwURLRegxp,
|
||||
},
|
||||
}
|
||||
for _, o := range opts {
|
||||
o.SetLinkifyOption(&p.LinkifyConfig)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (s *linkifyParser) Trigger() []byte {
|
||||
// ' ' indicates any whitespace character and the start of a line
|
||||
return []byte{' ', '*', '_', '~', '('}
|
||||
}
|
||||
|
||||
var protoHTTP = []byte("http:")
|
||||
var protoHTTPS = []byte("https:")
|
||||
var protoFTP = []byte("ftp:")
|
||||
var domainWWW = []byte("www.")
|
||||
|
||||
func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
|
||||
if pc.IsInLinkLabel() {
|
||||
return nil
|
||||
}
|
||||
line, segment := block.PeekLine()
|
||||
consumes := 0
|
||||
start := segment.Start
|
||||
c := line[0]
|
||||
// advance if current position is not a line head.
|
||||
if c == ' ' || c == '*' || c == '_' || c == '~' || c == '(' {
|
||||
consumes++
|
||||
start++
|
||||
line = line[1:]
|
||||
}
|
||||
|
||||
var m []int
|
||||
var protocol []byte
|
||||
var typ ast.AutoLinkType = ast.AutoLinkURL
|
||||
if s.LinkifyConfig.AllowedProtocols == nil {
|
||||
if bytes.HasPrefix(line, protoHTTP) || bytes.HasPrefix(line, protoHTTPS) || bytes.HasPrefix(line, protoFTP) {
|
||||
m = s.LinkifyConfig.URLRegexp.FindSubmatchIndex(line)
|
||||
}
|
||||
} else {
|
||||
for _, prefix := range s.LinkifyConfig.AllowedProtocols {
|
||||
if bytes.HasPrefix(line, prefix) {
|
||||
m = s.LinkifyConfig.URLRegexp.FindSubmatchIndex(line)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if m == nil && bytes.HasPrefix(line, domainWWW) {
|
||||
m = s.LinkifyConfig.WWWRegexp.FindSubmatchIndex(line)
|
||||
protocol = []byte("http")
|
||||
}
|
||||
if m != nil && m[0] != 0 {
|
||||
m = nil
|
||||
}
|
||||
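// Trim trailing characters that are usually not part of the URL: a final '.',
// unbalanced closing parentheses, or a trailing HTML entity ending in ';'.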
if m != nil && m[0] == 0 {
|
||||
lastChar := line[m[1]-1]
|
||||
if lastChar == '.' {
|
||||
m[1]--
|
||||
} else if lastChar == ')' {
|
||||
closing := 0
|
||||
for i := m[1] - 1; i >= m[0]; i-- {
|
||||
if line[i] == ')' {
|
||||
closing++
|
||||
} else if line[i] == '(' {
|
||||
closing--
|
||||
}
|
||||
}
|
||||
if closing > 0 {
|
||||
m[1] -= closing
|
||||
}
|
||||
} else if lastChar == ';' {
|
||||
i := m[1] - 2
|
||||
for ; i >= m[0]; i-- {
|
||||
if util.IsAlphaNumeric(line[i]) {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if i != m[1]-2 {
|
||||
if line[i] == '&' {
|
||||
m[1] -= m[1] - i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
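// No URL matched; fall back to detecting a bare email address.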
if m == nil {
|
||||
if len(line) > 0 && util.IsPunct(line[0]) {
|
||||
return nil
|
||||
}
|
||||
typ = ast.AutoLinkEmail
|
||||
stop := -1
|
||||
if s.LinkifyConfig.EmailRegexp == nil {
|
||||
stop = util.FindEmailIndex(line)
|
||||
} else {
|
||||
m := s.LinkifyConfig.EmailRegexp.FindSubmatchIndex(line)
|
||||
if m != nil && m[0] == 0 {
|
||||
stop = m[1]
|
||||
}
|
||||
}
|
||||
if stop < 0 {
|
||||
return nil
|
||||
}
|
||||
at := bytes.IndexByte(line, '@')
|
||||
m = []int{0, stop, at, stop - 1}
|
||||
if m == nil || bytes.IndexByte(line[m[2]:m[3]], '.') < 0 {
|
||||
return nil
|
||||
}
|
||||
lastChar := line[m[1]-1]
|
||||
if lastChar == '.' {
|
||||
m[1]--
|
||||
}
|
||||
if m[1] < len(line) {
|
||||
nextChar := line[m[1]]
|
||||
if nextChar == '-' || nextChar == '_' {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
if consumes != 0 {
|
||||
s := segment.WithStop(segment.Start + 1)
|
||||
ast.MergeOrAppendTextSegment(parent, s)
|
||||
}
|
||||
consumes += m[1]
|
||||
block.Advance(consumes)
|
||||
n := ast.NewTextSegment(text.NewSegment(start, start+m[1]))
|
||||
link := ast.NewAutoLink(typ, n)
|
||||
link.Protocol = protocol
|
||||
return link
|
||||
}
|
||||
|
||||
func (s *linkifyParser) CloseBlock(parent ast.Node, pc parser.Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
type linkify struct {
|
||||
options []LinkifyOption
|
||||
}
|
||||
|
||||
// Linkify is an extension that allows you to parse text that looks like a URL.
|
||||
var Linkify = &linkify{}
|
||||
|
||||
func NewLinkify(opts ...LinkifyOption) goldmark.Extender {
|
||||
return &linkify{
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *linkify) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(
|
||||
parser.WithInlineParsers(
|
||||
util.Prioritized(NewLinkifyParser(e.options...), 999),
|
||||
),
|
||||
)
|
||||
}
|
116
vendor/github.com/yuin/goldmark/extension/strikethrough.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/extension/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/renderer/html"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type strikethroughDelimiterProcessor struct {
|
||||
}
|
||||
|
||||
func (p *strikethroughDelimiterProcessor) IsDelimiter(b byte) bool {
|
||||
return b == '~'
|
||||
}
|
||||
|
||||
func (p *strikethroughDelimiterProcessor) CanOpenCloser(opener, closer *parser.Delimiter) bool {
|
||||
return opener.Char == closer.Char
|
||||
}
|
||||
|
||||
func (p *strikethroughDelimiterProcessor) OnMatch(consumes int) gast.Node {
|
||||
return ast.NewStrikethrough()
|
||||
}
|
||||
|
||||
var defaultStrikethroughDelimiterProcessor = &strikethroughDelimiterProcessor{}
|
||||
|
||||
type strikethroughParser struct {
|
||||
}
|
||||
|
||||
var defaultStrikethroughParser = &strikethroughParser{}
|
||||
|
||||
// NewStrikethroughParser returns a new InlineParser that parses
|
||||
// strikethrough expressions.
|
||||
func NewStrikethroughParser() parser.InlineParser {
|
||||
return defaultStrikethroughParser
|
||||
}
|
||||
|
||||
func (s *strikethroughParser) Trigger() []byte {
|
||||
return []byte{'~'}
|
||||
}
|
||||
|
||||
func (s *strikethroughParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
|
||||
before := block.PrecendingCharacter()
|
||||
line, segment := block.PeekLine()
|
||||
node := parser.ScanDelimiter(line, before, 2, defaultStrikethroughDelimiterProcessor)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
|
||||
block.Advance(node.OriginalLength)
|
||||
pc.PushDelimiter(node)
|
||||
return node
|
||||
}
|
||||
|
||||
func (s *strikethroughParser) CloseBlock(parent gast.Node, pc parser.Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
// StrikethroughHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||
// renders Strikethrough nodes.
|
||||
type StrikethroughHTMLRenderer struct {
|
||||
html.Config
|
||||
}
|
||||
|
||||
// NewStrikethroughHTMLRenderer returns a new StrikethroughHTMLRenderer.
|
||||
func NewStrikethroughHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||
r := &StrikethroughHTMLRenderer{
|
||||
Config: html.NewConfig(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.SetHTMLOption(&r.Config)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||
func (r *StrikethroughHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||
reg.Register(ast.KindStrikethrough, r.renderStrikethrough)
|
||||
}
|
||||
|
||||
// StrikethroughAttributeFilter defines attribute names which del elements can have.
|
||||
var StrikethroughAttributeFilter = html.GlobalAttributeFilter
|
||||
|
||||
func (r *StrikethroughHTMLRenderer) renderStrikethrough(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<del")
|
||||
html.RenderAttributes(w, n, StrikethroughAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("<del>")
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</del>")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
type strikethrough struct {
|
||||
}
|
||||
|
||||
// Strikethrough is an extension that allows you to use strikethrough expressions like '~~text~~'.
|
||||
var Strikethrough = &strikethrough{}
|
||||
|
||||
func (e *strikethrough) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(parser.WithInlineParsers(
|
||||
util.Prioritized(NewStrikethroughParser(), 500),
|
||||
))
|
||||
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||
util.Prioritized(NewStrikethroughHTMLRenderer(), 500),
|
||||
))
|
||||
}
|
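For orientation, a small usage sketch of the Strikethrough extension from the file above; the sample input text is purely illustrative.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
)

func main() {
	// Strikethrough turns '~~text~~' into <del>text</del>.
	md := goldmark.New(goldmark.WithExtensions(extension.Strikethrough))
	var out bytes.Buffer
	if err := md.Convert([]byte("this is ~~wrong~~ fixed"), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}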
446
vendor/github.com/yuin/goldmark/extension/table.go
generated
vendored
Normal file
446
vendor/github.com/yuin/goldmark/extension/table.go
generated
vendored
Normal file
@ -0,0 +1,446 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/yuin/goldmark"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/extension/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/renderer/html"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// TableCellAlignMethod indicates how table cells are aligned in HTML format.
|
||||
type TableCellAlignMethod int
|
||||
|
||||
const (
|
||||
// TableCellAlignDefault renders alignments by default method.
|
||||
// With XHTML, alignments are rendered as an align attribute.
|
||||
// With HTML5, alignments are rendered as a style attribute.
|
||||
TableCellAlignDefault TableCellAlignMethod = iota
|
||||
|
||||
// TableCellAlignAttribute renders alignments as an align attribute.
|
||||
TableCellAlignAttribute
|
||||
|
||||
// TableCellAlignStyle renders alignments as a style attribute.
|
||||
TableCellAlignStyle
|
||||
|
||||
// TableCellAlignNone does not care about alignments.
|
||||
// If you are using classes or other styles, you can add these attributes
|
||||
// in an ASTTransformer.
|
||||
TableCellAlignNone
|
||||
)
|
||||
|
||||
// TableConfig struct holds options for the extension.
|
||||
type TableConfig struct {
|
||||
html.Config
|
||||
|
||||
// TableCellAlignMethod indicates how table cells are aligned.
|
||||
TableCellAlignMethod TableCellAlignMethod
|
||||
}
|
||||
|
||||
// TableOption interface is a functional option interface for the extension.
|
||||
type TableOption interface {
|
||||
renderer.Option
|
||||
// SetTableOption sets the given option on the extension.
|
||||
SetTableOption(*TableConfig)
|
||||
}
|
||||
|
||||
// NewTableConfig returns a new Config with defaults.
|
||||
func NewTableConfig() TableConfig {
|
||||
return TableConfig{
|
||||
Config: html.NewConfig(),
|
||||
TableCellAlignMethod: TableCellAlignDefault,
|
||||
}
|
||||
}
|
||||
|
||||
// SetOption implements renderer.SetOptioner.
|
||||
func (c *TableConfig) SetOption(name renderer.OptionName, value interface{}) {
|
||||
switch name {
|
||||
case optTableCellAlignMethod:
|
||||
c.TableCellAlignMethod = value.(TableCellAlignMethod)
|
||||
default:
|
||||
c.Config.SetOption(name, value)
|
||||
}
|
||||
}
|
||||
|
||||
type withTableHTMLOptions struct {
|
||||
value []html.Option
|
||||
}
|
||||
|
||||
func (o *withTableHTMLOptions) SetConfig(c *renderer.Config) {
|
||||
if o.value != nil {
|
||||
for _, v := range o.value {
|
||||
v.(renderer.Option).SetConfig(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *withTableHTMLOptions) SetTableOption(c *TableConfig) {
|
||||
if o.value != nil {
|
||||
for _, v := range o.value {
|
||||
v.SetHTMLOption(&c.Config)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithTableHTMLOptions is a functional option that wraps goldmark HTMLRenderer options.
|
||||
func WithTableHTMLOptions(opts ...html.Option) TableOption {
|
||||
return &withTableHTMLOptions{opts}
|
||||
}
|
||||
|
||||
const optTableCellAlignMethod renderer.OptionName = "TableTableCellAlignMethod"
|
||||
|
||||
type withTableCellAlignMethod struct {
|
||||
value TableCellAlignMethod
|
||||
}
|
||||
|
||||
func (o *withTableCellAlignMethod) SetConfig(c *renderer.Config) {
|
||||
c.Options[optTableCellAlignMethod] = o.value
|
||||
}
|
||||
|
||||
func (o *withTableCellAlignMethod) SetTableOption(c *TableConfig) {
|
||||
c.TableCellAlignMethod = o.value
|
||||
}
|
||||
|
||||
// WithTableCellAlignMethod is a functional option that indicates how table cells are aligned in HTML format.
|
||||
func WithTableCellAlignMethod(a TableCellAlignMethod) TableOption {
|
||||
return &withTableCellAlignMethod{a}
|
||||
}
|
||||
|
||||
var tableDelimRegexp = regexp.MustCompile(`^[\s\-\|\:]+$`)
|
||||
var tableDelimLeft = regexp.MustCompile(`^\s*\:\-+\s*$`)
|
||||
var tableDelimRight = regexp.MustCompile(`^\s*\-+\:\s*$`)
|
||||
var tableDelimCenter = regexp.MustCompile(`^\s*\:\-+\:\s*$`)
|
||||
var tableDelimNone = regexp.MustCompile(`^\s*\-+\s*$`)
|
||||
|
||||
type tableParagraphTransformer struct {
|
||||
}
|
||||
|
||||
var defaultTableParagraphTransformer = &tableParagraphTransformer{}
|
||||
|
||||
// NewTableParagraphTransformer returns a new ParagraphTransformer
|
||||
// that can transform paragraphs into tables.
|
||||
func NewTableParagraphTransformer() parser.ParagraphTransformer {
|
||||
return defaultTableParagraphTransformer
|
||||
}
|
||||
|
||||
func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, reader text.Reader, pc parser.Context) {
|
||||
lines := node.Lines()
|
||||
if lines.Len() < 2 {
|
||||
return
|
||||
}
|
||||
alignments := b.parseDelimiter(lines.At(1), reader)
|
||||
if alignments == nil {
|
||||
return
|
||||
}
|
||||
header := b.parseRow(lines.At(0), alignments, true, reader)
|
||||
if header == nil || len(alignments) != header.ChildCount() {
|
||||
return
|
||||
}
|
||||
table := ast.NewTable()
|
||||
table.Alignments = alignments
|
||||
table.AppendChild(table, ast.NewTableHeader(header))
|
||||
for i := 2; i < lines.Len(); i++ {
|
||||
table.AppendChild(table, b.parseRow(lines.At(i), alignments, false, reader))
|
||||
}
|
||||
node.Parent().InsertBefore(node.Parent(), node, table)
|
||||
node.Parent().RemoveChild(node.Parent(), node)
|
||||
}
|
||||
|
||||
func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []ast.Alignment, isHeader bool, reader text.Reader) *ast.TableRow {
|
||||
source := reader.Source()
|
||||
line := segment.Value(source)
|
||||
pos := 0
|
||||
pos += util.TrimLeftSpaceLength(line)
|
||||
limit := len(line)
|
||||
limit -= util.TrimRightSpaceLength(line)
|
||||
row := ast.NewTableRow(alignments)
|
||||
if len(line) > 0 && line[pos] == '|' {
|
||||
pos++
|
||||
}
|
||||
if len(line) > 0 && line[limit-1] == '|' {
|
||||
limit--
|
||||
}
|
||||
i := 0
|
||||
for ; pos < limit; i++ {
|
||||
alignment := ast.AlignNone
|
||||
if i >= len(alignments) {
|
||||
if !isHeader {
|
||||
return row
|
||||
}
|
||||
} else {
|
||||
alignment = alignments[i]
|
||||
}
|
||||
closure := util.FindClosure(line[pos:], byte(0), '|', true, false)
|
||||
if closure < 0 {
|
||||
closure = len(line[pos:])
|
||||
}
|
||||
node := ast.NewTableCell()
|
||||
seg := text.NewSegment(segment.Start+pos, segment.Start+pos+closure)
|
||||
seg = seg.TrimLeftSpace(source)
|
||||
seg = seg.TrimRightSpace(source)
|
||||
node.Lines().Append(seg)
|
||||
node.Alignment = alignment
|
||||
row.AppendChild(row, node)
|
||||
pos += closure + 1
|
||||
}
|
||||
for ; i < len(alignments); i++ {
|
||||
row.AppendChild(row, ast.NewTableCell())
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, reader text.Reader) []ast.Alignment {
|
||||
line := segment.Value(reader.Source())
|
||||
if !tableDelimRegexp.Match(line) {
|
||||
return nil
|
||||
}
|
||||
cols := bytes.Split(line, []byte{'|'})
|
||||
if util.IsBlank(cols[0]) {
|
||||
cols = cols[1:]
|
||||
}
|
||||
if len(cols) > 0 && util.IsBlank(cols[len(cols)-1]) {
|
||||
cols = cols[:len(cols)-1]
|
||||
}
|
||||
|
||||
var alignments []ast.Alignment
|
||||
for _, col := range cols {
|
||||
if tableDelimLeft.Match(col) {
|
||||
alignments = append(alignments, ast.AlignLeft)
|
||||
} else if tableDelimRight.Match(col) {
|
||||
alignments = append(alignments, ast.AlignRight)
|
||||
} else if tableDelimCenter.Match(col) {
|
||||
alignments = append(alignments, ast.AlignCenter)
|
||||
} else if tableDelimNone.Match(col) {
|
||||
alignments = append(alignments, ast.AlignNone)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return alignments
|
||||
}
|
||||
|
||||
// TableHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||
// renders Table nodes.
|
||||
type TableHTMLRenderer struct {
|
||||
TableConfig
|
||||
}
|
||||
|
||||
// NewTableHTMLRenderer returns a new TableHTMLRenderer.
|
||||
func NewTableHTMLRenderer(opts ...TableOption) renderer.NodeRenderer {
|
||||
r := &TableHTMLRenderer{
|
||||
TableConfig: NewTableConfig(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.SetTableOption(&r.TableConfig)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||
func (r *TableHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||
reg.Register(ast.KindTable, r.renderTable)
|
||||
reg.Register(ast.KindTableHeader, r.renderTableHeader)
|
||||
reg.Register(ast.KindTableRow, r.renderTableRow)
|
||||
reg.Register(ast.KindTableCell, r.renderTableCell)
|
||||
}
|
||||
|
||||
// TableAttributeFilter defines attribute names which table elements can have.
|
||||
var TableAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||
[]byte("align"), // [Deprecated]
|
||||
[]byte("bgcolor"), // [Deprecated]
|
||||
[]byte("border"), // [Deprecated]
|
||||
[]byte("cellpadding"), // [Deprecated]
|
||||
[]byte("cellspacing"), // [Deprecated]
|
||||
[]byte("frame"), // [Deprecated]
|
||||
[]byte("rules"), // [Deprecated]
|
||||
[]byte("summary"), // [Deprecated]
|
||||
[]byte("width"), // [Deprecated]
|
||||
)
|
||||
|
||||
func (r *TableHTMLRenderer) renderTable(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
_, _ = w.WriteString("<table")
|
||||
if n.Attributes() != nil {
|
||||
html.RenderAttributes(w, n, TableAttributeFilter)
|
||||
}
|
||||
_, _ = w.WriteString(">\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("</table>\n")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// TableHeaderAttributeFilter defines attribute names which <thead> elements can have.
|
||||
var TableHeaderAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||
[]byte("align"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
[]byte("bgcolor"), // [Not Standardized]
|
||||
[]byte("char"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
[]byte("charoff"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
[]byte("valign"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
)
|
||||
|
||||
func (r *TableHTMLRenderer) renderTableHeader(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
_, _ = w.WriteString("<thead")
|
||||
if n.Attributes() != nil {
|
||||
html.RenderAttributes(w, n, TableHeaderAttributeFilter)
|
||||
}
|
||||
_, _ = w.WriteString(">\n")
|
||||
_, _ = w.WriteString("<tr>\n") // Header <tr> has no separate handle
|
||||
} else {
|
||||
_, _ = w.WriteString("</tr>\n")
|
||||
_, _ = w.WriteString("</thead>\n")
|
||||
if n.NextSibling() != nil {
|
||||
_, _ = w.WriteString("<tbody>\n")
|
||||
}
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// TableRowAttributeFilter defines attribute names which <tr> elements can have.
|
||||
var TableRowAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||
[]byte("align"), // [Obsolete since HTML5]
|
||||
[]byte("bgcolor"), // [Obsolete since HTML5]
|
||||
[]byte("char"), // [Obsolete since HTML5]
|
||||
[]byte("charoff"), // [Obsolete since HTML5]
|
||||
[]byte("valign"), // [Obsolete since HTML5]
|
||||
)
|
||||
|
||||
func (r *TableHTMLRenderer) renderTableRow(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if entering {
|
||||
_, _ = w.WriteString("<tr")
|
||||
if n.Attributes() != nil {
|
||||
html.RenderAttributes(w, n, TableRowAttributeFilter)
|
||||
}
|
||||
_, _ = w.WriteString(">\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("</tr>\n")
|
||||
if n.Parent().LastChild() == n {
|
||||
_, _ = w.WriteString("</tbody>\n")
|
||||
}
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// TableThCellAttributeFilter defines attribute names which table <th> cells can have.
|
||||
var TableThCellAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||
[]byte("abbr"), // [OK] Contains a short abbreviated description of the cell's content [NOT OK in <td>]
|
||||
|
||||
[]byte("align"), // [Obsolete since HTML5]
|
||||
[]byte("axis"), // [Obsolete since HTML5]
|
||||
[]byte("bgcolor"), // [Not Standardized]
|
||||
[]byte("char"), // [Obsolete since HTML5]
|
||||
[]byte("charoff"), // [Obsolete since HTML5]
|
||||
|
||||
[]byte("colspan"), // [OK] Number of columns that the cell is to span
|
||||
[]byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
|
||||
|
||||
[]byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
|
||||
[]byte("rowspan"), // [OK] Number of rows that the cell is to span
|
||||
[]byte("scope"), // [OK] This enumerated attribute defines the cells that the header (defined in the <th>) element relates to [NOT OK in <td>]
|
||||
|
||||
[]byte("valign"), // [Obsolete since HTML5]
|
||||
[]byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
)
|
||||
|
||||
// TableTdCellAttributeFilter defines attribute names which table <td> cells can have.
|
||||
var TableTdCellAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||
[]byte("abbr"), // [Obsolete since HTML5] [OK in <th>]
|
||||
[]byte("align"), // [Obsolete since HTML5]
|
||||
[]byte("axis"), // [Obsolete since HTML5]
|
||||
[]byte("bgcolor"), // [Not Standardized]
|
||||
[]byte("char"), // [Obsolete since HTML5]
|
||||
[]byte("charoff"), // [Obsolete since HTML5]
|
||||
|
||||
[]byte("colspan"), // [OK] Number of columns that the cell is to span
|
||||
[]byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
|
||||
|
||||
[]byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
|
||||
[]byte("rowspan"), // [OK] Number of rows that the cell is to span
|
||||
|
||||
[]byte("scope"), // [Obsolete since HTML5] [OK in <th>]
|
||||
[]byte("valign"), // [Obsolete since HTML5]
|
||||
[]byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||
)
|
||||
|
||||
func (r *TableHTMLRenderer) renderTableCell(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
n := node.(*ast.TableCell)
|
||||
tag := "td"
|
||||
if n.Parent().Kind() == ast.KindTableHeader {
|
||||
tag = "th"
|
||||
}
|
||||
if entering {
|
||||
fmt.Fprintf(w, "<%s", tag)
|
||||
if n.Alignment != ast.AlignNone {
|
||||
amethod := r.TableConfig.TableCellAlignMethod
|
||||
if amethod == TableCellAlignDefault {
|
||||
if r.Config.XHTML {
|
||||
amethod = TableCellAlignAttribute
|
||||
} else {
|
||||
amethod = TableCellAlignStyle
|
||||
}
|
||||
}
|
||||
switch amethod {
|
||||
case TableCellAlignAttribute:
|
||||
if _, ok := n.AttributeString("align"); !ok { // Skip align render if overridden
|
||||
fmt.Fprintf(w, ` align="%s"`, n.Alignment.String())
|
||||
}
|
||||
case TableCellAlignStyle:
|
||||
v, ok := n.AttributeString("style")
|
||||
var cob util.CopyOnWriteBuffer
|
||||
if ok {
|
||||
cob = util.NewCopyOnWriteBuffer(v.([]byte))
|
||||
cob.AppendByte(';')
|
||||
}
|
||||
style := fmt.Sprintf("text-align:%s", n.Alignment.String())
|
||||
cob.Append(util.StringToReadOnlyBytes(style))
|
||||
n.SetAttributeString("style", cob.Bytes())
|
||||
}
|
||||
}
|
||||
if n.Attributes() != nil {
|
||||
if tag == "td" {
|
||||
html.RenderAttributes(w, n, TableTdCellAttributeFilter) // <td>
|
||||
} else {
|
||||
html.RenderAttributes(w, n, TableThCellAttributeFilter) // <th>
|
||||
}
|
||||
}
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
fmt.Fprintf(w, "</%s>\n", tag)
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
type table struct {
|
||||
options []TableOption
|
||||
}
|
||||
|
||||
// Table is an extension that allows you to use GFM tables.
|
||||
var Table = &table{
|
||||
options: []TableOption{},
|
||||
}
|
||||
|
||||
// NewTable returns a new extension with given options.
|
||||
func NewTable(opts ...TableOption) goldmark.Extender {
|
||||
return &table{
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *table) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(parser.WithParagraphTransformers(
|
||||
util.Prioritized(NewTableParagraphTransformer(), 200),
|
||||
))
|
||||
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||
util.Prioritized(NewTableHTMLRenderer(e.options...), 500),
|
||||
))
|
||||
}
|
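A brief sketch of enabling the Table extension above together with its alignment option; the table source and the choice of style-based alignment are illustrative assumptions, not something the commit itself configures.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
)

func main() {
	// Render cell alignments as style attributes instead of the default method.
	md := goldmark.New(goldmark.WithExtensions(
		extension.NewTable(extension.WithTableCellAlignMethod(extension.TableCellAlignStyle)),
	))
	src := []byte("| Name | Stars |\n|:-----|------:|\n| tea  | many  |\n")
	var out bytes.Buffer
	if err := md.Convert(src, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}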
115
vendor/github.com/yuin/goldmark/extension/tasklist.go
generated
vendored
Normal file
115
vendor/github.com/yuin/goldmark/extension/tasklist.go
generated
vendored
Normal file
@ -0,0 +1,115 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/extension/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/renderer/html"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var taskListRegexp = regexp.MustCompile(`^\[([\sxX])\]\s*`)
|
||||
|
||||
type taskCheckBoxParser struct {
|
||||
}
|
||||
|
||||
var defaultTaskCheckBoxParser = &taskCheckBoxParser{}
|
||||
|
||||
// NewTaskCheckBoxParser returns a new InlineParser that can parse
|
||||
// checkboxes in list items.
|
||||
// This parser must take precedence over the parser.LinkParser.
|
||||
func NewTaskCheckBoxParser() parser.InlineParser {
|
||||
return defaultTaskCheckBoxParser
|
||||
}
|
||||
|
||||
func (s *taskCheckBoxParser) Trigger() []byte {
|
||||
return []byte{'['}
|
||||
}
|
||||
|
||||
func (s *taskCheckBoxParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
|
||||
// Given AST structure must be like
|
||||
// - List
|
||||
// - ListItem : parent.Parent
|
||||
// - TextBlock : parent
|
||||
// (current line)
|
||||
if parent.Parent() == nil || parent.Parent().FirstChild() != parent {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, ok := parent.Parent().(*gast.ListItem); !ok {
|
||||
return nil
|
||||
}
|
||||
line, _ := block.PeekLine()
|
||||
m := taskListRegexp.FindSubmatchIndex(line)
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
value := line[m[2]:m[3]][0]
|
||||
block.Advance(m[1])
|
||||
checked := value == 'x' || value == 'X'
|
||||
return ast.NewTaskCheckBox(checked)
|
||||
}
|
||||
|
||||
func (s *taskCheckBoxParser) CloseBlock(parent gast.Node, pc parser.Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
// TaskCheckBoxHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||
// renders checkboxes in list items.
|
||||
type TaskCheckBoxHTMLRenderer struct {
|
||||
html.Config
|
||||
}
|
||||
|
||||
// NewTaskCheckBoxHTMLRenderer returns a new TaskCheckBoxHTMLRenderer.
|
||||
func NewTaskCheckBoxHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||
r := &TaskCheckBoxHTMLRenderer{
|
||||
Config: html.NewConfig(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.SetHTMLOption(&r.Config)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||
func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||
reg.Register(ast.KindTaskCheckBox, r.renderTaskCheckBox)
|
||||
}
|
||||
|
||||
func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||
if !entering {
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
n := node.(*ast.TaskCheckBox)
|
||||
|
||||
if n.IsChecked {
|
||||
w.WriteString(`<input checked="" disabled="" type="checkbox"`)
|
||||
} else {
|
||||
w.WriteString(`<input disabled="" type="checkbox"`)
|
||||
}
|
||||
if r.XHTML {
|
||||
w.WriteString(" /> ")
|
||||
} else {
|
||||
w.WriteString("> ")
|
||||
}
|
||||
return gast.WalkContinue, nil
|
||||
}
|
||||
|
||||
type taskList struct {
|
||||
}
|
||||
|
||||
// TaskList is an extension that allows you to use GFM task lists.
|
||||
var TaskList = &taskList{}
|
||||
|
||||
func (e *taskList) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(parser.WithInlineParsers(
|
||||
util.Prioritized(NewTaskCheckBoxParser(), 0),
|
||||
))
|
||||
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||
util.Prioritized(NewTaskCheckBoxHTMLRenderer(), 500),
|
||||
))
|
||||
}
|
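A short sketch showing the TaskList extension above in use; the checklist items are illustrative only.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
)

func main() {
	// TaskList turns '- [x] item' into a list item with a disabled checkbox.
	md := goldmark.New(goldmark.WithExtensions(extension.TaskList))
	src := []byte("- [x] write code\n- [ ] review\n")
	var out bytes.Buffer
	if err := md.Convert(src, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}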
323
vendor/github.com/yuin/goldmark/extension/typographer.go
generated
vendored
Normal file
323
vendor/github.com/yuin/goldmark/extension/typographer.go
generated
vendored
Normal file
@ -0,0 +1,323 @@
|
||||
package extension
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
|
||||
"github.com/yuin/goldmark"
|
||||
gast "github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var uncloseCounterKey = parser.NewContextKey()
|
||||
|
||||
type unclosedCounter struct {
|
||||
Single int
|
||||
Double int
|
||||
}
|
||||
|
||||
func (u *unclosedCounter) Reset() {
|
||||
u.Single = 0
|
||||
u.Double = 0
|
||||
}
|
||||
|
||||
func getUnclosedCounter(pc parser.Context) *unclosedCounter {
|
||||
v := pc.Get(uncloseCounterKey)
|
||||
if v == nil {
|
||||
v = &unclosedCounter{}
|
||||
pc.Set(uncloseCounterKey, v)
|
||||
}
|
||||
return v.(*unclosedCounter)
|
||||
}
|
||||
|
||||
// TypographicPunctuation is a key of the punctuations that can be replaced with
|
||||
// typographic entities.
|
||||
type TypographicPunctuation int
|
||||
|
||||
const (
|
||||
// LeftSingleQuote is '
|
||||
LeftSingleQuote TypographicPunctuation = iota + 1
|
||||
// RightSingleQuote is '
|
||||
RightSingleQuote
|
||||
// LeftDoubleQuote is "
|
||||
LeftDoubleQuote
|
||||
// RightDoubleQuote is "
|
||||
RightDoubleQuote
|
||||
// EnDash is --
|
||||
EnDash
|
||||
// EmDash is ---
|
||||
EmDash
|
||||
// Ellipsis is ...
|
||||
Ellipsis
|
||||
// LeftAngleQuote is <<
|
||||
LeftAngleQuote
|
||||
// RightAngleQuote is >>
|
||||
RightAngleQuote
|
||||
// Apostrophe is '
|
||||
Apostrophe
|
||||
|
||||
typographicPunctuationMax
|
||||
)
|
||||
|
||||
// A TypographerConfig struct is a data structure that holds configuration of the
|
||||
// Typographer extension.
|
||||
type TypographerConfig struct {
|
||||
Substitutions [][]byte
|
||||
}
|
||||
|
||||
func newDefaultSubstitutions() [][]byte {
|
||||
replacements := make([][]byte, typographicPunctuationMax)
|
||||
replacements[LeftSingleQuote] = []byte("‘")
|
||||
replacements[RightSingleQuote] = []byte("’")
|
||||
replacements[LeftDoubleQuote] = []byte("“")
|
||||
replacements[RightDoubleQuote] = []byte("”")
|
||||
replacements[EnDash] = []byte("–")
|
||||
replacements[EmDash] = []byte("—")
|
||||
replacements[Ellipsis] = []byte("…")
|
||||
replacements[LeftAngleQuote] = []byte("«")
|
||||
replacements[RightAngleQuote] = []byte("»")
|
||||
replacements[Apostrophe] = []byte("’")
|
||||
|
||||
return replacements
|
||||
}
|
||||
|
||||
// SetOption implements SetOptioner.
|
||||
func (b *TypographerConfig) SetOption(name parser.OptionName, value interface{}) {
|
||||
switch name {
|
||||
case optTypographicSubstitutions:
|
||||
b.Substitutions = value.([][]byte)
|
||||
}
|
||||
}
|
||||
|
||||
// A TypographerOption interface sets options for the TypographerParser.
|
||||
type TypographerOption interface {
|
||||
parser.Option
|
||||
SetTypographerOption(*TypographerConfig)
|
||||
}
|
||||
|
||||
const optTypographicSubstitutions parser.OptionName = "TypographicSubstitutions"
|
||||
|
||||
// TypographicSubstitutions is a list of the substitutions for the Typographer extension.
|
||||
type TypographicSubstitutions map[TypographicPunctuation][]byte
|
||||
|
||||
type withTypographicSubstitutions struct {
|
||||
value [][]byte
|
||||
}
|
||||
|
||||
func (o *withTypographicSubstitutions) SetParserOption(c *parser.Config) {
|
||||
c.Options[optTypographicSubstitutions] = o.value
|
||||
}
|
||||
|
||||
func (o *withTypographicSubstitutions) SetTypographerOption(p *TypographerConfig) {
|
||||
p.Substitutions = o.value
|
||||
}
|
||||
|
||||
// WithTypographicSubstitutions is a functional option that specifies replacement text
|
||||
// for punctuations.
|
||||
func WithTypographicSubstitutions(values map[TypographicPunctuation][]byte) TypographerOption {
|
||||
replacements := newDefaultSubstitutions()
|
||||
for k, v := range values {
|
||||
replacements[k] = v
|
||||
}
|
||||
|
||||
return &withTypographicSubstitutions{replacements}
|
||||
}
|
||||
|
||||
type typographerDelimiterProcessor struct {
|
||||
}
|
||||
|
||||
func (p *typographerDelimiterProcessor) IsDelimiter(b byte) bool {
|
||||
return b == '\'' || b == '"'
|
||||
}
|
||||
|
||||
func (p *typographerDelimiterProcessor) CanOpenCloser(opener, closer *parser.Delimiter) bool {
|
||||
return opener.Char == closer.Char
|
||||
}
|
||||
|
||||
func (p *typographerDelimiterProcessor) OnMatch(consumes int) gast.Node {
|
||||
return nil
|
||||
}
|
||||
|
||||
var defaultTypographerDelimiterProcessor = &typographerDelimiterProcessor{}
|
||||
|
||||
type typographerParser struct {
|
||||
TypographerConfig
|
||||
}
|
||||
|
||||
// NewTypographerParser returns a new InlineParser that parses
|
||||
// typographer expressions.
|
||||
func NewTypographerParser(opts ...TypographerOption) parser.InlineParser {
|
||||
p := &typographerParser{
|
||||
TypographerConfig: TypographerConfig{
|
||||
Substitutions: newDefaultSubstitutions(),
|
||||
},
|
||||
}
|
||||
for _, o := range opts {
|
||||
o.SetTypographerOption(&p.TypographerConfig)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (s *typographerParser) Trigger() []byte {
|
||||
return []byte{'\'', '"', '-', '.', '<', '>'}
|
||||
}
|
||||
|
||||
func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
|
||||
line, _ := block.PeekLine()
|
||||
c := line[0]
|
||||
if len(line) > 2 {
|
||||
if c == '-' {
|
||||
if s.Substitutions[EmDash] != nil && line[1] == '-' && line[2] == '-' { // ---
|
||||
node := gast.NewString(s.Substitutions[EmDash])
|
||||
node.SetCode(true)
|
||||
block.Advance(3)
|
||||
return node
|
||||
}
|
||||
} else if c == '.' {
|
||||
if s.Substitutions[Ellipsis] != nil && line[1] == '.' && line[2] == '.' { // ...
|
||||
node := gast.NewString(s.Substitutions[Ellipsis])
|
||||
node.SetCode(true)
|
||||
block.Advance(3)
|
||||
return node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if len(line) > 1 {
|
||||
if c == '<' {
|
||||
if s.Substitutions[LeftAngleQuote] != nil && line[1] == '<' { // <<
|
||||
node := gast.NewString(s.Substitutions[LeftAngleQuote])
|
||||
node.SetCode(true)
|
||||
block.Advance(2)
|
||||
return node
|
||||
}
|
||||
return nil
|
||||
} else if c == '>' {
|
||||
if s.Substitutions[RightAngleQuote] != nil && line[1] == '>' { // >>
|
||||
node := gast.NewString(s.Substitutions[RightAngleQuote])
|
||||
node.SetCode(true)
|
||||
block.Advance(2)
|
||||
return node
|
||||
}
|
||||
return nil
|
||||
} else if s.Substitutions[EnDash] != nil && c == '-' && line[1] == '-' { // --
|
||||
node := gast.NewString(s.Substitutions[EnDash])
|
||||
node.SetCode(true)
|
||||
block.Advance(2)
|
||||
return node
|
||||
}
|
||||
}
|
||||
if c == '\'' || c == '"' {
|
||||
before := block.PrecendingCharacter()
|
||||
d := parser.ScanDelimiter(line, before, 1, defaultTypographerDelimiterProcessor)
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
counter := getUnclosedCounter(pc)
|
||||
if c == '\'' {
|
||||
if s.Substitutions[Apostrophe] != nil {
|
||||
// Handle decade abbreviations such as '90s
|
||||
if d.CanOpen && !d.CanClose && len(line) > 3 && util.IsNumeric(line[1]) && util.IsNumeric(line[2]) && line[3] == 's' {
|
||||
after := rune(' ')
|
||||
if len(line) > 4 {
|
||||
after = util.ToRune(line, 4)
|
||||
}
|
||||
if len(line) == 3 || util.IsSpaceRune(after) || util.IsPunctRune(after) {
|
||||
node := gast.NewString(s.Substitutions[Apostrophe])
|
||||
node.SetCode(true)
|
||||
block.Advance(1)
|
||||
return node
|
||||
}
|
||||
}
|
||||
// Convert normal apostrophes. This is probably more flexible than necessary but
|
||||
// converts any apostrophe in between two alphanumerics.
|
||||
if len(line) > 1 && (unicode.IsDigit(before) || unicode.IsLetter(before)) && (unicode.IsLetter(util.ToRune(line, 1))) {
|
||||
node := gast.NewString(s.Substitutions[Apostrophe])
|
||||
node.SetCode(true)
|
||||
block.Advance(1)
|
||||
return node
|
||||
}
|
||||
}
|
||||
if s.Substitutions[LeftSingleQuote] != nil && d.CanOpen && !d.CanClose {
|
||||
nt := LeftSingleQuote
|
||||
// special cases: Alice's, I'm, Don't, You'd
|
||||
if len(line) > 1 && (line[1] == 's' || line[1] == 'm' || line[1] == 't' || line[1] == 'd') && (len(line) < 3 || util.IsPunct(line[2]) || util.IsSpace(line[2])) {
|
||||
nt = RightSingleQuote
|
||||
}
|
||||
// special cases: I've, I'll, You're
|
||||
if len(line) > 2 && ((line[1] == 'v' && line[2] == 'e') || (line[1] == 'l' && line[2] == 'l') || (line[1] == 'r' && line[2] == 'e')) && (len(line) < 4 || util.IsPunct(line[3]) || util.IsSpace(line[3])) {
|
||||
nt = RightSingleQuote
|
||||
}
|
||||
if nt == LeftSingleQuote {
|
||||
counter.Single++
|
||||
}
|
||||
|
||||
node := gast.NewString(s.Substitutions[nt])
|
||||
node.SetCode(true)
|
||||
block.Advance(1)
|
||||
return node
|
||||
}
|
||||
if s.Substitutions[RightSingleQuote] != nil && counter.Single > 0 {
|
||||
isClose := d.CanClose && !d.CanOpen
|
||||
maybeClose := d.CanClose && d.CanOpen && len(line) > 1 && (line[1] == ',' || line[1] == '.' || line[1] == '!' || line[1] == '?') && (len(line) == 2 || (len(line) > 2 && util.IsPunct(line[2]) || util.IsSpace(line[2])))
|
||||
if isClose || maybeClose {
|
||||
node := gast.NewString(s.Substitutions[RightSingleQuote])
|
||||
node.SetCode(true)
|
||||
block.Advance(1)
|
||||
counter.Single--
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
if c == '"' {
|
||||
if s.Substitutions[LeftDoubleQuote] != nil && d.CanOpen && !d.CanClose {
|
||||
node := gast.NewString(s.Substitutions[LeftDoubleQuote])
|
||||
node.SetCode(true)
|
||||
block.Advance(1)
|
||||
counter.Double++
|
||||
return node
|
||||
}
|
||||
if s.Substitutions[RightDoubleQuote] != nil && counter.Double > 0 {
|
||||
isClose := d.CanClose && !d.CanOpen
|
||||
maybeClose := d.CanClose && d.CanOpen && len(line) > 1 && (line[1] == ',' || line[1] == '.' || line[1] == '!' || line[1] == '?') && (len(line) == 2 || (len(line) > 2 && util.IsPunct(line[2]) || util.IsSpace(line[2])))
|
||||
if isClose || maybeClose {
|
||||
// special case: "Monitor 21""
|
||||
if len(line) > 1 && line[1] == '"' && unicode.IsDigit(before) {
|
||||
return nil
|
||||
}
|
||||
node := gast.NewString(s.Substitutions[RightDoubleQuote])
|
||||
node.SetCode(true)
|
||||
block.Advance(1)
|
||||
counter.Double--
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *typographerParser) CloseBlock(parent gast.Node, pc parser.Context) {
|
||||
getUnclosedCounter(pc).Reset()
|
||||
}
|
||||
|
||||
type typographer struct {
|
||||
options []TypographerOption
|
||||
}
|
||||
|
||||
// Typographer is an extension that replaces punctuations with typographic entities.
|
||||
var Typographer = &typographer{}
|
||||
|
||||
// NewTypographer returns a new Extender that replaces punctuations with typographic entities.
|
||||
func NewTypographer(opts ...TypographerOption) goldmark.Extender {
|
||||
return &typographer{
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *typographer) Extend(m goldmark.Markdown) {
|
||||
m.Parser().AddOptions(parser.WithInlineParsers(
|
||||
util.Prioritized(NewTypographerParser(e.options...), 9999),
|
||||
))
|
||||
}
|
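A hedged sketch of the Typographer extension above with a partial substitution override; the replacement entities chosen here are illustrative assumptions, and all other defaults are kept.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
)

func main() {
	// Override only the double-quote substitutions; the remaining defaults stay in place.
	typographer := extension.NewTypographer(
		extension.WithTypographicSubstitutions(map[extension.TypographicPunctuation][]byte{
			extension.LeftDoubleQuote:  []byte("&laquo;"),
			extension.RightDoubleQuote: []byte("&raquo;"),
		}),
	)
	md := goldmark.New(goldmark.WithExtensions(typographer))
	var out bytes.Buffer
	if err := md.Convert([]byte(`"typographer" -- and ellipsis...`), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}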
3
vendor/github.com/yuin/goldmark/go.mod
generated
vendored
Normal file
3
vendor/github.com/yuin/goldmark/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/yuin/goldmark
|
||||
|
||||
go 1.13
|
0
vendor/github.com/yuin/goldmark/go.sum
generated
vendored
Normal file
0
vendor/github.com/yuin/goldmark/go.sum
generated
vendored
Normal file
140
vendor/github.com/yuin/goldmark/markdown.go
generated
vendored
Normal file
140
vendor/github.com/yuin/goldmark/markdown.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
// Package goldmark implements functions to convert markdown text to a desired format.
|
||||
package goldmark
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/parser"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/renderer/html"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"io"
|
||||
)
|
||||
|
||||
// DefaultParser returns a new Parser that is configured by default values.
|
||||
func DefaultParser() parser.Parser {
|
||||
return parser.NewParser(parser.WithBlockParsers(parser.DefaultBlockParsers()...),
|
||||
parser.WithInlineParsers(parser.DefaultInlineParsers()...),
|
||||
parser.WithParagraphTransformers(parser.DefaultParagraphTransformers()...),
|
||||
)
|
||||
}
|
||||
|
||||
// DefaultRenderer returns a new Renderer that is configured by default values.
|
||||
func DefaultRenderer() renderer.Renderer {
|
||||
return renderer.NewRenderer(renderer.WithNodeRenderers(util.Prioritized(html.NewRenderer(), 1000)))
|
||||
}
|
||||
|
||||
var defaultMarkdown = New()
|
||||
|
||||
// Convert interprets a UTF-8 bytes source in Markdown and
|
||||
// writes rendered contents to a writer w.
|
||||
func Convert(source []byte, w io.Writer, opts ...parser.ParseOption) error {
|
||||
return defaultMarkdown.Convert(source, w, opts...)
|
||||
}
|
||||
|
||||
// A Markdown interface offers functions to convert Markdown text to
|
||||
// a desired format.
|
||||
type Markdown interface {
|
||||
// Convert interprets a UTF-8 bytes source in Markdown and writes rendered
|
||||
// contents to a writer w.
|
||||
Convert(source []byte, writer io.Writer, opts ...parser.ParseOption) error
|
||||
|
||||
// Parser returns a Parser that will be used for conversion.
|
||||
Parser() parser.Parser
|
||||
|
||||
// SetParser sets a Parser to this object.
|
||||
SetParser(parser.Parser)
|
||||
|
||||
// Renderer returns a Renderer that will be used for conversion.
|
||||
Renderer() renderer.Renderer
|
||||
|
||||
// SetRenderer sets a Renderer to this object.
|
||||
SetRenderer(renderer.Renderer)
|
||||
}
|
||||
|
||||
// Option is a functional option type for Markdown objects.
|
||||
type Option func(*markdown)
|
||||
|
||||
// WithExtensions adds extensions.
|
||||
func WithExtensions(ext ...Extender) Option {
|
||||
return func(m *markdown) {
|
||||
m.extensions = append(m.extensions, ext...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithParser allows you to override the default parser.
|
||||
func WithParser(p parser.Parser) Option {
|
||||
return func(m *markdown) {
|
||||
m.parser = p
|
||||
}
|
||||
}
|
||||
|
||||
// WithParserOptions applies options for the parser.
|
||||
func WithParserOptions(opts ...parser.Option) Option {
|
||||
return func(m *markdown) {
|
||||
m.parser.AddOptions(opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithRenderer allows you to override the default renderer.
|
||||
func WithRenderer(r renderer.Renderer) Option {
|
||||
return func(m *markdown) {
|
||||
m.renderer = r
|
||||
}
|
||||
}
|
||||
|
||||
// WithRendererOptions applies options for the renderer.
|
||||
func WithRendererOptions(opts ...renderer.Option) Option {
|
||||
return func(m *markdown) {
|
||||
m.renderer.AddOptions(opts...)
|
||||
}
|
||||
}
|
||||
|
||||
type markdown struct {
|
||||
parser parser.Parser
|
||||
renderer renderer.Renderer
|
||||
extensions []Extender
|
||||
}
|
||||
|
||||
// New returns a new Markdown with given options.
|
||||
func New(options ...Option) Markdown {
|
||||
md := &markdown{
|
||||
parser: DefaultParser(),
|
||||
renderer: DefaultRenderer(),
|
||||
extensions: []Extender{},
|
||||
}
|
||||
for _, opt := range options {
|
||||
opt(md)
|
||||
}
|
||||
for _, e := range md.extensions {
|
||||
e.Extend(md)
|
||||
}
|
||||
return md
|
||||
}
|
||||
|
||||
func (m *markdown) Convert(source []byte, writer io.Writer, opts ...parser.ParseOption) error {
|
||||
reader := text.NewReader(source)
|
||||
doc := m.parser.Parse(reader, opts...)
|
||||
return m.renderer.Render(writer, source, doc)
|
||||
}
|
||||
|
||||
func (m *markdown) Parser() parser.Parser {
|
||||
return m.parser
|
||||
}
|
||||
|
||||
func (m *markdown) SetParser(v parser.Parser) {
|
||||
m.parser = v
|
||||
}
|
||||
|
||||
func (m *markdown) Renderer() renderer.Renderer {
|
||||
return m.renderer
|
||||
}
|
||||
|
||||
func (m *markdown) SetRenderer(v renderer.Renderer) {
|
||||
m.renderer = v
|
||||
}
|
||||
|
||||
// An Extender interface is used for extending Markdown.
|
||||
type Extender interface {
|
||||
// Extend extends the Markdown.
|
||||
Extend(Markdown)
|
||||
}
|
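For context, the simplest use of the package-level Convert defined above, relying on the default parser and renderer; the Markdown source is illustrative.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
)

func main() {
	// goldmark.Convert uses the package-level default Markdown object
	// (DefaultParser + DefaultRenderer, no extensions).
	var out bytes.Buffer
	if err := goldmark.Convert([]byte("# Hello\n\nSome *Markdown* text."), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}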
319
vendor/github.com/yuin/goldmark/parser/attribute.go
generated
vendored
Normal file
319
vendor/github.com/yuin/goldmark/parser/attribute.go
generated
vendored
Normal file
@ -0,0 +1,319 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var attrNameID = []byte("id")
|
||||
var attrNameClass = []byte("class")
|
||||
|
||||
// An Attribute is an attribute of the markdown elements
|
||||
type Attribute struct {
|
||||
Name []byte
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// An Attributes is a collection of attributes.
|
||||
type Attributes []Attribute
|
||||
|
||||
// Find returns (value, true) if an attribute corresponding to the given name is found, otherwise (nil, false).
|
||||
func (as Attributes) Find(name []byte) (interface{}, bool) {
|
||||
for _, a := range as {
|
||||
if bytes.Equal(a.Name, name) {
|
||||
return a.Value, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (as Attributes) findUpdate(name []byte, cb func(v interface{}) interface{}) bool {
|
||||
for i, a := range as {
|
||||
if bytes.Equal(a.Name, name) {
|
||||
as[i].Value = cb(a.Value)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParseAttributes parses attributes into an Attributes slice.
|
||||
// ParseAttributes returns the parsed attributes and true if it could parse
|
||||
// attributes, otherwise nil and false.
|
||||
func ParseAttributes(reader text.Reader) (Attributes, bool) {
|
||||
savedLine, savedPosition := reader.Position()
|
||||
reader.SkipSpaces()
|
||||
if reader.Peek() != '{' {
|
||||
reader.SetPosition(savedLine, savedPosition)
|
||||
return nil, false
|
||||
}
|
||||
reader.Advance(1)
|
||||
attrs := Attributes{}
|
||||
for {
|
||||
if reader.Peek() == '}' {
|
||||
reader.Advance(1)
|
||||
return attrs, true
|
||||
}
|
||||
attr, ok := parseAttribute(reader)
|
||||
if !ok {
|
||||
reader.SetPosition(savedLine, savedPosition)
|
||||
return nil, false
|
||||
}
|
||||
if bytes.Equal(attr.Name, attrNameClass) {
|
||||
if !attrs.findUpdate(attrNameClass, func(v interface{}) interface{} {
|
||||
ret := make([]byte, 0, len(v.([]byte))+1+len(attr.Value.([]byte)))
|
||||
ret = append(ret, v.([]byte)...)
|
||||
return append(append(ret, ' '), attr.Value.([]byte)...)
|
||||
}) {
|
||||
attrs = append(attrs, attr)
|
||||
}
|
||||
} else {
|
||||
attrs = append(attrs, attr)
|
||||
}
|
||||
reader.SkipSpaces()
|
||||
if reader.Peek() == ',' {
|
||||
reader.Advance(1)
|
||||
reader.SkipSpaces()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseAttribute(reader text.Reader) (Attribute, bool) {
|
||||
reader.SkipSpaces()
|
||||
c := reader.Peek()
|
||||
if c == '#' || c == '.' {
|
||||
reader.Advance(1)
|
||||
line, _ := reader.PeekLine()
|
||||
i := 0
|
||||
for ; i < len(line) && !util.IsSpace(line[i]) && (!util.IsPunct(line[i]) || line[i] == '_' || line[i] == '-'); i++ {
|
||||
}
|
||||
name := attrNameClass
|
||||
if c == '#' {
|
||||
name = attrNameID
|
||||
}
|
||||
reader.Advance(i)
|
||||
return Attribute{Name: name, Value: line[0:i]}, true
|
||||
}
|
||||
line, _ := reader.PeekLine()
|
||||
if len(line) == 0 {
|
||||
return Attribute{}, false
|
||||
}
|
||||
c = line[0]
|
||||
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||
c == '_' || c == ':') {
|
||||
return Attribute{}, false
|
||||
}
|
||||
i := 0
|
||||
for ; i < len(line); i++ {
|
||||
c = line[i]
|
||||
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') ||
|
||||
c == '_' || c == ':' || c == '.' || c == '-') {
|
||||
break
|
||||
}
|
||||
}
|
||||
name := line[:i]
|
||||
reader.Advance(i)
|
||||
reader.SkipSpaces()
|
||||
c = reader.Peek()
|
||||
if c != '=' {
|
||||
return Attribute{}, false
|
||||
}
|
||||
reader.Advance(1)
|
||||
reader.SkipSpaces()
|
||||
value, ok := parseAttributeValue(reader)
|
||||
if !ok {
|
||||
return Attribute{}, false
|
||||
}
|
||||
return Attribute{Name: name, Value: value}, true
|
||||
}
|
||||
|
||||
func parseAttributeValue(reader text.Reader) (interface{}, bool) {
|
||||
reader.SkipSpaces()
|
||||
c := reader.Peek()
|
||||
var value interface{}
|
||||
ok := false
|
||||
switch c {
|
||||
case text.EOF:
|
||||
return Attribute{}, false
|
||||
case '{':
|
||||
value, ok = ParseAttributes(reader)
|
||||
case '[':
|
||||
value, ok = parseAttributeArray(reader)
|
||||
case '"':
|
||||
value, ok = parseAttributeString(reader)
|
||||
default:
|
||||
if c == '-' || c == '+' || util.IsNumeric(c) {
|
||||
value, ok = parseAttributeNumber(reader)
|
||||
} else {
|
||||
value, ok = parseAttributeOthers(reader)
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return value, true
|
||||
}
|
||||
|
||||
func parseAttributeArray(reader text.Reader) ([]interface{}, bool) {
|
||||
reader.Advance(1) // skip [
|
||||
ret := []interface{}{}
|
||||
for i := 0; ; i++ {
|
||||
c := reader.Peek()
|
||||
comma := false
|
||||
if i != 0 && c == ',' {
|
||||
reader.Advance(1)
|
||||
comma = true
|
||||
}
|
||||
if c == ']' {
|
||||
if !comma {
|
||||
reader.Advance(1)
|
||||
return ret, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
reader.SkipSpaces()
|
||||
value, ok := parseAttributeValue(reader)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
ret = append(ret, value)
|
||||
reader.SkipSpaces()
|
||||
}
|
||||
}
|
||||
|
||||
func parseAttributeString(reader text.Reader) ([]byte, bool) {
|
||||
reader.Advance(1) // skip "
|
||||
line, _ := reader.PeekLine()
|
||||
i := 0
|
||||
l := len(line)
|
||||
var buf bytes.Buffer
|
||||
for i < l {
|
||||
c := line[i]
|
||||
if c == '\\' && i != l-1 {
|
||||
n := line[i+1]
|
||||
switch n {
|
||||
case '"', '/', '\\':
|
||||
buf.WriteByte(n)
|
||||
i += 2
|
||||
case 'b':
|
||||
buf.WriteString("\b")
|
||||
i += 2
|
||||
case 'f':
|
||||
buf.WriteString("\f")
|
||||
i += 2
|
||||
case 'n':
|
||||
buf.WriteString("\n")
|
||||
i += 2
|
||||
case 'r':
|
||||
buf.WriteString("\r")
|
||||
i += 2
|
||||
case 't':
|
||||
buf.WriteString("\t")
|
||||
i += 2
|
||||
default:
|
||||
buf.WriteByte('\\')
|
||||
i++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c == '"' {
|
||||
reader.Advance(i + 1)
|
||||
return buf.Bytes(), true
|
||||
}
|
||||
buf.WriteByte(c)
|
||||
i++
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func scanAttributeDecimal(reader text.Reader, w io.ByteWriter) {
|
||||
for {
|
||||
c := reader.Peek()
|
||||
if util.IsNumeric(c) {
|
||||
w.WriteByte(c)
|
||||
} else {
|
||||
return
|
||||
}
|
||||
reader.Advance(1)
|
||||
}
|
||||
}
|
||||
|
||||
func parseAttributeNumber(reader text.Reader) (float64, bool) {
|
||||
sign := 1
|
||||
c := reader.Peek()
|
||||
if c == '-' {
|
||||
sign = -1
|
||||
reader.Advance(1)
|
||||
} else if c == '+' {
|
||||
reader.Advance(1)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if !util.IsNumeric(reader.Peek()) {
|
||||
return 0, false
|
||||
}
|
||||
scanAttributeDecimal(reader, &buf)
|
||||
if buf.Len() == 0 {
|
||||
return 0, false
|
||||
}
|
||||
c = reader.Peek()
|
||||
if c == '.' {
|
||||
buf.WriteByte(c)
|
||||
reader.Advance(1)
|
||||
scanAttributeDecimal(reader, &buf)
|
||||
}
|
||||
c = reader.Peek()
|
||||
if c == 'e' || c == 'E' {
|
||||
buf.WriteByte(c)
|
||||
reader.Advance(1)
|
||||
c = reader.Peek()
|
||||
if c == '-' || c == '+' {
|
||||
buf.WriteByte(c)
|
||||
reader.Advance(1)
|
||||
}
|
||||
scanAttributeDecimal(reader, &buf)
|
||||
}
|
||||
f, err := strconv.ParseFloat(buf.String(), 10)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return float64(sign) * f, true
|
||||
}
|
||||
|
||||
var bytesTrue = []byte("true")
|
||||
var bytesFalse = []byte("false")
|
||||
var bytesNull = []byte("null")
|
||||
|
||||
func parseAttributeOthers(reader text.Reader) (interface{}, bool) {
|
||||
line, _ := reader.PeekLine()
|
||||
c := line[0]
|
||||
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||
c == '_' || c == ':') {
|
||||
return nil, false
|
||||
}
|
||||
i := 0
|
||||
for ; i < len(line); i++ {
|
||||
c := line[i]
|
||||
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') ||
|
||||
c == '_' || c == ':' || c == '.' || c == '-') {
|
||||
break
|
||||
}
|
||||
}
|
||||
value := line[:i]
|
||||
reader.Advance(i)
|
||||
if bytes.Equal(value, bytesTrue) {
|
||||
return true, true
|
||||
}
|
||||
if bytes.Equal(value, bytesFalse) {
|
||||
return false, true
|
||||
}
|
||||
if bytes.Equal(value, bytesNull) {
|
||||
return nil, true
|
||||
}
|
||||
return value, true
|
||||
}
|
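A small sketch exercising ParseAttributes from the file above on a literal attribute block; the attribute names and values are made up for illustration.
package main

import (
	"fmt"

	"github.com/yuin/goldmark/parser"
	"github.com/yuin/goldmark/text"
)

func main() {
	// Parse an attribute list of the form {#id .class key="value"}.
	reader := text.NewReader([]byte(`{#intro .note title="Hello"}`))
	attrs, ok := parser.ParseAttributes(reader)
	if !ok {
		panic("could not parse attributes")
	}
	for _, a := range attrs {
		fmt.Printf("%s = %s\n", a.Name, a.Value)
	}
}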
243
vendor/github.com/yuin/goldmark/parser/atx_heading.go
generated
vendored
Normal file
243
vendor/github.com/yuin/goldmark/parser/atx_heading.go
generated
vendored
Normal file
@ -0,0 +1,243 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A HeadingConfig struct is a data structure that holds configuration of the renderers related to headings.
|
||||
type HeadingConfig struct {
|
||||
AutoHeadingID bool
|
||||
Attribute bool
|
||||
}
|
||||
|
||||
// SetOption implements SetOptioner.
|
||||
func (b *HeadingConfig) SetOption(name OptionName, value interface{}) {
|
||||
switch name {
|
||||
case optAutoHeadingID:
|
||||
b.AutoHeadingID = true
|
||||
case optAttribute:
|
||||
b.Attribute = true
|
||||
}
|
||||
}
|
||||
|
||||
// A HeadingOption interface sets options for heading parsers.
|
||||
type HeadingOption interface {
|
||||
Option
|
||||
SetHeadingOption(*HeadingConfig)
|
||||
}
|
||||
|
||||
// AutoHeadingID is an option name that enables auto IDs for headings.
|
||||
const optAutoHeadingID OptionName = "AutoHeadingID"
|
||||
|
||||
type withAutoHeadingID struct {
|
||||
}
|
||||
|
||||
func (o *withAutoHeadingID) SetParserOption(c *Config) {
|
||||
c.Options[optAutoHeadingID] = true
|
||||
}
|
||||
|
||||
func (o *withAutoHeadingID) SetHeadingOption(p *HeadingConfig) {
|
||||
p.AutoHeadingID = true
|
||||
}
|
||||
|
||||
// WithAutoHeadingID is a functional option that enables custom heading IDs and
|
||||
// auto-generated heading IDs.
|
||||
func WithAutoHeadingID() HeadingOption {
|
||||
return &withAutoHeadingID{}
|
||||
}
|
||||
|
||||
type withHeadingAttribute struct {
|
||||
Option
|
||||
}
|
||||
|
||||
func (o *withHeadingAttribute) SetHeadingOption(p *HeadingConfig) {
|
||||
p.Attribute = true
|
||||
}
|
||||
|
||||
// WithHeadingAttribute is a functional option that enables custom heading attributes.
|
||||
func WithHeadingAttribute() HeadingOption {
|
||||
return &withHeadingAttribute{WithAttribute()}
|
||||
}
|
||||
|
||||
type atxHeadingParser struct {
|
||||
HeadingConfig
|
||||
}
|
||||
|
||||
// NewATXHeadingParser returns a new BlockParser that can parse ATX headings.
|
||||
func NewATXHeadingParser(opts ...HeadingOption) BlockParser {
|
||||
p := &atxHeadingParser{}
|
||||
for _, o := range opts {
|
||||
o.SetHeadingOption(&p.HeadingConfig)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Trigger() []byte {
|
||||
return []byte{'#'}
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
line, segment := reader.PeekLine()
|
||||
pos := pc.BlockOffset()
|
||||
if pos < 0 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
i := pos
|
||||
for ; i < len(line) && line[i] == '#'; i++ {
|
||||
}
|
||||
level := i - pos
|
||||
if i == pos || level > 6 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
l := util.TrimLeftSpaceLength(line[i:])
|
||||
if l == 0 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
start := i + l
|
||||
if start >= len(line) {
|
||||
start = len(line) - 1
|
||||
}
|
||||
origstart := start
|
||||
stop := len(line) - util.TrimRightSpaceLength(line)
|
||||
|
||||
node := ast.NewHeading(level)
|
||||
parsed := false
|
||||
if b.Attribute { // handles special case like ### heading ### {#id}
|
||||
start--
|
||||
closureClose := -1
|
||||
closureOpen := -1
|
||||
for j := start; j < stop; {
|
||||
c := line[j]
|
||||
if util.IsEscapedPunctuation(line, j) {
|
||||
j += 2
|
||||
} else if util.IsSpace(c) && j < stop-1 && line[j+1] == '#' {
|
||||
closureOpen = j + 1
|
||||
k := j + 1
|
||||
for ; k < stop && line[k] == '#'; k++ {
|
||||
}
|
||||
closureClose = k
|
||||
break
|
||||
} else {
|
||||
j++
|
||||
}
|
||||
}
|
||||
if closureClose > 0 {
|
||||
reader.Advance(closureClose)
|
||||
attrs, ok := ParseAttributes(reader)
|
||||
rest, _ := reader.PeekLine()
|
||||
parsed = ok && util.IsBlank(rest)
|
||||
if parsed {
|
||||
for _, attr := range attrs {
|
||||
node.SetAttribute(attr.Name, attr.Value)
|
||||
}
|
||||
node.Lines().Append(text.NewSegment(segment.Start+start+1-segment.Padding, segment.Start+closureOpen-segment.Padding))
|
||||
}
|
||||
}
|
||||
}
|
||||
if !parsed {
|
||||
start = origstart
|
||||
stop := len(line) - util.TrimRightSpaceLength(line)
|
||||
if stop <= start { // empty headings like '##[space]'
|
||||
stop = start
|
||||
} else {
|
||||
i = stop - 1
|
||||
for ; line[i] == '#' && i >= start; i-- {
|
||||
}
|
||||
if i != stop-1 && !util.IsSpace(line[i]) {
|
||||
i = stop - 1
|
||||
}
|
||||
i++
|
||||
stop = i
|
||||
}
|
||||
|
||||
if len(util.TrimRight(line[start:stop], []byte{'#'})) != 0 { // empty heading like '### ###'
|
||||
node.Lines().Append(text.NewSegment(segment.Start+start-segment.Padding, segment.Start+stop-segment.Padding))
|
||||
}
|
||||
}
|
||||
return node, NoChildren
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
return Close
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
if b.Attribute {
|
||||
_, ok := node.AttributeString("id")
|
||||
if !ok {
|
||||
parseLastLineAttributes(node, reader, pc)
|
||||
}
|
||||
}
|
||||
|
||||
if b.AutoHeadingID {
|
||||
id, ok := node.AttributeString("id")
|
||||
if !ok {
|
||||
generateAutoHeadingID(node.(*ast.Heading), reader, pc)
|
||||
} else {
|
||||
pc.IDs().Put(id.([]byte))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func generateAutoHeadingID(node *ast.Heading, reader text.Reader, pc Context) {
|
||||
var line []byte
|
||||
lastIndex := node.Lines().Len() - 1
|
||||
if lastIndex > -1 {
|
||||
lastLine := node.Lines().At(lastIndex)
|
||||
line = lastLine.Value(reader.Source())
|
||||
}
|
||||
headingID := pc.IDs().Generate(line, ast.KindHeading)
|
||||
node.SetAttribute(attrNameID, headingID)
|
||||
}
|
||||
|
||||
func parseLastLineAttributes(node ast.Node, reader text.Reader, pc Context) {
|
||||
lastIndex := node.Lines().Len() - 1
|
||||
if lastIndex < 0 { // empty headings
|
||||
return
|
||||
}
|
||||
lastLine := node.Lines().At(lastIndex)
|
||||
line := lastLine.Value(reader.Source())
|
||||
lr := text.NewReader(line)
|
||||
var attrs Attributes
|
||||
var ok bool
|
||||
var start text.Segment
|
||||
var sl int
|
||||
var end text.Segment
|
||||
for {
|
||||
c := lr.Peek()
|
||||
if c == text.EOF {
|
||||
break
|
||||
}
|
||||
if c == '\\' {
|
||||
lr.Advance(1)
|
||||
if lr.Peek() == '{' {
|
||||
lr.Advance(1)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c == '{' {
|
||||
sl, start = lr.Position()
|
||||
attrs, ok = ParseAttributes(lr)
|
||||
_, end = lr.Position()
|
||||
lr.SetPosition(sl, start)
|
||||
}
|
||||
lr.Advance(1)
|
||||
}
|
||||
if ok && util.IsBlank(line[end.Start:]) {
|
||||
for _, attr := range attrs {
|
||||
node.SetAttribute(attr.Name, attr.Value)
|
||||
}
|
||||
lastLine.Stop = lastLine.Start + start.Start
|
||||
node.Lines().Set(lastIndex, lastLine)
|
||||
}
|
||||
}
|
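A brief sketch combining the heading options above through goldmark's parser options; the headings and the custom ID are illustrative values.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/parser"
)

func main() {
	// Enable auto-generated heading IDs and the '### heading {#custom-id}' attribute syntax.
	md := goldmark.New(goldmark.WithParserOptions(
		parser.WithAutoHeadingID(),
		parser.WithHeadingAttribute(),
	))
	var out bytes.Buffer
	if err := md.Convert([]byte("# Overview\n\n## Details {#custom-id}\n"), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}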
42
vendor/github.com/yuin/goldmark/parser/auto_link.go
generated
vendored
Normal file
42
vendor/github.com/yuin/goldmark/parser/auto_link.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

type autoLinkParser struct {
}

var defaultAutoLinkParser = &autoLinkParser{}

// NewAutoLinkParser returns a new InlineParser that parses autolinks
// surrounded by '<' and '>' .
func NewAutoLinkParser() InlineParser {
	return defaultAutoLinkParser
}

func (s *autoLinkParser) Trigger() []byte {
	return []byte{'<'}
}

func (s *autoLinkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	line, segment := block.PeekLine()
	stop := util.FindEmailIndex(line[1:])
	typ := ast.AutoLinkType(ast.AutoLinkEmail)
	if stop < 0 {
		stop = util.FindURLIndex(line[1:])
		typ = ast.AutoLinkURL
	}
	if stop < 0 {
		return nil
	}
	stop++
	if stop >= len(line) || line[stop] != '>' {
		return nil
	}
	value := ast.NewTextSegment(text.NewSegment(segment.Start+1, segment.Start+stop))
	block.Advance(stop + 1)
	return ast.NewAutoLink(typ, value)
}
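This parser only recognizes the angle-bracket autolink form of CommonMark; a bare URL in running text is left as plain text by the default parser set. A small sketch of that difference (real goldmark API, invented sample input):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/yuin/goldmark"
	)

	func main() {
		md := goldmark.New() // default CommonMark parsers, including autoLinkParser
		var buf bytes.Buffer
		// The '<' trigger fires on the first line; the bare URL on the last line does not.
		src := []byte("<https://gitea.com/gitea/tea>\n\nhttps://gitea.com/gitea/tea\n")
		if err := md.Convert(src, &buf); err != nil {
			panic(err)
		}
		fmt.Println(buf.String())
	}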
69 vendor/github.com/yuin/goldmark/parser/blockquote.go generated vendored Normal file
@ -0,0 +1,69 @@
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

type blockquoteParser struct {
}

var defaultBlockquoteParser = &blockquoteParser{}

// NewBlockquoteParser returns a new BlockParser that
// parses blockquotes.
func NewBlockquoteParser() BlockParser {
	return defaultBlockquoteParser
}

func (b *blockquoteParser) process(reader text.Reader) bool {
	line, _ := reader.PeekLine()
	w, pos := util.IndentWidth(line, reader.LineOffset())
	if w > 3 || pos >= len(line) || line[pos] != '>' {
		return false
	}
	pos++
	if pos >= len(line) || line[pos] == '\n' {
		reader.Advance(pos)
		return true
	}
	if line[pos] == ' ' || line[pos] == '\t' {
		pos++
	}
	reader.Advance(pos)
	if line[pos-1] == '\t' {
		reader.SetPadding(2)
	}
	return true
}

func (b *blockquoteParser) Trigger() []byte {
	return []byte{'>'}
}

func (b *blockquoteParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	if b.process(reader) {
		return ast.NewBlockquote(), HasChildren
	}
	return nil, NoChildren
}

func (b *blockquoteParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	if b.process(reader) {
		return Continue | HasChildren
	}
	return Close
}

func (b *blockquoteParser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

func (b *blockquoteParser) CanInterruptParagraph() bool {
	return true
}

func (b *blockquoteParser) CanAcceptIndentedLine() bool {
	return false
}
79 vendor/github.com/yuin/goldmark/parser/code_block.go generated vendored Normal file
@ -0,0 +1,79 @@
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

type codeBlockParser struct {
}

// CodeBlockParser is a BlockParser implementation that parses indented code blocks.
var defaultCodeBlockParser = &codeBlockParser{}

// NewCodeBlockParser returns a new BlockParser that
// parses code blocks.
func NewCodeBlockParser() BlockParser {
	return defaultCodeBlockParser
}

func (b *codeBlockParser) Trigger() []byte {
	return nil
}

func (b *codeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	line, segment := reader.PeekLine()
	pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
	if pos < 0 || util.IsBlank(line) {
		return nil, NoChildren
	}
	node := ast.NewCodeBlock()
	reader.AdvanceAndSetPadding(pos, padding)
	_, segment = reader.PeekLine()
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return node, NoChildren

}

func (b *codeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	line, segment := reader.PeekLine()
	if util.IsBlank(line) {
		node.Lines().Append(segment.TrimLeftSpaceWidth(4, reader.Source()))
		return Continue | NoChildren
	}
	pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
	if pos < 0 {
		return Close
	}
	reader.AdvanceAndSetPadding(pos, padding)
	_, segment = reader.PeekLine()
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return Continue | NoChildren
}

func (b *codeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
	// trim trailing blank lines
	lines := node.Lines()
	length := lines.Len() - 1
	source := reader.Source()
	for length >= 0 {
		line := lines.At(length)
		if util.IsBlank(line.Value(source)) {
			length--
		} else {
			break
		}
	}
	lines.SetSliced(0, length+1)
}

func (b *codeBlockParser) CanInterruptParagraph() bool {
	return false
}

func (b *codeBlockParser) CanAcceptIndentedLine() bool {
	return true
}
83 vendor/github.com/yuin/goldmark/parser/code_span.go generated vendored Normal file
@ -0,0 +1,83 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type codeSpanParser struct {
|
||||
}
|
||||
|
||||
var defaultCodeSpanParser = &codeSpanParser{}
|
||||
|
||||
// NewCodeSpanParser return a new InlineParser that parses inline codes
|
||||
// surrounded by '`' .
|
||||
func NewCodeSpanParser() InlineParser {
|
||||
return defaultCodeSpanParser
|
||||
}
|
||||
|
||||
func (s *codeSpanParser) Trigger() []byte {
|
||||
return []byte{'`'}
|
||||
}
|
||||
|
||||
func (s *codeSpanParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||
line, startSegment := block.PeekLine()
|
||||
opener := 0
|
||||
for ; opener < len(line) && line[opener] == '`'; opener++ {
|
||||
}
|
||||
block.Advance(opener)
|
||||
l, pos := block.Position()
|
||||
node := ast.NewCodeSpan()
|
||||
for {
|
||||
line, segment := block.PeekLine()
|
||||
if line == nil {
|
||||
block.SetPosition(l, pos)
|
||||
return ast.NewTextSegment(startSegment.WithStop(startSegment.Start + opener))
|
||||
}
|
||||
for i := 0; i < len(line); i++ {
|
||||
c := line[i]
|
||||
if c == '`' {
|
||||
oldi := i
|
||||
for ; i < len(line) && line[i] == '`'; i++ {
|
||||
}
|
||||
closure := i - oldi
|
||||
if closure == opener && (i >= len(line) || line[i] != '`') {
|
||||
segment = segment.WithStop(segment.Start + i - closure)
|
||||
if !segment.IsEmpty() {
|
||||
node.AppendChild(node, ast.NewRawTextSegment(segment))
|
||||
}
|
||||
block.Advance(i)
|
||||
goto end
|
||||
}
|
||||
}
|
||||
}
|
||||
if !util.IsBlank(line) {
|
||||
node.AppendChild(node, ast.NewRawTextSegment(segment))
|
||||
}
|
||||
block.AdvanceLine()
|
||||
}
|
||||
end:
|
||||
if !node.IsBlank(block.Source()) {
|
||||
// trim first halfspace and last halfspace
|
||||
segment := node.FirstChild().(*ast.Text).Segment
|
||||
shouldTrimmed := true
|
||||
if !(!segment.IsEmpty() && block.Source()[segment.Start] == ' ') {
|
||||
shouldTrimmed = false
|
||||
}
|
||||
segment = node.LastChild().(*ast.Text).Segment
|
||||
if !(!segment.IsEmpty() && block.Source()[segment.Stop-1] == ' ') {
|
||||
shouldTrimmed = false
|
||||
}
|
||||
if shouldTrimmed {
|
||||
t := node.FirstChild().(*ast.Text)
|
||||
segment := t.Segment
|
||||
t.Segment = segment.WithStart(segment.Start + 1)
|
||||
t = node.LastChild().(*ast.Text)
|
||||
segment = node.LastChild().(*ast.Text).Segment
|
||||
t.Segment = segment.WithStop(segment.Stop - 1)
|
||||
}
|
||||
|
||||
}
|
||||
return node
|
||||
}
|
241 vendor/github.com/yuin/goldmark/parser/delimiter.go generated vendored Normal file
@ -0,0 +1,241 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A DelimiterProcessor interface provides a set of functions about
|
||||
// Delimiter nodes.
|
||||
type DelimiterProcessor interface {
|
||||
// IsDelimiter returns true if given character is a delimiter, otherwise false.
|
||||
IsDelimiter(byte) bool
|
||||
|
||||
// CanOpenCloser returns true if given opener can close given closer, otherwise false.
|
||||
CanOpenCloser(opener, closer *Delimiter) bool
|
||||
|
||||
// OnMatch will be called when new matched delimiter found.
|
||||
// OnMatch should return a new Node correspond to the matched delimiter.
|
||||
OnMatch(consumes int) ast.Node
|
||||
}
|
||||
|
||||
// A Delimiter struct represents a delimiter like '*' of the Markdown text.
|
||||
type Delimiter struct {
|
||||
ast.BaseInline
|
||||
|
||||
Segment text.Segment
|
||||
|
||||
// CanOpen is set true if this delimiter can open a span for a new node.
|
||||
// See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
|
||||
CanOpen bool
|
||||
|
||||
// CanClose is set true if this delimiter can close a span for a new node.
|
||||
// See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
|
||||
CanClose bool
|
||||
|
||||
// Length is a remaining length of this delimiter.
|
||||
Length int
|
||||
|
||||
// OriginalLength is a original length of this delimiter.
|
||||
OriginalLength int
|
||||
|
||||
// Char is a character of this delimiter.
|
||||
Char byte
|
||||
|
||||
// PreviousDelimiter is a previous sibling delimiter node of this delimiter.
|
||||
PreviousDelimiter *Delimiter
|
||||
|
||||
// NextDelimiter is a next sibling delimiter node of this delimiter.
|
||||
NextDelimiter *Delimiter
|
||||
|
||||
// Processor is a DelimiterProcessor associated with this delimiter.
|
||||
Processor DelimiterProcessor
|
||||
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (d *Delimiter) Inline() {}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (d *Delimiter) Dump(source []byte, level int) {
|
||||
fmt.Printf("%sDelimiter: \"%s\"\n", strings.Repeat(" ", level), string(d.Text(source)))
|
||||
}
|
||||
|
||||
var kindDelimiter = ast.NewNodeKind("Delimiter")
|
||||
|
||||
// Kind implements Node.Kind
|
||||
func (d *Delimiter) Kind() ast.NodeKind {
|
||||
return kindDelimiter
|
||||
}
|
||||
|
||||
// Text implements Node.Text
|
||||
func (d *Delimiter) Text(source []byte) []byte {
|
||||
return d.Segment.Value(source)
|
||||
}
|
||||
|
||||
// ConsumeCharacters consumes delimiters.
|
||||
func (d *Delimiter) ConsumeCharacters(n int) {
|
||||
d.Length -= n
|
||||
d.Segment = d.Segment.WithStop(d.Segment.Start + d.Length)
|
||||
}
|
||||
|
||||
// CalcComsumption calculates how many characters should be used for opening
|
||||
// a new span correspond to given closer.
|
||||
func (d *Delimiter) CalcComsumption(closer *Delimiter) int {
|
||||
if (d.CanClose || closer.CanOpen) && (d.OriginalLength+closer.OriginalLength)%3 == 0 && closer.OriginalLength%3 != 0 {
|
||||
return 0
|
||||
}
|
||||
if d.Length >= 2 && closer.Length >= 2 {
|
||||
return 2
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
// NewDelimiter returns a new Delimiter node.
|
||||
func NewDelimiter(canOpen, canClose bool, length int, char byte, processor DelimiterProcessor) *Delimiter {
|
||||
c := &Delimiter{
|
||||
BaseInline: ast.BaseInline{},
|
||||
CanOpen: canOpen,
|
||||
CanClose: canClose,
|
||||
Length: length,
|
||||
OriginalLength: length,
|
||||
Char: char,
|
||||
PreviousDelimiter: nil,
|
||||
NextDelimiter: nil,
|
||||
Processor: processor,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// ScanDelimiter scans a delimiter by given DelimiterProcessor.
|
||||
func ScanDelimiter(line []byte, before rune, min int, processor DelimiterProcessor) *Delimiter {
|
||||
i := 0
|
||||
c := line[i]
|
||||
j := i
|
||||
if !processor.IsDelimiter(c) {
|
||||
return nil
|
||||
}
|
||||
for ; j < len(line) && c == line[j]; j++ {
|
||||
}
|
||||
if (j - i) >= min {
|
||||
after := rune(' ')
|
||||
if j != len(line) {
|
||||
after = util.ToRune(line, j)
|
||||
}
|
||||
|
||||
canOpen, canClose := false, false
|
||||
beforeIsPunctuation := util.IsPunctRune(before)
|
||||
beforeIsWhitespace := util.IsSpaceRune(before)
|
||||
afterIsPunctuation := util.IsPunctRune(after)
|
||||
afterIsWhitespace := util.IsSpaceRune(after)
|
||||
|
||||
isLeft := !afterIsWhitespace &&
|
||||
(!afterIsPunctuation || beforeIsWhitespace || beforeIsPunctuation)
|
||||
isRight := !beforeIsWhitespace &&
|
||||
(!beforeIsPunctuation || afterIsWhitespace || afterIsPunctuation)
|
||||
|
||||
if line[i] == '_' {
|
||||
canOpen = isLeft && (!isRight || beforeIsPunctuation)
|
||||
canClose = isRight && (!isLeft || afterIsPunctuation)
|
||||
} else {
|
||||
canOpen = isLeft
|
||||
canClose = isRight
|
||||
}
|
||||
return NewDelimiter(canOpen, canClose, j-i, c, processor)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessDelimiters processes the delimiter list in the context.
|
||||
// Processing will be stop when reaching the bottom.
|
||||
//
|
||||
// If you implement an inline parser that can have other inline nodes as
|
||||
// children, you should call this function when nesting span has closed.
|
||||
func ProcessDelimiters(bottom ast.Node, pc Context) {
|
||||
lastDelimiter := pc.LastDelimiter()
|
||||
if lastDelimiter == nil {
|
||||
return
|
||||
}
|
||||
var closer *Delimiter
|
||||
if bottom != nil {
|
||||
if bottom != lastDelimiter {
|
||||
for c := lastDelimiter.PreviousSibling(); c != nil; {
|
||||
if d, ok := c.(*Delimiter); ok {
|
||||
closer = d
|
||||
}
|
||||
prev := c.PreviousSibling()
|
||||
if prev == bottom {
|
||||
break
|
||||
}
|
||||
c = prev
|
||||
}
|
||||
}
|
||||
} else {
|
||||
closer = pc.FirstDelimiter()
|
||||
}
|
||||
if closer == nil {
|
||||
pc.ClearDelimiters(bottom)
|
||||
return
|
||||
}
|
||||
for closer != nil {
|
||||
if !closer.CanClose {
|
||||
closer = closer.NextDelimiter
|
||||
continue
|
||||
}
|
||||
consume := 0
|
||||
found := false
|
||||
maybeOpener := false
|
||||
var opener *Delimiter
|
||||
for opener = closer.PreviousDelimiter; opener != nil; opener = opener.PreviousDelimiter {
|
||||
if opener.CanOpen && opener.Processor.CanOpenCloser(opener, closer) {
|
||||
maybeOpener = true
|
||||
consume = opener.CalcComsumption(closer)
|
||||
if consume > 0 {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
if !maybeOpener && !closer.CanOpen {
|
||||
pc.RemoveDelimiter(closer)
|
||||
}
|
||||
closer = closer.NextDelimiter
|
||||
continue
|
||||
}
|
||||
opener.ConsumeCharacters(consume)
|
||||
closer.ConsumeCharacters(consume)
|
||||
|
||||
node := opener.Processor.OnMatch(consume)
|
||||
|
||||
parent := opener.Parent()
|
||||
child := opener.NextSibling()
|
||||
|
||||
for child != nil && child != closer {
|
||||
next := child.NextSibling()
|
||||
node.AppendChild(node, child)
|
||||
child = next
|
||||
}
|
||||
parent.InsertAfter(parent, opener, node)
|
||||
|
||||
for c := opener.NextDelimiter; c != nil && c != closer; {
|
||||
next := c.NextDelimiter
|
||||
pc.RemoveDelimiter(c)
|
||||
c = next
|
||||
}
|
||||
|
||||
if opener.Length == 0 {
|
||||
pc.RemoveDelimiter(opener)
|
||||
}
|
||||
|
||||
if closer.Length == 0 {
|
||||
next := closer.NextDelimiter
|
||||
pc.RemoveDelimiter(closer)
|
||||
closer = next
|
||||
}
|
||||
}
|
||||
pc.ClearDelimiters(bottom)
|
||||
}
|
50 vendor/github.com/yuin/goldmark/parser/emphasis.go generated vendored Normal file
@ -0,0 +1,50 @@
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
)

type emphasisDelimiterProcessor struct {
}

func (p *emphasisDelimiterProcessor) IsDelimiter(b byte) bool {
	return b == '*' || b == '_'
}

func (p *emphasisDelimiterProcessor) CanOpenCloser(opener, closer *Delimiter) bool {
	return opener.Char == closer.Char
}

func (p *emphasisDelimiterProcessor) OnMatch(consumes int) ast.Node {
	return ast.NewEmphasis(consumes)
}

var defaultEmphasisDelimiterProcessor = &emphasisDelimiterProcessor{}

type emphasisParser struct {
}

var defaultEmphasisParser = &emphasisParser{}

// NewEmphasisParser return a new InlineParser that parses emphasises.
func NewEmphasisParser() InlineParser {
	return defaultEmphasisParser
}

func (s *emphasisParser) Trigger() []byte {
	return []byte{'*', '_'}
}

func (s *emphasisParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	before := block.PrecendingCharacter()
	line, segment := block.PeekLine()
	node := ScanDelimiter(line, before, 1, defaultEmphasisDelimiterProcessor)
	if node == nil {
		return nil
	}
	node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
	block.Advance(node.OriginalLength)
	pc.PushDelimiter(node)
	return node
}
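emphasisDelimiterProcessor is the simplest possible DelimiterProcessor: the same three-method interface defined in delimiter.go earlier in this diff is what extensions build on. A hypothetical processor for a '~'-delimited span could look like the sketch below; it is not part of goldmark, and ast.NewEmphasis is used only as a stand-in node type:

	package parser

	import "github.com/yuin/goldmark/ast"

	// tildeDelimiterProcessor is a hypothetical DelimiterProcessor for '~' runs,
	// shown only to illustrate the interface.
	type tildeDelimiterProcessor struct {
	}

	func (p *tildeDelimiterProcessor) IsDelimiter(b byte) bool {
		return b == '~'
	}

	func (p *tildeDelimiterProcessor) CanOpenCloser(opener, closer *Delimiter) bool {
		return opener.Char == closer.Char
	}

	func (p *tildeDelimiterProcessor) OnMatch(consumes int) ast.Node {
		// A real extension would return its own node kind here.
		return ast.NewEmphasis(consumes)
	}

An inline parser using it would mirror emphasisParser.Parse above: call ScanDelimiter with this processor, set the resulting node's Segment, and push it with pc.PushDelimiter so ProcessDelimiters can match it later.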
110 vendor/github.com/yuin/goldmark/parser/fcode_block.go generated vendored Normal file
@ -0,0 +1,110 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type fencedCodeBlockParser struct {
|
||||
}
|
||||
|
||||
var defaultFencedCodeBlockParser = &fencedCodeBlockParser{}
|
||||
|
||||
// NewFencedCodeBlockParser returns a new BlockParser that
|
||||
// parses fenced code blocks.
|
||||
func NewFencedCodeBlockParser() BlockParser {
|
||||
return defaultFencedCodeBlockParser
|
||||
}
|
||||
|
||||
type fenceData struct {
|
||||
char byte
|
||||
indent int
|
||||
length int
|
||||
node ast.Node
|
||||
}
|
||||
|
||||
var fencedCodeBlockInfoKey = NewContextKey()
|
||||
|
||||
func (b *fencedCodeBlockParser) Trigger() []byte {
|
||||
return []byte{'~', '`'}
|
||||
}
|
||||
|
||||
func (b *fencedCodeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
line, segment := reader.PeekLine()
|
||||
pos := pc.BlockOffset()
|
||||
if pos < 0 || (line[pos] != '`' && line[pos] != '~') {
|
||||
return nil, NoChildren
|
||||
}
|
||||
findent := pos
|
||||
fenceChar := line[pos]
|
||||
i := pos
|
||||
for ; i < len(line) && line[i] == fenceChar; i++ {
|
||||
}
|
||||
oFenceLength := i - pos
|
||||
if oFenceLength < 3 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
var info *ast.Text
|
||||
if i < len(line)-1 {
|
||||
rest := line[i:]
|
||||
left := util.TrimLeftSpaceLength(rest)
|
||||
right := util.TrimRightSpaceLength(rest)
|
||||
if left < len(rest)-right {
|
||||
infoStart, infoStop := segment.Start-segment.Padding+i+left, segment.Stop-right
|
||||
value := rest[left : len(rest)-right]
|
||||
if fenceChar == '`' && bytes.IndexByte(value, '`') > -1 {
|
||||
return nil, NoChildren
|
||||
} else if infoStart != infoStop {
|
||||
info = ast.NewTextSegment(text.NewSegment(infoStart, infoStop))
|
||||
}
|
||||
}
|
||||
}
|
||||
node := ast.NewFencedCodeBlock(info)
|
||||
pc.Set(fencedCodeBlockInfoKey, &fenceData{fenceChar, findent, oFenceLength, node})
|
||||
return node, NoChildren
|
||||
|
||||
}
|
||||
|
||||
func (b *fencedCodeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
line, segment := reader.PeekLine()
|
||||
fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
|
||||
w, pos := util.IndentWidth(line, reader.LineOffset())
|
||||
if w < 4 {
|
||||
i := pos
|
||||
for ; i < len(line) && line[i] == fdata.char; i++ {
|
||||
}
|
||||
length := i - pos
|
||||
if length >= fdata.length && util.IsBlank(line[i:]) {
|
||||
newline := 1
|
||||
if line[len(line)-1] != '\n' {
|
||||
newline = 0
|
||||
}
|
||||
reader.Advance(segment.Stop - segment.Start - newline - segment.Padding)
|
||||
return Close
|
||||
}
|
||||
}
|
||||
pos, padding := util.DedentPositionPadding(line, reader.LineOffset(), segment.Padding, fdata.indent)
|
||||
|
||||
seg := text.NewSegmentPadding(segment.Start+pos, segment.Stop, padding)
|
||||
node.Lines().Append(seg)
|
||||
reader.AdvanceAndSetPadding(segment.Stop-segment.Start-pos-1, padding)
|
||||
return Continue | NoChildren
|
||||
}
|
||||
|
||||
func (b *fencedCodeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
|
||||
if fdata.node == node {
|
||||
pc.Set(fencedCodeBlockInfoKey, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *fencedCodeBlockParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *fencedCodeBlockParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
228 vendor/github.com/yuin/goldmark/parser/html_block.go generated vendored Normal file
@ -0,0 +1,228 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var allowedBlockTags = map[string]bool{
|
||||
"address": true,
|
||||
"article": true,
|
||||
"aside": true,
|
||||
"base": true,
|
||||
"basefont": true,
|
||||
"blockquote": true,
|
||||
"body": true,
|
||||
"caption": true,
|
||||
"center": true,
|
||||
"col": true,
|
||||
"colgroup": true,
|
||||
"dd": true,
|
||||
"details": true,
|
||||
"dialog": true,
|
||||
"dir": true,
|
||||
"div": true,
|
||||
"dl": true,
|
||||
"dt": true,
|
||||
"fieldset": true,
|
||||
"figcaption": true,
|
||||
"figure": true,
|
||||
"footer": true,
|
||||
"form": true,
|
||||
"frame": true,
|
||||
"frameset": true,
|
||||
"h1": true,
|
||||
"h2": true,
|
||||
"h3": true,
|
||||
"h4": true,
|
||||
"h5": true,
|
||||
"h6": true,
|
||||
"head": true,
|
||||
"header": true,
|
||||
"hr": true,
|
||||
"html": true,
|
||||
"iframe": true,
|
||||
"legend": true,
|
||||
"li": true,
|
||||
"link": true,
|
||||
"main": true,
|
||||
"menu": true,
|
||||
"menuitem": true,
|
||||
"meta": true,
|
||||
"nav": true,
|
||||
"noframes": true,
|
||||
"ol": true,
|
||||
"optgroup": true,
|
||||
"option": true,
|
||||
"p": true,
|
||||
"param": true,
|
||||
"section": true,
|
||||
"source": true,
|
||||
"summary": true,
|
||||
"table": true,
|
||||
"tbody": true,
|
||||
"td": true,
|
||||
"tfoot": true,
|
||||
"th": true,
|
||||
"thead": true,
|
||||
"title": true,
|
||||
"tr": true,
|
||||
"track": true,
|
||||
"ul": true,
|
||||
}
|
||||
|
||||
var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style)(?:\s.*|>.*|/>.*|)\n?$`)
|
||||
var htmlBlockType1CloseRegexp = regexp.MustCompile(`(?i)^.*</(?:script|pre|style)>.*`)
|
||||
|
||||
var htmlBlockType2OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<!\-\-`)
|
||||
var htmlBlockType2Close = []byte{'-', '-', '>'}
|
||||
|
||||
var htmlBlockType3OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\?`)
|
||||
var htmlBlockType3Close = []byte{'?', '>'}
|
||||
|
||||
var htmlBlockType4OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<![A-Z]+.*\n?$`)
|
||||
var htmlBlockType4Close = []byte{'>'}
|
||||
|
||||
var htmlBlockType5OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\!\[CDATA\[`)
|
||||
var htmlBlockType5Close = []byte{']', ']', '>'}
|
||||
|
||||
var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}</?([a-zA-Z0-9]+)(?:\s.*|>.*|/>.*|)\n?$`)
|
||||
|
||||
var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/)?([a-zA-Z0-9\-]+)(` + attributePattern + `*)(:?>|/>)\s*\n?$`)
|
||||
|
||||
type htmlBlockParser struct {
|
||||
}
|
||||
|
||||
var defaultHTMLBlockParser = &htmlBlockParser{}
|
||||
|
||||
// NewHTMLBlockParser return a new BlockParser that can parse html
|
||||
// blocks.
|
||||
func NewHTMLBlockParser() BlockParser {
|
||||
return defaultHTMLBlockParser
|
||||
}
|
||||
|
||||
func (b *htmlBlockParser) Trigger() []byte {
|
||||
return []byte{'<'}
|
||||
}
|
||||
|
||||
func (b *htmlBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
var node *ast.HTMLBlock
|
||||
line, segment := reader.PeekLine()
|
||||
last := pc.LastOpenedBlock().Node
|
||||
if pos := pc.BlockOffset(); pos < 0 || line[pos] != '<' {
|
||||
return nil, NoChildren
|
||||
}
|
||||
|
||||
if m := htmlBlockType1OpenRegexp.FindSubmatchIndex(line); m != nil {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType1)
|
||||
} else if htmlBlockType2OpenRegexp.Match(line) {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType2)
|
||||
} else if htmlBlockType3OpenRegexp.Match(line) {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType3)
|
||||
} else if htmlBlockType4OpenRegexp.Match(line) {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType4)
|
||||
} else if htmlBlockType5OpenRegexp.Match(line) {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType5)
|
||||
} else if match := htmlBlockType7Regexp.FindSubmatchIndex(line); match != nil {
|
||||
isCloseTag := match[2] > -1 && bytes.Equal(line[match[2]:match[3]], []byte("/"))
|
||||
hasAttr := match[6] != match[7]
|
||||
tagName := strings.ToLower(string(line[match[4]:match[5]]))
|
||||
_, ok := allowedBlockTags[tagName]
|
||||
if ok {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType6)
|
||||
} else if tagName != "script" && tagName != "style" && tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType7)
|
||||
}
|
||||
}
|
||||
if node == nil {
|
||||
if match := htmlBlockType6Regexp.FindSubmatchIndex(line); match != nil {
|
||||
tagName := string(line[match[2]:match[3]])
|
||||
_, ok := allowedBlockTags[strings.ToLower(tagName)]
|
||||
if ok {
|
||||
node = ast.NewHTMLBlock(ast.HTMLBlockType6)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node != nil {
|
||||
reader.Advance(segment.Len() - 1)
|
||||
node.Lines().Append(segment)
|
||||
return node, NoChildren
|
||||
}
|
||||
return nil, NoChildren
|
||||
}
|
||||
|
||||
func (b *htmlBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
htmlBlock := node.(*ast.HTMLBlock)
|
||||
lines := htmlBlock.Lines()
|
||||
line, segment := reader.PeekLine()
|
||||
var closurePattern []byte
|
||||
|
||||
switch htmlBlock.HTMLBlockType {
|
||||
case ast.HTMLBlockType1:
|
||||
if lines.Len() == 1 {
|
||||
firstLine := lines.At(0)
|
||||
if htmlBlockType1CloseRegexp.Match(firstLine.Value(reader.Source())) {
|
||||
return Close
|
||||
}
|
||||
}
|
||||
if htmlBlockType1CloseRegexp.Match(line) {
|
||||
htmlBlock.ClosureLine = segment
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return Close
|
||||
}
|
||||
case ast.HTMLBlockType2:
|
||||
closurePattern = htmlBlockType2Close
|
||||
fallthrough
|
||||
case ast.HTMLBlockType3:
|
||||
if closurePattern == nil {
|
||||
closurePattern = htmlBlockType3Close
|
||||
}
|
||||
fallthrough
|
||||
case ast.HTMLBlockType4:
|
||||
if closurePattern == nil {
|
||||
closurePattern = htmlBlockType4Close
|
||||
}
|
||||
fallthrough
|
||||
case ast.HTMLBlockType5:
|
||||
if closurePattern == nil {
|
||||
closurePattern = htmlBlockType5Close
|
||||
}
|
||||
|
||||
if lines.Len() == 1 {
|
||||
firstLine := lines.At(0)
|
||||
if bytes.Contains(firstLine.Value(reader.Source()), closurePattern) {
|
||||
return Close
|
||||
}
|
||||
}
|
||||
if bytes.Contains(line, closurePattern) {
|
||||
htmlBlock.ClosureLine = segment
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return Close
|
||||
}
|
||||
|
||||
case ast.HTMLBlockType6, ast.HTMLBlockType7:
|
||||
if util.IsBlank(line) {
|
||||
return Close
|
||||
}
|
||||
}
|
||||
node.Lines().Append(segment)
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return Continue | NoChildren
|
||||
}
|
||||
|
||||
func (b *htmlBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (b *htmlBlockParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *htmlBlockParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
387 vendor/github.com/yuin/goldmark/parser/link.go generated vendored Normal file
@ -0,0 +1,387 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var linkLabelStateKey = NewContextKey()
|
||||
|
||||
type linkLabelState struct {
|
||||
ast.BaseInline
|
||||
|
||||
Segment text.Segment
|
||||
|
||||
IsImage bool
|
||||
|
||||
Prev *linkLabelState
|
||||
|
||||
Next *linkLabelState
|
||||
|
||||
First *linkLabelState
|
||||
|
||||
Last *linkLabelState
|
||||
}
|
||||
|
||||
func newLinkLabelState(segment text.Segment, isImage bool) *linkLabelState {
|
||||
return &linkLabelState{
|
||||
Segment: segment,
|
||||
IsImage: isImage,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *linkLabelState) Text(source []byte) []byte {
|
||||
return s.Segment.Value(source)
|
||||
}
|
||||
|
||||
func (s *linkLabelState) Dump(source []byte, level int) {
|
||||
fmt.Printf("%slinkLabelState: \"%s\"\n", strings.Repeat(" ", level), s.Text(source))
|
||||
}
|
||||
|
||||
var kindLinkLabelState = ast.NewNodeKind("LinkLabelState")
|
||||
|
||||
func (s *linkLabelState) Kind() ast.NodeKind {
|
||||
return kindLinkLabelState
|
||||
}
|
||||
|
||||
func pushLinkLabelState(pc Context, v *linkLabelState) {
|
||||
tlist := pc.Get(linkLabelStateKey)
|
||||
var list *linkLabelState
|
||||
if tlist == nil {
|
||||
list = v
|
||||
v.First = v
|
||||
v.Last = v
|
||||
pc.Set(linkLabelStateKey, list)
|
||||
} else {
|
||||
list = tlist.(*linkLabelState)
|
||||
l := list.Last
|
||||
list.Last = v
|
||||
l.Next = v
|
||||
v.Prev = l
|
||||
}
|
||||
}
|
||||
|
||||
func removeLinkLabelState(pc Context, d *linkLabelState) {
|
||||
tlist := pc.Get(linkLabelStateKey)
|
||||
var list *linkLabelState
|
||||
if tlist == nil {
|
||||
return
|
||||
}
|
||||
list = tlist.(*linkLabelState)
|
||||
|
||||
if d.Prev == nil {
|
||||
list = d.Next
|
||||
if list != nil {
|
||||
list.First = d
|
||||
list.Last = d.Last
|
||||
list.Prev = nil
|
||||
pc.Set(linkLabelStateKey, list)
|
||||
} else {
|
||||
pc.Set(linkLabelStateKey, nil)
|
||||
}
|
||||
} else {
|
||||
d.Prev.Next = d.Next
|
||||
if d.Next != nil {
|
||||
d.Next.Prev = d.Prev
|
||||
}
|
||||
}
|
||||
if list != nil && d.Next == nil {
|
||||
list.Last = d.Prev
|
||||
}
|
||||
d.Next = nil
|
||||
d.Prev = nil
|
||||
d.First = nil
|
||||
d.Last = nil
|
||||
}
|
||||
|
||||
type linkParser struct {
|
||||
}
|
||||
|
||||
var defaultLinkParser = &linkParser{}
|
||||
|
||||
// NewLinkParser return a new InlineParser that parses links.
|
||||
func NewLinkParser() InlineParser {
|
||||
return defaultLinkParser
|
||||
}
|
||||
|
||||
func (s *linkParser) Trigger() []byte {
|
||||
return []byte{'!', '[', ']'}
|
||||
}
|
||||
|
||||
var linkDestinationRegexp = regexp.MustCompile(`\s*([^\s].+)`)
|
||||
var linkTitleRegexp = regexp.MustCompile(`\s+(\)|["'\(].+)`)
|
||||
var linkBottom = NewContextKey()
|
||||
|
||||
func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||
line, segment := block.PeekLine()
|
||||
if line[0] == '!' {
|
||||
if len(line) > 1 && line[1] == '[' {
|
||||
block.Advance(1)
|
||||
pc.Set(linkBottom, pc.LastDelimiter())
|
||||
return processLinkLabelOpen(block, segment.Start+1, true, pc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if line[0] == '[' {
|
||||
pc.Set(linkBottom, pc.LastDelimiter())
|
||||
return processLinkLabelOpen(block, segment.Start, false, pc)
|
||||
}
|
||||
|
||||
// line[0] == ']'
|
||||
tlist := pc.Get(linkLabelStateKey)
|
||||
if tlist == nil {
|
||||
return nil
|
||||
}
|
||||
last := tlist.(*linkLabelState).Last
|
||||
if last == nil {
|
||||
return nil
|
||||
}
|
||||
block.Advance(1)
|
||||
removeLinkLabelState(pc, last)
|
||||
if s.containsLink(last) { // a link in a link text is not allowed
|
||||
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||
return nil
|
||||
}
|
||||
|
||||
c := block.Peek()
|
||||
l, pos := block.Position()
|
||||
var link *ast.Link
|
||||
var hasValue bool
|
||||
if c == '(' { // normal link
|
||||
link = s.parseLink(parent, last, block, pc)
|
||||
} else if c == '[' { // reference link
|
||||
link, hasValue = s.parseReferenceLink(parent, last, block, pc)
|
||||
if link == nil && hasValue {
|
||||
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if link == nil {
|
||||
// maybe shortcut reference link
|
||||
block.SetPosition(l, pos)
|
||||
ssegment := text.NewSegment(last.Segment.Stop, segment.Start)
|
||||
maybeReference := block.Value(ssegment)
|
||||
ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
|
||||
if !ok {
|
||||
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||
return nil
|
||||
}
|
||||
link = ast.NewLink()
|
||||
s.processLinkLabel(parent, link, last, pc)
|
||||
link.Title = ref.Title()
|
||||
link.Destination = ref.Destination()
|
||||
}
|
||||
if last.IsImage {
|
||||
last.Parent().RemoveChild(last.Parent(), last)
|
||||
return ast.NewImage(link)
|
||||
}
|
||||
last.Parent().RemoveChild(last.Parent(), last)
|
||||
return link
|
||||
}
|
||||
|
||||
func (s *linkParser) containsLink(last *linkLabelState) bool {
|
||||
if last.IsImage {
|
||||
return false
|
||||
}
|
||||
var c ast.Node
|
||||
for c = last; c != nil; c = c.NextSibling() {
|
||||
if _, ok := c.(*ast.Link); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func processLinkLabelOpen(block text.Reader, pos int, isImage bool, pc Context) *linkLabelState {
|
||||
start := pos
|
||||
if isImage {
|
||||
start--
|
||||
}
|
||||
state := newLinkLabelState(text.NewSegment(start, pos+1), isImage)
|
||||
pushLinkLabelState(pc, state)
|
||||
block.Advance(1)
|
||||
return state
|
||||
}
|
||||
|
||||
func (s *linkParser) processLinkLabel(parent ast.Node, link *ast.Link, last *linkLabelState, pc Context) {
|
||||
var bottom ast.Node
|
||||
if v := pc.Get(linkBottom); v != nil {
|
||||
bottom = v.(ast.Node)
|
||||
}
|
||||
pc.Set(linkBottom, nil)
|
||||
ProcessDelimiters(bottom, pc)
|
||||
for c := last.NextSibling(); c != nil; {
|
||||
next := c.NextSibling()
|
||||
parent.RemoveChild(parent, c)
|
||||
link.AppendChild(link, c)
|
||||
c = next
|
||||
}
|
||||
}
|
||||
|
||||
func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) (*ast.Link, bool) {
|
||||
_, orgpos := block.Position()
|
||||
block.Advance(1) // skip '['
|
||||
line, segment := block.PeekLine()
|
||||
endIndex := util.FindClosure(line, '[', ']', false, true)
|
||||
if endIndex < 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
block.Advance(endIndex + 1)
|
||||
ssegment := segment.WithStop(segment.Start + endIndex)
|
||||
maybeReference := block.Value(ssegment)
|
||||
if util.IsBlank(maybeReference) { // collapsed reference link
|
||||
ssegment = text.NewSegment(last.Segment.Stop, orgpos.Start-1)
|
||||
maybeReference = block.Value(ssegment)
|
||||
}
|
||||
|
||||
ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
|
||||
if !ok {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
link := ast.NewLink()
|
||||
s.processLinkLabel(parent, link, last, pc)
|
||||
link.Title = ref.Title()
|
||||
link.Destination = ref.Destination()
|
||||
return link, true
|
||||
}
|
||||
|
||||
func (s *linkParser) parseLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) *ast.Link {
|
||||
block.Advance(1) // skip '('
|
||||
block.SkipSpaces()
|
||||
var title []byte
|
||||
var destination []byte
|
||||
var ok bool
|
||||
if block.Peek() == ')' { // empty link like '[link]()'
|
||||
block.Advance(1)
|
||||
} else {
|
||||
destination, ok = parseLinkDestination(block)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
block.SkipSpaces()
|
||||
if block.Peek() == ')' {
|
||||
block.Advance(1)
|
||||
} else {
|
||||
title, ok = parseLinkTitle(block)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
block.SkipSpaces()
|
||||
if block.Peek() == ')' {
|
||||
block.Advance(1)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
link := ast.NewLink()
|
||||
s.processLinkLabel(parent, link, last, pc)
|
||||
link.Destination = destination
|
||||
link.Title = title
|
||||
return link
|
||||
}
|
||||
|
||||
func parseLinkDestination(block text.Reader) ([]byte, bool) {
|
||||
block.SkipSpaces()
|
||||
line, _ := block.PeekLine()
|
||||
buf := []byte{}
|
||||
if block.Peek() == '<' {
|
||||
i := 1
|
||||
for i < len(line) {
|
||||
c := line[i]
|
||||
if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
|
||||
buf = append(buf, '\\', line[i+1])
|
||||
i += 2
|
||||
continue
|
||||
} else if c == '>' {
|
||||
block.Advance(i + 1)
|
||||
return line[1:i], true
|
||||
}
|
||||
buf = append(buf, c)
|
||||
i++
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
opened := 0
|
||||
i := 0
|
||||
for i < len(line) {
|
||||
c := line[i]
|
||||
if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
|
||||
buf = append(buf, '\\', line[i+1])
|
||||
i += 2
|
||||
continue
|
||||
} else if c == '(' {
|
||||
opened++
|
||||
} else if c == ')' {
|
||||
opened--
|
||||
if opened < 0 {
|
||||
break
|
||||
}
|
||||
} else if util.IsSpace(c) {
|
||||
break
|
||||
}
|
||||
buf = append(buf, c)
|
||||
i++
|
||||
}
|
||||
block.Advance(i)
|
||||
return line[:i], len(line[:i]) != 0
|
||||
}
|
||||
|
||||
func parseLinkTitle(block text.Reader) ([]byte, bool) {
|
||||
block.SkipSpaces()
|
||||
opener := block.Peek()
|
||||
if opener != '"' && opener != '\'' && opener != '(' {
|
||||
return nil, false
|
||||
}
|
||||
closer := opener
|
||||
if opener == '(' {
|
||||
closer = ')'
|
||||
}
|
||||
savedLine, savedPosition := block.Position()
|
||||
var title []byte
|
||||
for i := 0; ; i++ {
|
||||
line, _ := block.PeekLine()
|
||||
if line == nil {
|
||||
block.SetPosition(savedLine, savedPosition)
|
||||
return nil, false
|
||||
}
|
||||
offset := 0
|
||||
if i == 0 {
|
||||
offset = 1
|
||||
}
|
||||
pos := util.FindClosure(line[offset:], opener, closer, false, true)
|
||||
if pos < 0 {
|
||||
title = append(title, line[offset:]...)
|
||||
block.AdvanceLine()
|
||||
continue
|
||||
}
|
||||
pos += offset + 1 // 1: closer
|
||||
block.Advance(pos)
|
||||
if i == 0 { // avoid allocating new slice
|
||||
return line[offset : pos-1], true
|
||||
}
|
||||
return append(title, line[offset:pos-1]...), true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
|
||||
tlist := pc.Get(linkLabelStateKey)
|
||||
if tlist == nil {
|
||||
return
|
||||
}
|
||||
for s := tlist.(*linkLabelState); s != nil; {
|
||||
next := s.Next
|
||||
removeLinkLabelState(pc, s)
|
||||
s.Parent().ReplaceChild(s.Parent(), s, ast.NewTextSegment(s.Segment))
|
||||
s = next
|
||||
}
|
||||
}
|
163 vendor/github.com/yuin/goldmark/parser/link_ref.go generated vendored Normal file
@ -0,0 +1,163 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type linkReferenceParagraphTransformer struct {
|
||||
}
|
||||
|
||||
// LinkReferenceParagraphTransformer is a ParagraphTransformer implementation
|
||||
// that parses and extracts link reference from paragraphs.
|
||||
var LinkReferenceParagraphTransformer = &linkReferenceParagraphTransformer{}
|
||||
|
||||
func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, reader text.Reader, pc Context) {
|
||||
lines := node.Lines()
|
||||
block := text.NewBlockReader(reader.Source(), lines)
|
||||
removes := [][2]int{}
|
||||
for {
|
||||
start, end := parseLinkReferenceDefinition(block, pc)
|
||||
if start > -1 {
|
||||
if start == end {
|
||||
end++
|
||||
}
|
||||
removes = append(removes, [2]int{start, end})
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
offset := 0
|
||||
for _, remove := range removes {
|
||||
if lines.Len() == 0 {
|
||||
break
|
||||
}
|
||||
s := lines.Sliced(remove[1]-offset, lines.Len())
|
||||
lines.SetSliced(0, remove[0]-offset)
|
||||
lines.AppendAll(s)
|
||||
offset = remove[1]
|
||||
}
|
||||
|
||||
if lines.Len() == 0 {
|
||||
t := ast.NewTextBlock()
|
||||
t.SetBlankPreviousLines(node.HasBlankPreviousLines())
|
||||
node.Parent().ReplaceChild(node.Parent(), node, t)
|
||||
return
|
||||
}
|
||||
|
||||
node.SetLines(lines)
|
||||
}
|
||||
|
||||
func parseLinkReferenceDefinition(block text.Reader, pc Context) (int, int) {
|
||||
block.SkipSpaces()
|
||||
line, segment := block.PeekLine()
|
||||
if line == nil {
|
||||
return -1, -1
|
||||
}
|
||||
startLine, _ := block.Position()
|
||||
width, pos := util.IndentWidth(line, 0)
|
||||
if width > 3 {
|
||||
return -1, -1
|
||||
}
|
||||
if width != 0 {
|
||||
pos++
|
||||
}
|
||||
if line[pos] != '[' {
|
||||
return -1, -1
|
||||
}
|
||||
open := segment.Start + pos + 1
|
||||
closes := -1
|
||||
block.Advance(pos + 1)
|
||||
for {
|
||||
line, segment = block.PeekLine()
|
||||
if line == nil {
|
||||
return -1, -1
|
||||
}
|
||||
closure := util.FindClosure(line, '[', ']', false, false)
|
||||
if closure > -1 {
|
||||
closes = segment.Start + closure
|
||||
next := closure + 1
|
||||
if next >= len(line) || line[next] != ':' {
|
||||
return -1, -1
|
||||
}
|
||||
block.Advance(next + 1)
|
||||
break
|
||||
}
|
||||
block.AdvanceLine()
|
||||
}
|
||||
if closes < 0 {
|
||||
return -1, -1
|
||||
}
|
||||
label := block.Value(text.NewSegment(open, closes))
|
||||
if util.IsBlank(label) {
|
||||
return -1, -1
|
||||
}
|
||||
block.SkipSpaces()
|
||||
destination, ok := parseLinkDestination(block)
|
||||
if !ok {
|
||||
return -1, -1
|
||||
}
|
||||
line, segment = block.PeekLine()
|
||||
isNewLine := line == nil || util.IsBlank(line)
|
||||
|
||||
endLine, _ := block.Position()
|
||||
_, spaces, _ := block.SkipSpaces()
|
||||
opener := block.Peek()
|
||||
if opener != '"' && opener != '\'' && opener != '(' {
|
||||
if !isNewLine {
|
||||
return -1, -1
|
||||
}
|
||||
ref := NewReference(label, destination, nil)
|
||||
pc.AddReference(ref)
|
||||
return startLine, endLine + 1
|
||||
}
|
||||
if spaces == 0 {
|
||||
return -1, -1
|
||||
}
|
||||
block.Advance(1)
|
||||
open = -1
|
||||
closes = -1
|
||||
closer := opener
|
||||
if opener == '(' {
|
||||
closer = ')'
|
||||
}
|
||||
for {
|
||||
line, segment = block.PeekLine()
|
||||
if line == nil {
|
||||
return -1, -1
|
||||
}
|
||||
if open < 0 {
|
||||
open = segment.Start
|
||||
}
|
||||
closure := util.FindClosure(line, opener, closer, false, true)
|
||||
if closure > -1 {
|
||||
closes = segment.Start + closure
|
||||
block.Advance(closure + 1)
|
||||
break
|
||||
}
|
||||
block.AdvanceLine()
|
||||
}
|
||||
if closes < 0 {
|
||||
return -1, -1
|
||||
}
|
||||
|
||||
line, segment = block.PeekLine()
|
||||
if line != nil && !util.IsBlank(line) {
|
||||
if !isNewLine {
|
||||
return -1, -1
|
||||
}
|
||||
title := block.Value(text.NewSegment(open, closes))
|
||||
ref := NewReference(label, destination, title)
|
||||
pc.AddReference(ref)
|
||||
return startLine, endLine
|
||||
}
|
||||
|
||||
title := block.Value(text.NewSegment(open, closes))
|
||||
|
||||
endLine, _ = block.Position()
|
||||
ref := NewReference(label, destination, title)
|
||||
pc.AddReference(ref)
|
||||
return startLine, endLine + 1
|
||||
}
|
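The paragraph transformer above strips link reference definitions out of paragraphs and registers them in the Context, so later shortcut or collapsed references resolve against them. The effect is easiest to see from the input side; a small conversion sketch with an invented document:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/yuin/goldmark"
	)

	func main() {
		md := goldmark.New()
		// The first line is a link reference definition; the transformer removes it
		// from the output and the later [tea] reference resolves against it.
		src := []byte("[tea]: https://gitea.com/gitea/tea \"tea CLI\"\n\nInstall [tea] to manage issues.\n")
		var buf bytes.Buffer
		if err := md.Convert(src, &buf); err != nil {
			panic(err)
		}
		fmt.Println(buf.String())
	}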
251 vendor/github.com/yuin/goldmark/parser/list.go generated vendored Normal file
@ -0,0 +1,251 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type listItemType int
|
||||
|
||||
const (
|
||||
notList listItemType = iota
|
||||
bulletList
|
||||
orderedList
|
||||
)
|
||||
|
||||
// Same as
|
||||
// `^(([ ]*)([\-\*\+]))(\s+.*)?\n?$`.FindSubmatchIndex or
|
||||
// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex
|
||||
func parseListItem(line []byte) ([6]int, listItemType) {
|
||||
i := 0
|
||||
l := len(line)
|
||||
ret := [6]int{}
|
||||
for ; i < l && line[i] == ' '; i++ {
|
||||
c := line[i]
|
||||
if c == '\t' {
|
||||
return ret, notList
|
||||
}
|
||||
}
|
||||
if i > 3 {
|
||||
return ret, notList
|
||||
}
|
||||
ret[0] = 0
|
||||
ret[1] = i
|
||||
ret[2] = i
|
||||
var typ listItemType
|
||||
if i < l && (line[i] == '-' || line[i] == '*' || line[i] == '+') {
|
||||
i++
|
||||
ret[3] = i
|
||||
typ = bulletList
|
||||
} else if i < l {
|
||||
for ; i < l && util.IsNumeric(line[i]); i++ {
|
||||
}
|
||||
ret[3] = i
|
||||
if ret[3] == ret[2] || ret[3]-ret[2] > 9 {
|
||||
return ret, notList
|
||||
}
|
||||
if i < l && (line[i] == '.' || line[i] == ')') {
|
||||
i++
|
||||
ret[3] = i
|
||||
} else {
|
||||
return ret, notList
|
||||
}
|
||||
typ = orderedList
|
||||
} else {
|
||||
return ret, notList
|
||||
}
|
||||
if i < l && line[i] != '\n' {
|
||||
w, _ := util.IndentWidth(line[i:], 0)
|
||||
if w == 0 {
|
||||
return ret, notList
|
||||
}
|
||||
}
|
||||
if i >= l {
|
||||
ret[4] = -1
|
||||
ret[5] = -1
|
||||
return ret, typ
|
||||
}
|
||||
ret[4] = i
|
||||
ret[5] = len(line)
|
||||
if line[ret[5]-1] == '\n' && line[i] != '\n' {
|
||||
ret[5]--
|
||||
}
|
||||
return ret, typ
|
||||
}
|
||||
|
||||
func matchesListItem(source []byte, strict bool) ([6]int, listItemType) {
|
||||
m, typ := parseListItem(source)
|
||||
if typ != notList && (!strict || strict && m[1] < 4) {
|
||||
return m, typ
|
||||
}
|
||||
return m, notList
|
||||
}
|
||||
|
||||
func calcListOffset(source []byte, match [6]int) int {
|
||||
offset := 0
|
||||
if match[4] < 0 || util.IsBlank(source[match[4]:]) { // list item starts with a blank line
|
||||
offset = 1
|
||||
} else {
|
||||
offset, _ = util.IndentWidth(source[match[4]:], match[4])
|
||||
if offset > 4 { // offseted codeblock
|
||||
offset = 1
|
||||
}
|
||||
}
|
||||
return offset
|
||||
}
|
||||
|
||||
func lastOffset(node ast.Node) int {
|
||||
lastChild := node.LastChild()
|
||||
if lastChild != nil {
|
||||
return lastChild.(*ast.ListItem).Offset
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type listParser struct {
|
||||
}
|
||||
|
||||
var defaultListParser = &listParser{}
|
||||
|
||||
// NewListParser returns a new BlockParser that
|
||||
// parses lists.
|
||||
// This parser must take precedence over the ListItemParser.
|
||||
func NewListParser() BlockParser {
|
||||
return defaultListParser
|
||||
}
|
||||
|
||||
func (b *listParser) Trigger() []byte {
|
||||
return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||
}
|
||||
|
||||
func (b *listParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
last := pc.LastOpenedBlock().Node
|
||||
if _, lok := last.(*ast.List); lok || pc.Get(skipListParser) != nil {
|
||||
pc.Set(skipListParser, nil)
|
||||
return nil, NoChildren
|
||||
}
|
||||
line, _ := reader.PeekLine()
|
||||
match, typ := matchesListItem(line, true)
|
||||
if typ == notList {
|
||||
return nil, NoChildren
|
||||
}
|
||||
start := -1
|
||||
if typ == orderedList {
|
||||
number := line[match[2] : match[3]-1]
|
||||
start, _ = strconv.Atoi(string(number))
|
||||
}
|
||||
|
||||
if ast.IsParagraph(last) && last.Parent() == parent {
|
||||
// we allow only lists starting with 1 to interrupt paragraphs.
|
||||
if typ == orderedList && start != 1 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
//an empty list item cannot interrupt a paragraph:
|
||||
if match[5]-match[4] == 1 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
}
|
||||
|
||||
marker := line[match[3]-1]
|
||||
node := ast.NewList(marker)
|
||||
if start > -1 {
|
||||
node.Start = start
|
||||
}
|
||||
return node, HasChildren
|
||||
}
|
||||
|
||||
func (b *listParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
list := node.(*ast.List)
|
||||
line, _ := reader.PeekLine()
|
||||
if util.IsBlank(line) {
|
||||
// A list item can begin with at most one blank line
|
||||
if node.ChildCount() == 1 && node.LastChild().ChildCount() == 0 {
|
||||
return Close
|
||||
}
|
||||
return Continue | HasChildren
|
||||
}
|
||||
|
||||
// "offset" means a width that bar indicates.
|
||||
// - aaaaaaaa
|
||||
// |----|
|
||||
//
|
||||
// If the indent is less than the last offset like
|
||||
// - a
|
||||
// - b <--- current line
|
||||
// it maybe a new child of the list.
|
||||
offset := lastOffset(node)
|
||||
indent, _ := util.IndentWidth(line, reader.LineOffset())
|
||||
|
||||
if indent < offset {
|
||||
if indent < 4 {
|
||||
match, typ := matchesListItem(line, false) // may have a leading spaces more than 3
|
||||
if typ != notList && match[1]-offset < 4 {
|
||||
marker := line[match[3]-1]
|
||||
if !list.CanContinue(marker, typ == orderedList) {
|
||||
return Close
|
||||
}
|
||||
// Thematic Breaks take precedence over lists
|
||||
if isThematicBreak(line[match[3]-1:], 0) {
|
||||
isHeading := false
|
||||
last := pc.LastOpenedBlock().Node
|
||||
if ast.IsParagraph(last) {
|
||||
c, ok := matchesSetextHeadingBar(line[match[3]-1:])
|
||||
if ok && c == '-' {
|
||||
isHeading = true
|
||||
}
|
||||
}
|
||||
if !isHeading {
|
||||
return Close
|
||||
}
|
||||
}
|
||||
|
||||
return Continue | HasChildren
|
||||
}
|
||||
}
|
||||
return Close
|
||||
}
|
||||
return Continue | HasChildren
|
||||
}
|
||||
|
||||
func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
list := node.(*ast.List)
|
||||
|
||||
for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {
|
||||
if c.FirstChild() != nil && c.FirstChild() != c.LastChild() {
|
||||
for c1 := c.FirstChild().NextSibling(); c1 != nil; c1 = c1.NextSibling() {
|
||||
if bl, ok := c1.(ast.Node); ok && bl.HasBlankPreviousLines() {
|
||||
list.IsTight = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if c != node.FirstChild() {
|
||||
if bl, ok := c.(ast.Node); ok && bl.HasBlankPreviousLines() {
|
||||
list.IsTight = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if list.IsTight {
|
||||
for child := node.FirstChild(); child != nil; child = child.NextSibling() {
|
||||
for gc := child.FirstChild(); gc != nil; gc = gc.NextSibling() {
|
||||
paragraph, ok := gc.(*ast.Paragraph)
|
||||
if ok {
|
||||
textBlock := ast.NewTextBlock()
|
||||
textBlock.SetLines(paragraph.Lines())
|
||||
child.ReplaceChild(child, paragraph, textBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *listParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *listParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
85 vendor/github.com/yuin/goldmark/parser/list_item.go generated vendored Normal file
@ -0,0 +1,85 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type listItemParser struct {
|
||||
}
|
||||
|
||||
var defaultListItemParser = &listItemParser{}
|
||||
|
||||
// NewListItemParser returns a new BlockParser that
|
||||
// parses list items.
|
||||
func NewListItemParser() BlockParser {
|
||||
return defaultListItemParser
|
||||
}
|
||||
|
||||
var skipListParser = NewContextKey()
|
||||
var skipListParserValue interface{} = true
|
||||
|
||||
func (b *listItemParser) Trigger() []byte {
|
||||
return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||
}
|
||||
|
||||
func (b *listItemParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
list, lok := parent.(*ast.List)
|
||||
if !lok { // list item must be a child of a list
|
||||
return nil, NoChildren
|
||||
}
|
||||
offset := lastOffset(list)
|
||||
line, _ := reader.PeekLine()
|
||||
match, typ := matchesListItem(line, false)
|
||||
if typ == notList {
|
||||
return nil, NoChildren
|
||||
}
|
||||
if match[1]-offset > 3 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
itemOffset := calcListOffset(line, match)
|
||||
node := ast.NewListItem(match[3] + itemOffset)
|
||||
if match[4] < 0 || match[5]-match[4] == 1 {
|
||||
return node, NoChildren
|
||||
}
|
||||
|
||||
pos, padding := util.IndentPosition(line[match[4]:], match[4], itemOffset)
|
||||
child := match[3] + pos
|
||||
reader.AdvanceAndSetPadding(child, padding)
|
||||
return node, HasChildren
|
||||
}
|
||||
|
||||
func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
line, _ := reader.PeekLine()
|
||||
if util.IsBlank(line) {
|
||||
return Continue | HasChildren
|
||||
}
|
||||
|
||||
indent, _ := util.IndentWidth(line, reader.LineOffset())
|
||||
offset := lastOffset(node.Parent())
|
||||
if indent < offset && indent < 4 {
|
||||
_, typ := matchesListItem(line, true)
|
||||
// new list item found
|
||||
if typ != notList {
|
||||
pc.Set(skipListParser, skipListParserValue)
|
||||
}
|
||||
return Close
|
||||
}
|
||||
pos, padding := util.IndentPosition(line, reader.LineOffset(), offset)
|
||||
reader.AdvanceAndSetPadding(pos, padding)
|
||||
|
||||
return Continue | HasChildren
|
||||
}
|
||||
|
||||
func (b *listItemParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (b *listItemParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *listItemParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
71 vendor/github.com/yuin/goldmark/parser/paragraph.go generated vendored Normal file
@ -0,0 +1,71 @@
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
)

type paragraphParser struct {
}

var defaultParagraphParser = &paragraphParser{}

// NewParagraphParser returns a new BlockParser that
// parses paragraphs.
func NewParagraphParser() BlockParser {
	return defaultParagraphParser
}

func (b *paragraphParser) Trigger() []byte {
	return nil
}

func (b *paragraphParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	_, segment := reader.PeekLine()
	segment = segment.TrimLeftSpace(reader.Source())
	if segment.IsEmpty() {
		return nil, NoChildren
	}
	node := ast.NewParagraph()
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return node, NoChildren
}

func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	_, segment := reader.PeekLine()
	segment = segment.TrimLeftSpace(reader.Source())
	if segment.IsEmpty() {
		return Close
	}
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return Continue | NoChildren
}

func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
	parent := node.Parent()
	if parent == nil {
		// paragraph has been transformed
		return
	}
	lines := node.Lines()
	if lines.Len() != 0 {
		// trim trailing spaces
		length := lines.Len()
		lastLine := node.Lines().At(length - 1)
		node.Lines().Set(length-1, lastLine.TrimRightSpace(reader.Source()))
	}
	if lines.Len() == 0 {
		node.Parent().RemoveChild(node.Parent(), node)
		return
	}
}

func (b *paragraphParser) CanInterruptParagraph() bool {
	return false
}

func (b *paragraphParser) CanAcceptIndentedLine() bool {
	return false
}
1211
vendor/github.com/yuin/goldmark/parser/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
108
vendor/github.com/yuin/goldmark/parser/raw_html.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
type rawHTMLParser struct {
|
||||
}
|
||||
|
||||
var defaultRawHTMLParser = &rawHTMLParser{}
|
||||
|
||||
// NewRawHTMLParser returns a new InlineParser that can parse
|
||||
// inline HTML.
|
||||
func NewRawHTMLParser() InlineParser {
|
||||
return defaultRawHTMLParser
|
||||
}
|
||||
|
||||
func (s *rawHTMLParser) Trigger() []byte {
|
||||
return []byte{'<'}
|
||||
}
|
||||
|
||||
func (s *rawHTMLParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||
line, _ := block.PeekLine()
|
||||
if len(line) > 1 && util.IsAlphaNumeric(line[1]) {
|
||||
return s.parseMultiLineRegexp(openTagRegexp, block, pc)
|
||||
}
|
||||
if len(line) > 2 && line[1] == '/' && util.IsAlphaNumeric(line[2]) {
|
||||
return s.parseMultiLineRegexp(closeTagRegexp, block, pc)
|
||||
}
|
||||
if bytes.HasPrefix(line, []byte("<!--")) {
|
||||
return s.parseMultiLineRegexp(commentRegexp, block, pc)
|
||||
}
|
||||
if bytes.HasPrefix(line, []byte("<?")) {
|
||||
return s.parseSingleLineRegexp(processingInstructionRegexp, block, pc)
|
||||
}
|
||||
if len(line) > 2 && line[1] == '!' && line[2] >= 'A' && line[2] <= 'Z' {
|
||||
return s.parseSingleLineRegexp(declRegexp, block, pc)
|
||||
}
|
||||
if bytes.HasPrefix(line, []byte("<![CDATA[")) {
|
||||
return s.parseMultiLineRegexp(cdataRegexp, block, pc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var tagnamePattern = `([A-Za-z][A-Za-z0-9-]*)`
|
||||
var attributePattern = `(?:\s+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:\s*=\s*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`
|
||||
var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*\s*/?>`)
|
||||
var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + `\s*>`)
|
||||
var commentRegexp = regexp.MustCompile(`^<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->`)
|
||||
var processingInstructionRegexp = regexp.MustCompile(`^(?:<\?).*?(?:\?>)`)
|
||||
var declRegexp = regexp.MustCompile(`^<![A-Z]+\s+[^>]*>`)
|
||||
var cdataRegexp = regexp.MustCompile(`<!\[CDATA\[[\s\S]*?\]\]>`)
|
||||
|
||||
func (s *rawHTMLParser) parseSingleLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
|
||||
line, segment := block.PeekLine()
|
||||
match := reg.FindSubmatchIndex(line)
|
||||
if match == nil {
|
||||
return nil
|
||||
}
|
||||
node := ast.NewRawHTML()
|
||||
node.Segments.Append(segment.WithStop(segment.Start + match[1]))
|
||||
block.Advance(match[1])
|
||||
return node
|
||||
}
|
||||
|
||||
var dummyMatch = [][]byte{}
|
||||
|
||||
func (s *rawHTMLParser) parseMultiLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
|
||||
sline, ssegment := block.Position()
|
||||
if block.Match(reg) {
|
||||
node := ast.NewRawHTML()
|
||||
eline, esegment := block.Position()
|
||||
block.SetPosition(sline, ssegment)
|
||||
for {
|
||||
line, segment := block.PeekLine()
|
||||
if line == nil {
|
||||
break
|
||||
}
|
||||
l, _ := block.Position()
|
||||
start := segment.Start
|
||||
if l == sline {
|
||||
start = ssegment.Start
|
||||
}
|
||||
end := segment.Stop
|
||||
if l == eline {
|
||||
end = esegment.Start
|
||||
}
|
||||
|
||||
node.Segments.Append(text.NewSegment(start, end))
|
||||
if l == eline {
|
||||
block.Advance(end - start)
|
||||
break
|
||||
} else {
|
||||
block.AdvanceLine()
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *rawHTMLParser) CloseBlock(parent ast.Node, pc Context) {
|
||||
// nothing to do
|
||||
}
|
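The tag and attribute patterns above are plain regular expressions, so they can be exercised on their own. Below is an editorial sketch, not part of the vendored sources, that rebuilds the open-tag pattern with the standard regexp package and checks a few sample inputs:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same building blocks as tagnamePattern / attributePattern / openTagRegexp above.
	tagname := `([A-Za-z][A-Za-z0-9-]*)`
	attribute := `(?:\s+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:\s*=\s*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`
	openTag := regexp.MustCompile("^<" + tagname + attribute + `*\s*/?>`)

	for _, s := range []string{`<a href="https://gitea.com">`, `<br/>`, `<1notatag>`} {
		fmt.Printf("%-32s matches open tag: %v\n", s, openTag.MatchString(s))
	}
}

The first two inputs match (a tag name followed by optional attributes and an optional self-closing slash); the last does not, because a tag name must start with a letter.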
126
vendor/github.com/yuin/goldmark/parser/setext_headings.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var temporaryParagraphKey = NewContextKey()
|
||||
|
||||
type setextHeadingParser struct {
|
||||
HeadingConfig
|
||||
}
|
||||
|
||||
func matchesSetextHeadingBar(line []byte) (byte, bool) {
|
||||
start := 0
|
||||
end := len(line)
|
||||
space := util.TrimLeftLength(line, []byte{' '})
|
||||
if space > 3 {
|
||||
return 0, false
|
||||
}
|
||||
start += space
|
||||
level1 := util.TrimLeftLength(line[start:end], []byte{'='})
|
||||
c := byte('=')
|
||||
var level2 int
|
||||
if level1 == 0 {
|
||||
level2 = util.TrimLeftLength(line[start:end], []byte{'-'})
|
||||
c = '-'
|
||||
}
|
||||
if util.IsSpace(line[end-1]) {
|
||||
end -= util.TrimRightSpaceLength(line[start:end])
|
||||
}
|
||||
if !((level1 > 0 && start+level1 == end) || (level2 > 0 && start+level2 == end)) {
|
||||
return 0, false
|
||||
}
|
||||
return c, true
|
||||
}
|
||||
|
||||
// NewSetextHeadingParser returns a new BlockParser that can parse Setext headings.
|
||||
func NewSetextHeadingParser(opts ...HeadingOption) BlockParser {
|
||||
p := &setextHeadingParser{}
|
||||
for _, o := range opts {
|
||||
o.SetHeadingOption(&p.HeadingConfig)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) Trigger() []byte {
|
||||
return []byte{'-', '='}
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
last := pc.LastOpenedBlock().Node
|
||||
if last == nil {
|
||||
return nil, NoChildren
|
||||
}
|
||||
paragraph, ok := last.(*ast.Paragraph)
|
||||
if !ok || paragraph.Parent() != parent {
|
||||
return nil, NoChildren
|
||||
}
|
||||
line, segment := reader.PeekLine()
|
||||
c, ok := matchesSetextHeadingBar(line)
|
||||
if !ok {
|
||||
return nil, NoChildren
|
||||
}
|
||||
level := 1
|
||||
if c == '-' {
|
||||
level = 2
|
||||
}
|
||||
node := ast.NewHeading(level)
|
||||
node.Lines().Append(segment)
|
||||
pc.Set(temporaryParagraphKey, last)
|
||||
return node, NoChildren | RequireParagraph
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
return Close
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
heading := node.(*ast.Heading)
|
||||
segment := node.Lines().At(0)
|
||||
heading.Lines().Clear()
|
||||
tmp := pc.Get(temporaryParagraphKey).(*ast.Paragraph)
|
||||
pc.Set(temporaryParagraphKey, nil)
|
||||
if tmp.Lines().Len() == 0 {
|
||||
next := heading.NextSibling()
|
||||
segment = segment.TrimLeftSpace(reader.Source())
|
||||
if next == nil || !ast.IsParagraph(next) {
|
||||
para := ast.NewParagraph()
|
||||
para.Lines().Append(segment)
|
||||
heading.Parent().InsertAfter(heading.Parent(), heading, para)
|
||||
} else {
|
||||
next.(ast.Node).Lines().Unshift(segment)
|
||||
}
|
||||
heading.Parent().RemoveChild(heading.Parent(), heading)
|
||||
} else {
|
||||
heading.SetLines(tmp.Lines())
|
||||
heading.SetBlankPreviousLines(tmp.HasBlankPreviousLines())
|
||||
tp := tmp.Parent()
|
||||
if tp != nil {
|
||||
tp.RemoveChild(tp, tmp)
|
||||
}
|
||||
}
|
||||
|
||||
if b.Attribute {
|
||||
parseLastLineAttributes(node, reader, pc)
|
||||
}
|
||||
|
||||
if b.AutoHeadingID {
|
||||
id, ok := node.AttributeString("id")
|
||||
if !ok {
|
||||
generateAutoHeadingID(heading, reader, pc)
|
||||
} else {
|
||||
pc.IDs().Put(id.([]byte))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
75
vendor/github.com/yuin/goldmark/parser/thematic_break.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

type thematicBreakPraser struct {
}

var defaultThematicBreakPraser = &thematicBreakPraser{}

// NewThematicBreakParser returns a new BlockParser that
// parses thematic breaks.
func NewThematicBreakParser() BlockParser {
	return defaultThematicBreakPraser
}

func isThematicBreak(line []byte, offset int) bool {
	w, pos := util.IndentWidth(line, offset)
	if w > 3 {
		return false
	}
	mark := byte(0)
	count := 0
	for i := pos; i < len(line); i++ {
		c := line[i]
		if util.IsSpace(c) {
			continue
		}
		if mark == 0 {
			mark = c
			count = 1
			if mark == '*' || mark == '-' || mark == '_' {
				continue
			}
			return false
		}
		if c != mark {
			return false
		}
		count++
	}
	return count > 2
}

func (b *thematicBreakPraser) Trigger() []byte {
	return []byte{'-', '*', '_'}
}

func (b *thematicBreakPraser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	line, segment := reader.PeekLine()
	if isThematicBreak(line, reader.LineOffset()) {
		reader.Advance(segment.Len() - 1)
		return ast.NewThematicBreak(), NoChildren
	}
	return nil, NoChildren
}

func (b *thematicBreakPraser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	return Close
}

func (b *thematicBreakPraser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

func (b *thematicBreakPraser) CanInterruptParagraph() bool {
	return true
}

func (b *thematicBreakPraser) CanAcceptIndentedLine() bool {
	return false
}
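As a rough standalone restatement of the rule implemented by isThematicBreak above (an editorial sketch, not part of the vendored sources, and simplified to space-only indentation): at most three columns of indentation, then three or more of the same marker character among '*', '-' and '_', with whitespace allowed in between and nothing else on the line.

package main

import "fmt"

// looksLikeThematicBreak mirrors the check above for plain strings.
func looksLikeThematicBreak(line string) bool {
	indent := 0
	for indent < len(line) && line[indent] == ' ' {
		indent++
	}
	if indent > 3 {
		return false
	}
	var mark byte
	count := 0
	for i := indent; i < len(line); i++ {
		c := line[i]
		if c == ' ' || c == '\t' {
			continue
		}
		if mark == 0 {
			if c != '*' && c != '-' && c != '_' {
				return false
			}
			mark = c
		} else if c != mark {
			return false
		}
		count++
	}
	return count >= 3
}

func main() {
	for _, s := range []string{"---", " * * *", "--", "-*-"} {
		fmt.Printf("%-8q %v\n", s, looksLikeThematicBreak(s))
	}
}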
804
vendor/github.com/yuin/goldmark/renderer/html/html.go
generated
vendored
Normal file
@@ -0,0 +1,804 @@
|
||||
package html
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/renderer"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A Config struct has configurations for the HTML based renderers.
|
||||
type Config struct {
|
||||
Writer Writer
|
||||
HardWraps bool
|
||||
XHTML bool
|
||||
Unsafe bool
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config with defaults.
|
||||
func NewConfig() Config {
|
||||
return Config{
|
||||
Writer: DefaultWriter,
|
||||
HardWraps: false,
|
||||
XHTML: false,
|
||||
Unsafe: false,
|
||||
}
|
||||
}
|
||||
|
||||
// SetOption implements renderer.NodeRenderer.SetOption.
|
||||
func (c *Config) SetOption(name renderer.OptionName, value interface{}) {
|
||||
switch name {
|
||||
case optHardWraps:
|
||||
c.HardWraps = value.(bool)
|
||||
case optXHTML:
|
||||
c.XHTML = value.(bool)
|
||||
case optUnsafe:
|
||||
c.Unsafe = value.(bool)
|
||||
case optTextWriter:
|
||||
c.Writer = value.(Writer)
|
||||
}
|
||||
}
|
||||
|
||||
// An Option interface sets options for HTML based renderers.
|
||||
type Option interface {
|
||||
SetHTMLOption(*Config)
|
||||
}
|
||||
|
||||
// TextWriter is an option name used in WithWriter.
|
||||
const optTextWriter renderer.OptionName = "Writer"
|
||||
|
||||
type withWriter struct {
|
||||
value Writer
|
||||
}
|
||||
|
||||
func (o *withWriter) SetConfig(c *renderer.Config) {
|
||||
c.Options[optTextWriter] = o.value
|
||||
}
|
||||
|
||||
func (o *withWriter) SetHTMLOption(c *Config) {
|
||||
c.Writer = o.value
|
||||
}
|
||||
|
||||
// WithWriter is a functional option that allows you to set the given writer to
|
||||
// the renderer.
|
||||
func WithWriter(writer Writer) interface {
|
||||
renderer.Option
|
||||
Option
|
||||
} {
|
||||
return &withWriter{writer}
|
||||
}
|
||||
|
||||
// HardWraps is an option name used in WithHardWraps.
|
||||
const optHardWraps renderer.OptionName = "HardWraps"
|
||||
|
||||
type withHardWraps struct {
|
||||
}
|
||||
|
||||
func (o *withHardWraps) SetConfig(c *renderer.Config) {
|
||||
c.Options[optHardWraps] = true
|
||||
}
|
||||
|
||||
func (o *withHardWraps) SetHTMLOption(c *Config) {
|
||||
c.HardWraps = true
|
||||
}
|
||||
|
||||
// WithHardWraps is a functional option that indicates whether softline breaks
|
||||
// should be rendered as '<br>'.
|
||||
func WithHardWraps() interface {
|
||||
renderer.Option
|
||||
Option
|
||||
} {
|
||||
return &withHardWraps{}
|
||||
}
|
||||
|
||||
// XHTML is an option name used in WithXHTML.
|
||||
const optXHTML renderer.OptionName = "XHTML"
|
||||
|
||||
type withXHTML struct {
|
||||
}
|
||||
|
||||
func (o *withXHTML) SetConfig(c *renderer.Config) {
|
||||
c.Options[optXHTML] = true
|
||||
}
|
||||
|
||||
func (o *withXHTML) SetHTMLOption(c *Config) {
|
||||
c.XHTML = true
|
||||
}
|
||||
|
||||
// WithXHTML is a functional option that indicates that nodes should be rendered in
|
||||
// xhtml instead of HTML5.
|
||||
func WithXHTML() interface {
|
||||
Option
|
||||
renderer.Option
|
||||
} {
|
||||
return &withXHTML{}
|
||||
}
|
||||
|
||||
// Unsafe is an option name used in WithUnsafe.
|
||||
const optUnsafe renderer.OptionName = "Unsafe"
|
||||
|
||||
type withUnsafe struct {
|
||||
}
|
||||
|
||||
func (o *withUnsafe) SetConfig(c *renderer.Config) {
|
||||
c.Options[optUnsafe] = true
|
||||
}
|
||||
|
||||
func (o *withUnsafe) SetHTMLOption(c *Config) {
|
||||
c.Unsafe = true
|
||||
}
|
||||
|
||||
// WithUnsafe is a functional option that renders dangerous content
|
||||
// (raw HTML and potentially dangerous links) as-is.
|
||||
func WithUnsafe() interface {
|
||||
renderer.Option
|
||||
Option
|
||||
} {
|
||||
return &withUnsafe{}
|
||||
}
|
||||
|
||||
// A Renderer struct is an implementation of renderer.NodeRenderer that renders
|
||||
// nodes as (X)HTML.
|
||||
type Renderer struct {
|
||||
Config
|
||||
}
|
||||
|
||||
// NewRenderer returns a new Renderer with given options.
|
||||
func NewRenderer(opts ...Option) renderer.NodeRenderer {
|
||||
r := &Renderer{
|
||||
Config: NewConfig(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt.SetHTMLOption(&r.Config)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// RegisterFuncs implements NodeRenderer.RegisterFuncs.
|
||||
func (r *Renderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||
// blocks
|
||||
|
||||
reg.Register(ast.KindDocument, r.renderDocument)
|
||||
reg.Register(ast.KindHeading, r.renderHeading)
|
||||
reg.Register(ast.KindBlockquote, r.renderBlockquote)
|
||||
reg.Register(ast.KindCodeBlock, r.renderCodeBlock)
|
||||
reg.Register(ast.KindFencedCodeBlock, r.renderFencedCodeBlock)
|
||||
reg.Register(ast.KindHTMLBlock, r.renderHTMLBlock)
|
||||
reg.Register(ast.KindList, r.renderList)
|
||||
reg.Register(ast.KindListItem, r.renderListItem)
|
||||
reg.Register(ast.KindParagraph, r.renderParagraph)
|
||||
reg.Register(ast.KindTextBlock, r.renderTextBlock)
|
||||
reg.Register(ast.KindThematicBreak, r.renderThematicBreak)
|
||||
|
||||
// inlines
|
||||
|
||||
reg.Register(ast.KindAutoLink, r.renderAutoLink)
|
||||
reg.Register(ast.KindCodeSpan, r.renderCodeSpan)
|
||||
reg.Register(ast.KindEmphasis, r.renderEmphasis)
|
||||
reg.Register(ast.KindImage, r.renderImage)
|
||||
reg.Register(ast.KindLink, r.renderLink)
|
||||
reg.Register(ast.KindRawHTML, r.renderRawHTML)
|
||||
reg.Register(ast.KindText, r.renderText)
|
||||
reg.Register(ast.KindString, r.renderString)
|
||||
}
|
||||
|
||||
func (r *Renderer) writeLines(w util.BufWriter, source []byte, n ast.Node) {
|
||||
l := n.Lines().Len()
|
||||
for i := 0; i < l; i++ {
|
||||
line := n.Lines().At(i)
|
||||
r.Writer.RawWrite(w, line.Value(source))
|
||||
}
|
||||
}
|
||||
|
||||
// GlobalAttributeFilter defines attribute names which any elements can have.
|
||||
var GlobalAttributeFilter = util.NewBytesFilter(
|
||||
[]byte("accesskey"),
|
||||
[]byte("autocapitalize"),
|
||||
[]byte("class"),
|
||||
[]byte("contenteditable"),
|
||||
[]byte("contextmenu"),
|
||||
[]byte("dir"),
|
||||
[]byte("draggable"),
|
||||
[]byte("dropzone"),
|
||||
[]byte("hidden"),
|
||||
[]byte("id"),
|
||||
[]byte("itemprop"),
|
||||
[]byte("lang"),
|
||||
[]byte("slot"),
|
||||
[]byte("spellcheck"),
|
||||
[]byte("style"),
|
||||
[]byte("tabindex"),
|
||||
[]byte("title"),
|
||||
[]byte("translate"),
|
||||
)
|
||||
|
||||
func (r *Renderer) renderDocument(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
// nothing to do
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// HeadingAttributeFilter defines attribute names which heading elements can have
|
||||
var HeadingAttributeFilter = GlobalAttributeFilter
|
||||
|
||||
func (r *Renderer) renderHeading(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.Heading)
|
||||
if entering {
|
||||
_, _ = w.WriteString("<h")
|
||||
_ = w.WriteByte("0123456"[n.Level])
|
||||
if n.Attributes() != nil {
|
||||
RenderAttributes(w, node, HeadingAttributeFilter)
|
||||
}
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("</h")
|
||||
_ = w.WriteByte("0123456"[n.Level])
|
||||
_, _ = w.WriteString(">\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// BlockquoteAttributeFilter defines attribute names which blockquote elements can have
|
||||
var BlockquoteAttributeFilter = GlobalAttributeFilter.Extend(
|
||||
[]byte("cite"),
|
||||
)
|
||||
|
||||
func (r *Renderer) renderBlockquote(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<blockquote")
|
||||
RenderAttributes(w, n, BlockquoteAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("<blockquote>\n")
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</blockquote>\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderCodeBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if entering {
|
||||
_, _ = w.WriteString("<pre><code>")
|
||||
r.writeLines(w, source, n)
|
||||
} else {
|
||||
_, _ = w.WriteString("</code></pre>\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.FencedCodeBlock)
|
||||
if entering {
|
||||
_, _ = w.WriteString("<pre><code")
|
||||
language := n.Language(source)
|
||||
if language != nil {
|
||||
_, _ = w.WriteString(" class=\"language-")
|
||||
r.Writer.Write(w, language)
|
||||
_, _ = w.WriteString("\"")
|
||||
}
|
||||
_ = w.WriteByte('>')
|
||||
r.writeLines(w, source, n)
|
||||
} else {
|
||||
_, _ = w.WriteString("</code></pre>\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderHTMLBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.HTMLBlock)
|
||||
if entering {
|
||||
if r.Unsafe {
|
||||
l := n.Lines().Len()
|
||||
for i := 0; i < l; i++ {
|
||||
line := n.Lines().At(i)
|
||||
_, _ = w.Write(line.Value(source))
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("<!-- raw HTML omitted -->\n")
|
||||
}
|
||||
} else {
|
||||
if n.HasClosure() {
|
||||
if r.Unsafe {
|
||||
closure := n.ClosureLine
|
||||
_, _ = w.Write(closure.Value(source))
|
||||
} else {
|
||||
_, _ = w.WriteString("<!-- raw HTML omitted -->\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// ListAttributeFilter defines attribute names which list elements can have.
|
||||
var ListAttributeFilter = GlobalAttributeFilter.Extend(
|
||||
[]byte("start"),
|
||||
[]byte("reversed"),
|
||||
)
|
||||
|
||||
func (r *Renderer) renderList(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.List)
|
||||
tag := "ul"
|
||||
if n.IsOrdered() {
|
||||
tag = "ol"
|
||||
}
|
||||
if entering {
|
||||
_ = w.WriteByte('<')
|
||||
_, _ = w.WriteString(tag)
|
||||
if n.IsOrdered() && n.Start != 1 {
|
||||
fmt.Fprintf(w, " start=\"%d\"", n.Start)
|
||||
}
|
||||
if n.Attributes() != nil {
|
||||
RenderAttributes(w, n, ListAttributeFilter)
|
||||
}
|
||||
_, _ = w.WriteString(">\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("</")
|
||||
_, _ = w.WriteString(tag)
|
||||
_, _ = w.WriteString(">\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// ListItemAttributeFilter defines attribute names which list item elements can have.
|
||||
var ListItemAttributeFilter = GlobalAttributeFilter.Extend(
|
||||
[]byte("value"),
|
||||
)
|
||||
|
||||
func (r *Renderer) renderListItem(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<li")
|
||||
RenderAttributes(w, n, ListItemAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("<li>")
|
||||
}
|
||||
fc := n.FirstChild()
|
||||
if fc != nil {
|
||||
if _, ok := fc.(*ast.TextBlock); !ok {
|
||||
_ = w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</li>\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// ParagraphAttributeFilter defines attribute names which paragraph elements can have.
|
||||
var ParagraphAttributeFilter = GlobalAttributeFilter
|
||||
|
||||
func (r *Renderer) renderParagraph(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<p")
|
||||
RenderAttributes(w, n, ParagraphAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("<p>")
|
||||
}
|
||||
} else {
|
||||
_, _ = w.WriteString("</p>\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderTextBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if !entering {
|
||||
if _, ok := n.NextSibling().(ast.Node); ok && n.FirstChild() != nil {
|
||||
_ = w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// ThematicAttributeFilter defines attribute names which hr elements can have.
|
||||
var ThematicAttributeFilter = GlobalAttributeFilter.Extend(
|
||||
[]byte("align"), // [Deprecated]
|
||||
[]byte("color"), // [Not Standardized]
|
||||
[]byte("noshade"), // [Deprecated]
|
||||
[]byte("size"), // [Deprecated]
|
||||
[]byte("width"), // [Deprecated]
|
||||
)
|
||||
|
||||
func (r *Renderer) renderThematicBreak(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if !entering {
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
_, _ = w.WriteString("<hr")
|
||||
if n.Attributes() != nil {
|
||||
RenderAttributes(w, n, ThematicAttributeFilter)
|
||||
}
|
||||
if r.XHTML {
|
||||
_, _ = w.WriteString(" />\n")
|
||||
} else {
|
||||
_, _ = w.WriteString(">\n")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// LinkAttributeFilter defines attribute names which link elements can have.
|
||||
var LinkAttributeFilter = GlobalAttributeFilter.Extend(
|
||||
[]byte("download"),
|
||||
// []byte("href"),
|
||||
[]byte("hreflang"),
|
||||
[]byte("media"),
|
||||
[]byte("ping"),
|
||||
[]byte("referrerpolicy"),
|
||||
[]byte("rel"),
|
||||
[]byte("shape"),
|
||||
[]byte("target"),
|
||||
)
|
||||
|
||||
func (r *Renderer) renderAutoLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.AutoLink)
|
||||
if !entering {
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
_, _ = w.WriteString(`<a href="`)
|
||||
url := n.URL(source)
|
||||
label := n.Label(source)
|
||||
if n.AutoLinkType == ast.AutoLinkEmail && !bytes.HasPrefix(bytes.ToLower(url), []byte("mailto:")) {
|
||||
_, _ = w.WriteString("mailto:")
|
||||
}
|
||||
_, _ = w.Write(util.EscapeHTML(util.URLEscape(url, false)))
|
||||
if n.Attributes() != nil {
|
||||
_ = w.WriteByte('"')
|
||||
RenderAttributes(w, n, LinkAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString(`">`)
|
||||
}
|
||||
_, _ = w.Write(util.EscapeHTML(label))
|
||||
_, _ = w.WriteString(`</a>`)
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// CodeAttributeFilter defines attribute names which code elements can have.
|
||||
var CodeAttributeFilter = GlobalAttributeFilter
|
||||
|
||||
func (r *Renderer) renderCodeSpan(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if entering {
|
||||
if n.Attributes() != nil {
|
||||
_, _ = w.WriteString("<code")
|
||||
RenderAttributes(w, n, CodeAttributeFilter)
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("<code>")
|
||||
}
|
||||
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||
segment := c.(*ast.Text).Segment
|
||||
value := segment.Value(source)
|
||||
if bytes.HasSuffix(value, []byte("\n")) {
|
||||
r.Writer.RawWrite(w, value[:len(value)-1])
|
||||
if c != n.LastChild() {
|
||||
r.Writer.RawWrite(w, []byte(" "))
|
||||
}
|
||||
} else {
|
||||
r.Writer.RawWrite(w, value)
|
||||
}
|
||||
}
|
||||
return ast.WalkSkipChildren, nil
|
||||
}
|
||||
_, _ = w.WriteString("</code>")
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// EmphasisAttributeFilter defines attribute names which emphasis elements can have.
|
||||
var EmphasisAttributeFilter = GlobalAttributeFilter
|
||||
|
||||
func (r *Renderer) renderEmphasis(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.Emphasis)
|
||||
tag := "em"
|
||||
if n.Level == 2 {
|
||||
tag = "strong"
|
||||
}
|
||||
if entering {
|
||||
_ = w.WriteByte('<')
|
||||
_, _ = w.WriteString(tag)
|
||||
if n.Attributes() != nil {
|
||||
RenderAttributes(w, n, EmphasisAttributeFilter)
|
||||
}
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("</")
|
||||
_, _ = w.WriteString(tag)
|
||||
_ = w.WriteByte('>')
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
n := node.(*ast.Link)
|
||||
if entering {
|
||||
_, _ = w.WriteString("<a href=\"")
|
||||
if r.Unsafe || !IsDangerousURL(n.Destination) {
|
||||
_, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
|
||||
}
|
||||
_ = w.WriteByte('"')
|
||||
if n.Title != nil {
|
||||
_, _ = w.WriteString(` title="`)
|
||||
r.Writer.Write(w, n.Title)
|
||||
_ = w.WriteByte('"')
|
||||
}
|
||||
if n.Attributes() != nil {
|
||||
RenderAttributes(w, n, LinkAttributeFilter)
|
||||
}
|
||||
_ = w.WriteByte('>')
|
||||
} else {
|
||||
_, _ = w.WriteString("</a>")
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
// ImageAttributeFilter defines attribute names which image elements can have.
|
||||
var ImageAttributeFilter = GlobalAttributeFilter.Extend(
|
||||
[]byte("align"),
|
||||
[]byte("border"),
|
||||
[]byte("crossorigin"),
|
||||
[]byte("decoding"),
|
||||
[]byte("height"),
|
||||
[]byte("importance"),
|
||||
[]byte("intrinsicsize"),
|
||||
[]byte("ismap"),
|
||||
[]byte("loading"),
|
||||
[]byte("referrerpolicy"),
|
||||
[]byte("sizes"),
|
||||
[]byte("srcset"),
|
||||
[]byte("usemap"),
|
||||
[]byte("width"),
|
||||
)
|
||||
|
||||
func (r *Renderer) renderImage(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if !entering {
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
n := node.(*ast.Image)
|
||||
_, _ = w.WriteString("<img src=\"")
|
||||
if r.Unsafe || !IsDangerousURL(n.Destination) {
|
||||
_, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
|
||||
}
|
||||
_, _ = w.WriteString(`" alt="`)
|
||||
_, _ = w.Write(util.EscapeHTML(n.Text(source)))
|
||||
_ = w.WriteByte('"')
|
||||
if n.Title != nil {
|
||||
_, _ = w.WriteString(` title="`)
|
||||
r.Writer.Write(w, n.Title)
|
||||
_ = w.WriteByte('"')
|
||||
}
|
||||
if n.Attributes() != nil {
|
||||
RenderAttributes(w, n, ImageAttributeFilter)
|
||||
}
|
||||
if r.XHTML {
|
||||
_, _ = w.WriteString(" />")
|
||||
} else {
|
||||
_, _ = w.WriteString(">")
|
||||
}
|
||||
return ast.WalkSkipChildren, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderRawHTML(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if !entering {
|
||||
return ast.WalkSkipChildren, nil
|
||||
}
|
||||
if r.Unsafe {
|
||||
n := node.(*ast.RawHTML)
|
||||
l := n.Segments.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
segment := n.Segments.At(i)
|
||||
_, _ = w.Write(segment.Value(source))
|
||||
}
|
||||
return ast.WalkSkipChildren, nil
|
||||
}
|
||||
_, _ = w.WriteString("<!-- raw HTML omitted -->")
|
||||
return ast.WalkSkipChildren, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderText(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if !entering {
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
n := node.(*ast.Text)
|
||||
segment := n.Segment
|
||||
if n.IsRaw() {
|
||||
r.Writer.RawWrite(w, segment.Value(source))
|
||||
} else {
|
||||
r.Writer.Write(w, segment.Value(source))
|
||||
if n.HardLineBreak() || (n.SoftLineBreak() && r.HardWraps) {
|
||||
if r.XHTML {
|
||||
_, _ = w.WriteString("<br />\n")
|
||||
} else {
|
||||
_, _ = w.WriteString("<br>\n")
|
||||
}
|
||||
} else if n.SoftLineBreak() {
|
||||
_ = w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
func (r *Renderer) renderString(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
if !entering {
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
n := node.(*ast.String)
|
||||
if n.IsCode() {
|
||||
_, _ = w.Write(n.Value)
|
||||
} else {
|
||||
if n.IsRaw() {
|
||||
r.Writer.RawWrite(w, n.Value)
|
||||
} else {
|
||||
r.Writer.Write(w, n.Value)
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue, nil
|
||||
}
|
||||
|
||||
var dataPrefix = []byte("data-")
|
||||
|
||||
// RenderAttributes renders given node's attributes.
|
||||
// You can specify attribute names to render by the filter.
|
||||
// If filter is nil, RenderAttributes renders all attributes.
|
||||
func RenderAttributes(w util.BufWriter, node ast.Node, filter util.BytesFilter) {
|
||||
for _, attr := range node.Attributes() {
|
||||
if filter != nil && !filter.Contains(attr.Name) {
|
||||
if !bytes.HasPrefix(attr.Name, dataPrefix) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
_, _ = w.WriteString(" ")
|
||||
_, _ = w.Write(attr.Name)
|
||||
_, _ = w.WriteString(`="`)
|
||||
// TODO: convert numeric values to strings
|
||||
_, _ = w.Write(util.EscapeHTML(attr.Value.([]byte)))
|
||||
_ = w.WriteByte('"')
|
||||
}
|
||||
}
|
||||
|
||||
// A Writer interface writes textual contents to a writer.
|
||||
type Writer interface {
|
||||
// Write writes the given source to writer with resolving references and unescaping
|
||||
// backslash escaped characters.
|
||||
Write(writer util.BufWriter, source []byte)
|
||||
|
||||
// RawWrite writes the given source to writer without resolving references and
|
||||
// unescaping backslash escaped characters.
|
||||
RawWrite(writer util.BufWriter, source []byte)
|
||||
}
|
||||
|
||||
type defaultWriter struct {
|
||||
}
|
||||
|
||||
func escapeRune(writer util.BufWriter, r rune) {
|
||||
if r < 256 {
|
||||
v := util.EscapeHTMLByte(byte(r))
|
||||
if v != nil {
|
||||
_, _ = writer.Write(v)
|
||||
return
|
||||
}
|
||||
}
|
||||
_, _ = writer.WriteRune(util.ToValidRune(r))
|
||||
}
|
||||
|
||||
func (d *defaultWriter) RawWrite(writer util.BufWriter, source []byte) {
|
||||
n := 0
|
||||
l := len(source)
|
||||
for i := 0; i < l; i++ {
|
||||
v := util.EscapeHTMLByte(source[i])
|
||||
if v != nil {
|
||||
_, _ = writer.Write(source[i-n : i])
|
||||
n = 0
|
||||
_, _ = writer.Write(v)
|
||||
continue
|
||||
}
|
||||
n++
|
||||
}
|
||||
if n != 0 {
|
||||
_, _ = writer.Write(source[l-n:])
|
||||
}
|
||||
}
|
||||
|
||||
func (d *defaultWriter) Write(writer util.BufWriter, source []byte) {
|
||||
escaped := false
|
||||
var ok bool
|
||||
limit := len(source)
|
||||
n := 0
|
||||
for i := 0; i < limit; i++ {
|
||||
c := source[i]
|
||||
if escaped {
|
||||
if util.IsPunct(c) {
|
||||
d.RawWrite(writer, source[n:i-1])
|
||||
n = i
|
||||
escaped = false
|
||||
continue
|
||||
}
|
||||
}
|
||||
if c == '&' {
|
||||
pos := i
|
||||
next := i + 1
|
||||
if next < limit && source[next] == '#' {
|
||||
nnext := next + 1
|
||||
if nnext < limit {
|
||||
nc := source[nnext]
|
||||
// code point like #x22;
|
||||
if nnext < limit && nc == 'x' || nc == 'X' {
|
||||
start := nnext + 1
|
||||
i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsHexDecimal)
|
||||
if ok && i < limit && source[i] == ';' {
|
||||
v, _ := strconv.ParseUint(util.BytesToReadOnlyString(source[start:i]), 16, 32)
|
||||
d.RawWrite(writer, source[n:pos])
|
||||
n = i + 1
|
||||
escapeRune(writer, rune(v))
|
||||
continue
|
||||
}
|
||||
// code point like #1234;
|
||||
} else if nc >= '0' && nc <= '9' {
|
||||
start := nnext
|
||||
i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsNumeric)
|
||||
if ok && i < limit && i-start < 8 && source[i] == ';' {
|
||||
v, _ := strconv.ParseUint(util.BytesToReadOnlyString(source[start:i]), 0, 32)
|
||||
d.RawWrite(writer, source[n:pos])
|
||||
n = i + 1
|
||||
escapeRune(writer, rune(v))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
start := next
|
||||
i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsAlphaNumeric)
|
||||
// entity reference
|
||||
if ok && i < limit && source[i] == ';' {
|
||||
name := util.BytesToReadOnlyString(source[start:i])
|
||||
entity, ok := util.LookUpHTML5EntityByName(name)
|
||||
if ok {
|
||||
d.RawWrite(writer, source[n:pos])
|
||||
n = i + 1
|
||||
d.RawWrite(writer, entity.Characters)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
i = next - 1
|
||||
}
|
||||
if c == '\\' {
|
||||
escaped = true
|
||||
continue
|
||||
}
|
||||
escaped = false
|
||||
}
|
||||
d.RawWrite(writer, source[n:])
|
||||
}
|
||||
|
||||
// DefaultWriter is a default implementation of the Writer.
|
||||
var DefaultWriter = &defaultWriter{}
|
||||
|
||||
var bDataImage = []byte("data:image/")
|
||||
var bPng = []byte("png;")
|
||||
var bGif = []byte("gif;")
|
||||
var bJpeg = []byte("jpeg;")
|
||||
var bWebp = []byte("webp;")
|
||||
var bJs = []byte("javascript:")
|
||||
var bVb = []byte("vbscript:")
|
||||
var bFile = []byte("file:")
|
||||
var bData = []byte("data:")
|
||||
|
||||
// IsDangerousURL returns true if the given url seems to be a potentially dangerous url,
|
||||
// otherwise false.
|
||||
func IsDangerousURL(url []byte) bool {
|
||||
if bytes.HasPrefix(url, bDataImage) && len(url) >= 11 {
|
||||
v := url[11:]
|
||||
if bytes.HasPrefix(v, bPng) || bytes.HasPrefix(v, bGif) ||
|
||||
bytes.HasPrefix(v, bJpeg) || bytes.HasPrefix(v, bWebp) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
return bytes.HasPrefix(url, bJs) || bytes.HasPrefix(url, bVb) ||
|
||||
bytes.HasPrefix(url, bFile) || bytes.HasPrefix(url, bData)
|
||||
}
|
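A minimal usage sketch (editorial, not part of the vendored sources) showing how the functional options defined in this file are typically passed to a goldmark converter; it assumes the github.com/yuin/goldmark module is available on the import path:

package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/renderer/html"
)

func main() {
	md := goldmark.New(
		goldmark.WithRendererOptions(
			html.WithHardWraps(), // render soft line breaks as <br>
			html.WithXHTML(),     // emit self-closing tags such as <br />
			html.WithUnsafe(),    // pass raw HTML and risky URLs through unchanged
		),
	)
	var buf bytes.Buffer
	if err := md.Convert([]byte("hello\n*world*\n"), &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}

With WithHardWraps the single newline between the two words becomes a <br> element instead of a plain soft break.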
174
vendor/github.com/yuin/goldmark/renderer/renderer.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
// Package renderer renders the given AST to certain formats.
|
||||
package renderer
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A Config struct is a data structure that holds configuration of the Renderer.
|
||||
type Config struct {
|
||||
Options map[OptionName]interface{}
|
||||
NodeRenderers util.PrioritizedSlice
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config
|
||||
func NewConfig() *Config {
|
||||
return &Config{
|
||||
Options: map[OptionName]interface{}{},
|
||||
NodeRenderers: util.PrioritizedSlice{},
|
||||
}
|
||||
}
|
||||
|
||||
// An OptionName is a name of the option.
|
||||
type OptionName string
|
||||
|
||||
// An Option interface is a functional option type for the Renderer.
|
||||
type Option interface {
|
||||
SetConfig(*Config)
|
||||
}
|
||||
|
||||
type withNodeRenderers struct {
|
||||
value []util.PrioritizedValue
|
||||
}
|
||||
|
||||
func (o *withNodeRenderers) SetConfig(c *Config) {
|
||||
c.NodeRenderers = append(c.NodeRenderers, o.value...)
|
||||
}
|
||||
|
||||
// WithNodeRenderers is a functional option that allows you to add
|
||||
// NodeRenderers to the renderer.
|
||||
func WithNodeRenderers(ps ...util.PrioritizedValue) Option {
|
||||
return &withNodeRenderers{ps}
|
||||
}
|
||||
|
||||
type withOption struct {
|
||||
name OptionName
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (o *withOption) SetConfig(c *Config) {
|
||||
c.Options[o.name] = o.value
|
||||
}
|
||||
|
||||
// WithOption is a functional option that allows you to set
|
||||
// an arbitrary option to the parser.
|
||||
func WithOption(name OptionName, value interface{}) Option {
|
||||
return &withOption{name, value}
|
||||
}
|
||||
|
||||
// A SetOptioner interface sets given option to the object.
|
||||
type SetOptioner interface {
|
||||
// SetOption sets given option to the object.
|
||||
// Unacceptable options may be passed.
|
||||
// Thus implementations must ignore unacceptable options.
|
||||
SetOption(name OptionName, value interface{})
|
||||
}
|
||||
|
||||
// NodeRendererFunc is a function that renders a given node.
|
||||
type NodeRendererFunc func(writer util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error)
|
||||
|
||||
// A NodeRenderer interface offers NodeRendererFuncs.
|
||||
type NodeRenderer interface {
|
||||
// RegisterFuncs registers NodeRendererFuncs to given NodeRendererFuncRegisterer.
|
||||
RegisterFuncs(NodeRendererFuncRegisterer)
|
||||
}
|
||||
|
||||
// A NodeRendererFuncRegisterer registers NodeRendererFuncs.
|
||||
type NodeRendererFuncRegisterer interface {
|
||||
// Register registers given NodeRendererFunc to this object.
|
||||
Register(ast.NodeKind, NodeRendererFunc)
|
||||
}
|
||||
|
||||
// A Renderer interface renders a given AST node to a given
|
||||
// writer.
|
||||
type Renderer interface {
|
||||
Render(w io.Writer, source []byte, n ast.Node) error
|
||||
|
||||
// AddOptions adds given option to this renderer.
|
||||
AddOptions(...Option)
|
||||
}
|
||||
|
||||
type renderer struct {
|
||||
config *Config
|
||||
options map[OptionName]interface{}
|
||||
nodeRendererFuncsTmp map[ast.NodeKind]NodeRendererFunc
|
||||
maxKind int
|
||||
nodeRendererFuncs []NodeRendererFunc
|
||||
initSync sync.Once
|
||||
}
|
||||
|
||||
// NewRenderer returns a new Renderer with given options.
|
||||
func NewRenderer(options ...Option) Renderer {
|
||||
config := NewConfig()
|
||||
for _, opt := range options {
|
||||
opt.SetConfig(config)
|
||||
}
|
||||
|
||||
r := &renderer{
|
||||
options: map[OptionName]interface{}{},
|
||||
config: config,
|
||||
nodeRendererFuncsTmp: map[ast.NodeKind]NodeRendererFunc{},
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *renderer) AddOptions(opts ...Option) {
|
||||
for _, opt := range opts {
|
||||
opt.SetConfig(r.config)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *renderer) Register(kind ast.NodeKind, v NodeRendererFunc) {
|
||||
r.nodeRendererFuncsTmp[kind] = v
|
||||
if int(kind) > r.maxKind {
|
||||
r.maxKind = int(kind)
|
||||
}
|
||||
}
|
||||
|
||||
// Render renders the given AST node to the given writer with the given Renderer.
|
||||
func (r *renderer) Render(w io.Writer, source []byte, n ast.Node) error {
|
||||
r.initSync.Do(func() {
|
||||
r.options = r.config.Options
|
||||
r.config.NodeRenderers.Sort()
|
||||
l := len(r.config.NodeRenderers)
|
||||
for i := l - 1; i >= 0; i-- {
|
||||
v := r.config.NodeRenderers[i]
|
||||
nr, _ := v.Value.(NodeRenderer)
|
||||
if se, ok := v.Value.(SetOptioner); ok {
|
||||
for oname, ovalue := range r.options {
|
||||
se.SetOption(oname, ovalue)
|
||||
}
|
||||
}
|
||||
nr.RegisterFuncs(r)
|
||||
}
|
||||
r.nodeRendererFuncs = make([]NodeRendererFunc, r.maxKind+1)
|
||||
for kind, nr := range r.nodeRendererFuncsTmp {
|
||||
r.nodeRendererFuncs[kind] = nr
|
||||
}
|
||||
r.config = nil
|
||||
r.nodeRendererFuncsTmp = nil
|
||||
})
|
||||
writer, ok := w.(util.BufWriter)
|
||||
if !ok {
|
||||
writer = bufio.NewWriter(w)
|
||||
}
|
||||
err := ast.Walk(n, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||
s := ast.WalkStatus(ast.WalkContinue)
|
||||
var err error
|
||||
f := r.nodeRendererFuncs[n.Kind()]
|
||||
if f != nil {
|
||||
s, err = f(writer, source, n, entering)
|
||||
}
|
||||
return s, err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writer.Flush()
|
||||
}
|
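How the pieces in this file fit together, as an editorial sketch (not part of the vendored sources): a NodeRenderer such as the HTML renderer above is wrapped with a priority and handed to NewRenderer, which registers its functions on first use; priorities decide which registration wins when two NodeRenderers handle the same node kind. The sketch assumes the goldmark module is importable.

package main

import (
	"os"

	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/renderer"
	"github.com/yuin/goldmark/renderer/html"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

func main() {
	source := []byte("# Title\n\nSome *emphasis*.\n")

	// Parse with goldmark's default parser, then render the AST with a
	// Renderer assembled from the HTML NodeRenderer.
	doc := goldmark.DefaultParser().Parse(text.NewReader(source))

	r := renderer.NewRenderer(
		renderer.WithNodeRenderers(util.Prioritized(html.NewRenderer(), 1000)),
	)
	if err := r.Render(os.Stdout, source, doc); err != nil {
		panic(err)
	}
}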
543
vendor/github.com/yuin/goldmark/text/reader.go
generated
vendored
Normal file
@@ -0,0 +1,543 @@
|
||||
package text
|
||||
|
||||
import (
|
||||
"io"
|
||||
"regexp"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
const invalidValue = -1
|
||||
|
||||
// EOF indicates the end of file.
|
||||
const EOF = byte(0xff)
|
||||
|
||||
// A Reader interface provides abstracted methods for reading text.
|
||||
type Reader interface {
|
||||
io.RuneReader
|
||||
|
||||
// Source returns a source of the reader.
|
||||
Source() []byte
|
||||
|
||||
// ResetPosition resets positions.
|
||||
ResetPosition()
|
||||
|
||||
// Peek returns a byte at current position without advancing the internal pointer.
|
||||
Peek() byte
|
||||
|
||||
// PeekLine returns the current line without advancing the internal pointer.
|
||||
PeekLine() ([]byte, Segment)
|
||||
|
||||
// PrecendingCharacter returns a character just before current internal pointer.
|
||||
PrecendingCharacter() rune
|
||||
|
||||
// Value returns a value of the given segment.
|
||||
Value(Segment) []byte
|
||||
|
||||
// LineOffset returns a distance from the line head to current position.
|
||||
LineOffset() int
|
||||
|
||||
// Position returns current line number and position.
|
||||
Position() (int, Segment)
|
||||
|
||||
// SetPosition sets current line number and position.
|
||||
SetPosition(int, Segment)
|
||||
|
||||
// SetPadding sets padding to the reader.
|
||||
SetPadding(int)
|
||||
|
||||
// Advance advances the internal pointer.
|
||||
Advance(int)
|
||||
|
||||
// AdvanceAndSetPadding advances the internal pointer and adds padding to the
|
||||
// reader.
|
||||
AdvanceAndSetPadding(int, int)
|
||||
|
||||
// AdvanceLine advances the internal pointer to the next line head.
|
||||
AdvanceLine()
|
||||
|
||||
// SkipSpaces skips space characters and returns a non-blank line.
|
||||
// If it reaches EOF, returns false.
|
||||
SkipSpaces() (Segment, int, bool)
|
||||
|
||||
// SkipBlankLines skips blank lines and returns a non-blank line.
|
||||
// If it reaches EOF, returns false.
|
||||
SkipBlankLines() (Segment, int, bool)
|
||||
|
||||
// Match performs regular expression matching against the current line.
|
||||
Match(reg *regexp.Regexp) bool
|
||||
|
||||
// FindSubMatch performs regular expression searching against the current line.
|
||||
FindSubMatch(reg *regexp.Regexp) [][]byte
|
||||
}
|
||||
|
||||
type reader struct {
|
||||
source []byte
|
||||
sourceLength int
|
||||
line int
|
||||
peekedLine []byte
|
||||
pos Segment
|
||||
head int
|
||||
lineOffset int
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that can read UTF-8 bytes.
|
||||
func NewReader(source []byte) Reader {
|
||||
r := &reader{
|
||||
source: source,
|
||||
sourceLength: len(source),
|
||||
}
|
||||
r.ResetPosition()
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *reader) ResetPosition() {
|
||||
r.line = -1
|
||||
r.head = 0
|
||||
r.lineOffset = -1
|
||||
r.AdvanceLine()
|
||||
}
|
||||
|
||||
func (r *reader) Source() []byte {
|
||||
return r.source
|
||||
}
|
||||
|
||||
func (r *reader) Value(seg Segment) []byte {
|
||||
return seg.Value(r.source)
|
||||
}
|
||||
|
||||
func (r *reader) Peek() byte {
|
||||
if r.pos.Start >= 0 && r.pos.Start < r.sourceLength {
|
||||
if r.pos.Padding != 0 {
|
||||
return space[0]
|
||||
}
|
||||
return r.source[r.pos.Start]
|
||||
}
|
||||
return EOF
|
||||
}
|
||||
|
||||
func (r *reader) PeekLine() ([]byte, Segment) {
|
||||
if r.pos.Start >= 0 && r.pos.Start < r.sourceLength {
|
||||
if r.peekedLine == nil {
|
||||
r.peekedLine = r.pos.Value(r.Source())
|
||||
}
|
||||
return r.peekedLine, r.pos
|
||||
}
|
||||
return nil, r.pos
|
||||
}
|
||||
|
||||
// io.RuneReader interface
|
||||
func (r *reader) ReadRune() (rune, int, error) {
|
||||
return readRuneReader(r)
|
||||
}
|
||||
|
||||
func (r *reader) LineOffset() int {
|
||||
if r.lineOffset < 0 {
|
||||
v := 0
|
||||
for i := r.head; i < r.pos.Start; i++ {
|
||||
if r.source[i] == '\t' {
|
||||
v += util.TabWidth(v)
|
||||
} else {
|
||||
v++
|
||||
}
|
||||
}
|
||||
r.lineOffset = v - r.pos.Padding
|
||||
}
|
||||
return r.lineOffset
|
||||
}
|
||||
|
||||
func (r *reader) PrecendingCharacter() rune {
|
||||
if r.pos.Start <= 0 {
|
||||
if r.pos.Padding != 0 {
|
||||
return rune(' ')
|
||||
}
|
||||
return rune('\n')
|
||||
}
|
||||
i := r.pos.Start - 1
|
||||
for ; i >= 0; i-- {
|
||||
if utf8.RuneStart(r.source[i]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
rn, _ := utf8.DecodeRune(r.source[i:])
|
||||
return rn
|
||||
}
|
||||
|
||||
func (r *reader) Advance(n int) {
|
||||
r.lineOffset = -1
|
||||
if n < len(r.peekedLine) && r.pos.Padding == 0 {
|
||||
r.pos.Start += n
|
||||
r.peekedLine = nil
|
||||
return
|
||||
}
|
||||
r.peekedLine = nil
|
||||
l := r.sourceLength
|
||||
for ; n > 0 && r.pos.Start < l; n-- {
|
||||
if r.pos.Padding != 0 {
|
||||
r.pos.Padding--
|
||||
continue
|
||||
}
|
||||
if r.source[r.pos.Start] == '\n' {
|
||||
r.AdvanceLine()
|
||||
continue
|
||||
}
|
||||
r.pos.Start++
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reader) AdvanceAndSetPadding(n, padding int) {
|
||||
r.Advance(n)
|
||||
if padding > r.pos.Padding {
|
||||
r.SetPadding(padding)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reader) AdvanceLine() {
|
||||
r.lineOffset = -1
|
||||
r.peekedLine = nil
|
||||
r.pos.Start = r.pos.Stop
|
||||
r.head = r.pos.Start
|
||||
if r.pos.Start < 0 {
|
||||
return
|
||||
}
|
||||
r.pos.Stop = r.sourceLength
|
||||
for i := r.pos.Start; i < r.sourceLength; i++ {
|
||||
c := r.source[i]
|
||||
if c == '\n' {
|
||||
r.pos.Stop = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
r.line++
|
||||
r.pos.Padding = 0
|
||||
}
|
||||
|
||||
func (r *reader) Position() (int, Segment) {
|
||||
return r.line, r.pos
|
||||
}
|
||||
|
||||
func (r *reader) SetPosition(line int, pos Segment) {
|
||||
r.lineOffset = -1
|
||||
r.line = line
|
||||
r.pos = pos
|
||||
}
|
||||
|
||||
func (r *reader) SetPadding(v int) {
|
||||
r.pos.Padding = v
|
||||
}
|
||||
|
||||
func (r *reader) SkipSpaces() (Segment, int, bool) {
|
||||
return skipSpacesReader(r)
|
||||
}
|
||||
|
||||
func (r *reader) SkipBlankLines() (Segment, int, bool) {
|
||||
return skipBlankLinesReader(r)
|
||||
}
|
||||
|
||||
func (r *reader) Match(reg *regexp.Regexp) bool {
|
||||
return matchReader(r, reg)
|
||||
}
|
||||
|
||||
func (r *reader) FindSubMatch(reg *regexp.Regexp) [][]byte {
|
||||
return findSubMatchReader(r, reg)
|
||||
}
|
||||
|
||||
// A BlockReader interface is a reader that is optimized for Blocks.
|
||||
type BlockReader interface {
|
||||
Reader
|
||||
// Reset resets current state and sets new segments to the reader.
|
||||
Reset(segment *Segments)
|
||||
}
|
||||
|
||||
type blockReader struct {
|
||||
source []byte
|
||||
segments *Segments
|
||||
segmentsLength int
|
||||
line int
|
||||
pos Segment
|
||||
head int
|
||||
last int
|
||||
lineOffset int
|
||||
}
|
||||
|
||||
// NewBlockReader returns a new BlockReader.
|
||||
func NewBlockReader(source []byte, segments *Segments) BlockReader {
|
||||
r := &blockReader{
|
||||
source: source,
|
||||
}
|
||||
if segments != nil {
|
||||
r.Reset(segments)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *blockReader) ResetPosition() {
|
||||
r.line = -1
|
||||
r.head = 0
|
||||
r.last = 0
|
||||
r.lineOffset = -1
|
||||
r.pos.Start = -1
|
||||
r.pos.Stop = -1
|
||||
r.pos.Padding = 0
|
||||
if r.segmentsLength > 0 {
|
||||
last := r.segments.At(r.segmentsLength - 1)
|
||||
r.last = last.Stop
|
||||
}
|
||||
r.AdvanceLine()
|
||||
}
|
||||
|
||||
func (r *blockReader) Reset(segments *Segments) {
|
||||
r.segments = segments
|
||||
r.segmentsLength = segments.Len()
|
||||
r.ResetPosition()
|
||||
}
|
||||
|
||||
func (r *blockReader) Source() []byte {
|
||||
return r.source
|
||||
}
|
||||
|
||||
func (r *blockReader) Value(seg Segment) []byte {
|
||||
line := r.segmentsLength - 1
|
||||
ret := make([]byte, 0, seg.Stop-seg.Start+1)
|
||||
for ; line >= 0; line-- {
|
||||
if seg.Start >= r.segments.At(line).Start {
|
||||
break
|
||||
}
|
||||
}
|
||||
i := seg.Start
|
||||
for ; line < r.segmentsLength; line++ {
|
||||
s := r.segments.At(line)
|
||||
if i < 0 {
|
||||
i = s.Start
|
||||
}
|
||||
ret = s.ConcatPadding(ret)
|
||||
for ; i < seg.Stop && i < s.Stop; i++ {
|
||||
ret = append(ret, r.source[i])
|
||||
}
|
||||
i = -1
|
||||
if s.Stop > seg.Stop {
|
||||
break
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// io.RuneReader interface
|
||||
func (r *blockReader) ReadRune() (rune, int, error) {
|
||||
return readRuneReader(r)
|
||||
}
|
||||
|
||||
func (r *blockReader) PrecendingCharacter() rune {
|
||||
if r.pos.Padding != 0 {
|
||||
return rune(' ')
|
||||
}
|
||||
if r.segments.Len() < 1 {
|
||||
return rune('\n')
|
||||
}
|
||||
firstSegment := r.segments.At(0)
|
||||
if r.line == 0 && r.pos.Start <= firstSegment.Start {
|
||||
return rune('\n')
|
||||
}
|
||||
l := len(r.source)
|
||||
i := r.pos.Start - 1
|
||||
for ; i < l && i >= 0; i-- {
|
||||
if utf8.RuneStart(r.source[i]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i < 0 || i >= l {
|
||||
return rune('\n')
|
||||
}
|
||||
rn, _ := utf8.DecodeRune(r.source[i:])
|
||||
return rn
|
||||
}
|
||||
|
||||
func (r *blockReader) LineOffset() int {
|
||||
if r.lineOffset < 0 {
|
||||
v := 0
|
||||
for i := r.head; i < r.pos.Start; i++ {
|
||||
if r.source[i] == '\t' {
|
||||
v += util.TabWidth(v)
|
||||
} else {
|
||||
v++
|
||||
}
|
||||
}
|
||||
r.lineOffset = v - r.pos.Padding
|
||||
}
|
||||
return r.lineOffset
|
||||
}
|
||||
|
||||
func (r *blockReader) Peek() byte {
|
||||
if r.line < r.segmentsLength && r.pos.Start >= 0 && r.pos.Start < r.last {
|
||||
if r.pos.Padding != 0 {
|
||||
return space[0]
|
||||
}
|
||||
return r.source[r.pos.Start]
|
||||
}
|
||||
return EOF
|
||||
}
|
||||
|
||||
func (r *blockReader) PeekLine() ([]byte, Segment) {
|
||||
if r.line < r.segmentsLength && r.pos.Start >= 0 && r.pos.Start < r.last {
|
||||
return r.pos.Value(r.source), r.pos
|
||||
}
|
||||
return nil, r.pos
|
||||
}
|
||||
|
||||
func (r *blockReader) Advance(n int) {
|
||||
r.lineOffset = -1
|
||||
|
||||
if n < r.pos.Stop-r.pos.Start && r.pos.Padding == 0 {
|
||||
r.pos.Start += n
|
||||
return
|
||||
}
|
||||
|
||||
for ; n > 0; n-- {
|
||||
if r.pos.Padding != 0 {
|
||||
r.pos.Padding--
|
||||
continue
|
||||
}
|
||||
if r.pos.Start >= r.pos.Stop-1 && r.pos.Stop < r.last {
|
||||
r.AdvanceLine()
|
||||
continue
|
||||
}
|
||||
r.pos.Start++
|
||||
}
|
||||
}
|
||||
|
||||
func (r *blockReader) AdvanceAndSetPadding(n, padding int) {
|
||||
r.Advance(n)
|
||||
if padding > r.pos.Padding {
|
||||
r.SetPadding(padding)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *blockReader) AdvanceLine() {
|
||||
r.SetPosition(r.line+1, NewSegment(invalidValue, invalidValue))
|
||||
r.head = r.pos.Start
|
||||
}
|
||||
|
||||
func (r *blockReader) Position() (int, Segment) {
|
||||
return r.line, r.pos
|
||||
}
|
||||
|
||||
func (r *blockReader) SetPosition(line int, pos Segment) {
|
||||
r.lineOffset = -1
|
||||
r.line = line
|
||||
if pos.Start == invalidValue {
|
||||
if r.line < r.segmentsLength {
|
||||
s := r.segments.At(line)
|
||||
r.head = s.Start
|
||||
r.pos = s
|
||||
}
|
||||
} else {
|
||||
r.pos = pos
|
||||
if r.line < r.segmentsLength {
|
||||
s := r.segments.At(line)
|
||||
r.head = s.Start
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *blockReader) SetPadding(v int) {
|
||||
r.lineOffset = -1
|
||||
r.pos.Padding = v
|
||||
}
|
||||
|
||||
func (r *blockReader) SkipSpaces() (Segment, int, bool) {
|
||||
return skipSpacesReader(r)
|
||||
}
|
||||
|
||||
func (r *blockReader) SkipBlankLines() (Segment, int, bool) {
|
||||
return skipBlankLinesReader(r)
|
||||
}
|
||||
|
||||
func (r *blockReader) Match(reg *regexp.Regexp) bool {
|
||||
return matchReader(r, reg)
|
||||
}
|
||||
|
||||
func (r *blockReader) FindSubMatch(reg *regexp.Regexp) [][]byte {
|
||||
return findSubMatchReader(r, reg)
|
||||
}
|
||||
|
||||
func skipBlankLinesReader(r Reader) (Segment, int, bool) {
|
||||
lines := 0
|
||||
for {
|
||||
line, seg := r.PeekLine()
|
||||
if line == nil {
|
||||
return seg, lines, false
|
||||
}
|
||||
if util.IsBlank(line) {
|
||||
lines++
|
||||
r.AdvanceLine()
|
||||
} else {
|
||||
return seg, lines, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func skipSpacesReader(r Reader) (Segment, int, bool) {
|
||||
chars := 0
|
||||
for {
|
||||
line, segment := r.PeekLine()
|
||||
if line == nil {
|
||||
return segment, chars, false
|
||||
}
|
||||
for i, c := range line {
|
||||
if util.IsSpace(c) {
|
||||
chars++
|
||||
r.Advance(1)
|
||||
continue
|
||||
}
|
||||
return segment.WithStart(segment.Start + i + 1), chars, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func matchReader(r Reader, reg *regexp.Regexp) bool {
|
||||
oldline, oldseg := r.Position()
|
||||
match := reg.FindReaderSubmatchIndex(r)
|
||||
r.SetPosition(oldline, oldseg)
|
||||
if match == nil {
|
||||
return false
|
||||
}
|
||||
r.Advance(match[1] - match[0])
|
||||
return true
|
||||
}
|
||||
|
||||
func findSubMatchReader(r Reader, reg *regexp.Regexp) [][]byte {
|
||||
oldline, oldseg := r.Position()
|
||||
match := reg.FindReaderSubmatchIndex(r)
|
||||
r.SetPosition(oldline, oldseg)
|
||||
if match == nil {
|
||||
return nil
|
||||
}
|
||||
runes := make([]rune, 0, match[1]-match[0])
|
||||
for i := 0; i < match[1]; {
|
||||
r, size, _ := readRuneReader(r)
|
||||
i += size
|
||||
runes = append(runes, r)
|
||||
}
|
||||
result := [][]byte{}
|
||||
for i := 0; i < len(match); i += 2 {
|
||||
result = append(result, []byte(string(runes[match[i]:match[i+1]])))
|
||||
}
|
||||
|
||||
r.SetPosition(oldline, oldseg)
|
||||
r.Advance(match[1] - match[0])
|
||||
return result
|
||||
}
|
||||
|
||||
func readRuneReader(r Reader) (rune, int, error) {
|
||||
line, _ := r.PeekLine()
|
||||
if line == nil {
|
||||
return 0, 0, io.EOF
|
||||
}
|
||||
rn, size := utf8.DecodeRune(line)
|
||||
if rn == utf8.RuneError {
|
||||
return 0, 0, io.EOF
|
||||
}
|
||||
r.Advance(size)
|
||||
return rn, size, nil
|
||||
}
|
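A small sketch of the line-oriented Reader above (editorial, not part of the vendored sources), assuming the goldmark module is importable; it walks a two-line source with PeekLine, Advance and AdvanceLine:

package main

import (
	"fmt"

	"github.com/yuin/goldmark/text"
)

func main() {
	r := text.NewReader([]byte("first line\nsecond line\n"))

	// PeekLine returns the current line (trailing newline included)
	// without moving the internal pointer.
	line, seg := r.PeekLine()
	fmt.Printf("line %q, segment [%d,%d)\n", line, seg.Start, seg.Stop)

	// Advance moves within the current line; AdvanceLine jumps to the next one.
	r.Advance(6)
	fmt.Printf("byte after Advance(6): %q\n", r.Peek())

	r.AdvanceLine()
	line, _ = r.PeekLine()
	fmt.Printf("next line: %q\n", line)
}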
209
vendor/github.com/yuin/goldmark/text/segment.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
package text

import (
	"bytes"
	"github.com/yuin/goldmark/util"
)

var space = []byte(" ")

// A Segment struct holds information about source positions.
type Segment struct {
	// Start is a start position of the segment.
	Start int

	// Stop is a stop position of the segment.
	// This value should be excluded.
	Stop int

	// Padding is a padding length of the segment.
	Padding int
}

// NewSegment return a new Segment.
func NewSegment(start, stop int) Segment {
	return Segment{
		Start:   start,
		Stop:    stop,
		Padding: 0,
	}
}

// NewSegmentPadding returns a new Segment with the given padding.
func NewSegmentPadding(start, stop, n int) Segment {
	return Segment{
		Start:   start,
		Stop:    stop,
		Padding: n,
	}
}

// Value returns a value of the segment.
func (t *Segment) Value(buffer []byte) []byte {
	if t.Padding == 0 {
		return buffer[t.Start:t.Stop]
	}
	result := make([]byte, 0, t.Padding+t.Stop-t.Start+1)
	result = append(result, bytes.Repeat(space, t.Padding)...)
	return append(result, buffer[t.Start:t.Stop]...)
}

// Len returns a length of the segment.
func (t *Segment) Len() int {
	return t.Stop - t.Start + t.Padding
}

// Between returns a segment between this segment and the given segment.
func (t *Segment) Between(other Segment) Segment {
	if t.Stop != other.Stop {
		panic("invalid state")
	}
	return NewSegmentPadding(
		t.Start,
		other.Start,
		t.Padding-other.Padding,
	)
}

// IsEmpty returns true if this segment is empty, otherwise false.
func (t *Segment) IsEmpty() bool {
	return t.Start >= t.Stop && t.Padding == 0
}

// TrimRightSpace returns a new segment by slicing off all trailing
// space characters.
func (t *Segment) TrimRightSpace(buffer []byte) Segment {
	v := buffer[t.Start:t.Stop]
	l := util.TrimRightSpaceLength(v)
	if l == len(v) {
		return NewSegment(t.Start, t.Start)
	}
	return NewSegmentPadding(t.Start, t.Stop-l, t.Padding)
}

// TrimLeftSpace returns a new segment by slicing off all leading
// space characters including padding.
func (t *Segment) TrimLeftSpace(buffer []byte) Segment {
	v := buffer[t.Start:t.Stop]
	l := util.TrimLeftSpaceLength(v)
	return NewSegment(t.Start+l, t.Stop)
}

// TrimLeftSpaceWidth returns a new segment by slicing off leading space
// characters until the given width.
func (t *Segment) TrimLeftSpaceWidth(width int, buffer []byte) Segment {
	padding := t.Padding
	for ; width > 0; width-- {
		if padding == 0 {
			break
		}
		padding--
	}
	if width == 0 {
		return NewSegmentPadding(t.Start, t.Stop, padding)
	}
	text := buffer[t.Start:t.Stop]
	start := t.Start
	for _, c := range text {
		if start >= t.Stop-1 || width <= 0 {
			break
		}
		if c == ' ' {
			width--
		} else if c == '\t' {
			width -= 4
		} else {
			break
		}
		start++
	}
	if width < 0 {
		padding = width * -1
	}
	return NewSegmentPadding(start, t.Stop, padding)
}

// WithStart returns a new Segment with same value except Start.
func (t *Segment) WithStart(v int) Segment {
	return NewSegmentPadding(v, t.Stop, t.Padding)
}

// WithStop returns a new Segment with same value except Stop.
func (t *Segment) WithStop(v int) Segment {
	return NewSegmentPadding(t.Start, v, t.Padding)
}

// ConcatPadding concats the padding to the given slice.
func (t *Segment) ConcatPadding(v []byte) []byte {
	if t.Padding > 0 {
		return append(v, bytes.Repeat(space, t.Padding)...)
	}
	return v
}
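As a usage sketch, separate from the vendored file: a Segment maps back into the source buffer, and padding prepends virtual spaces (for example the remainder of a partially consumed tab).

package main

import (
	"fmt"

	"github.com/yuin/goldmark/text"
)

func main() {
	source := []byte("> quoted text")
	seg := text.NewSegment(2, 8) // covers "quoted"
	fmt.Println(string(seg.Value(source)), seg.Len()) // expected: "quoted" 6

	// A padded segment prepends virtual spaces before the sliced bytes.
	padded := text.NewSegmentPadding(2, 8, 2)
	fmt.Println("["+string(padded.Value(source))+"]", padded.Len()) // expected: "[  quoted]" 8
}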
// Segments is a collection of the Segment.
type Segments struct {
	values []Segment
}

// NewSegments return a new Segments.
func NewSegments() *Segments {
	return &Segments{
		values: nil,
	}
}

// Append appends the given segment after the tail of the collection.
func (s *Segments) Append(t Segment) {
	if s.values == nil {
		s.values = make([]Segment, 0, 20)
	}
	s.values = append(s.values, t)
}

// AppendAll appends all elements of given segments after the tail of the collection.
func (s *Segments) AppendAll(t []Segment) {
	if s.values == nil {
		s.values = make([]Segment, 0, 20)
	}
	s.values = append(s.values, t...)
}

// Len returns the length of the collection.
func (s *Segments) Len() int {
	if s.values == nil {
		return 0
	}
	return len(s.values)
}

// At returns a segment at the given index.
func (s *Segments) At(i int) Segment {
	return s.values[i]
}

// Set sets the given Segment.
func (s *Segments) Set(i int, v Segment) {
	s.values[i] = v
}

// SetSliced replace the collection with a subsliced value.
func (s *Segments) SetSliced(lo, hi int) {
	s.values = s.values[lo:hi]
}

// Sliced returns a subslice of the collection.
func (s *Segments) Sliced(lo, hi int) []Segment {
	return s.values[lo:hi]
}

// Clear delete all element of the collection.
func (s *Segments) Clear() {
	s.values = nil
}

// Unshift insert the given Segment to head of the collection.
func (s *Segments) Unshift(v Segment) {
	s.values = append(s.values[0:1], s.values[0:]...)
	s.values[0] = v
}
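As a usage sketch, separate from the vendored file: Segments is the per-block list of source lines.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/text"
)

func main() {
	source := []byte("first line\nsecond line\n")
	segs := text.NewSegments()
	segs.Append(text.NewSegment(0, 11))  // "first line\n"
	segs.Append(text.NewSegment(11, 23)) // "second line\n"
	fmt.Println(segs.Len()) // expected: 2
	second := segs.At(1)
	fmt.Print(string(second.Value(source))) // expected: "second line\n"
}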
2142
vendor/github.com/yuin/goldmark/util/html5entities.go
generated
vendored
Normal file
File diff suppressed because it is too large
1491
vendor/github.com/yuin/goldmark/util/unicode_case_folding.go
generated
vendored
Normal file
File diff suppressed because it is too large
980
vendor/github.com/yuin/goldmark/util/util.go
generated
vendored
Normal file
@ -0,0 +1,980 @@
// Package util provides utility functions for the goldmark.
package util

import (
	"bytes"
	"io"
	"net/url"
	"regexp"
	"sort"
	"strconv"
	"unicode"
	"unicode/utf8"
)

// A CopyOnWriteBuffer is a byte buffer that copies buffer when
// it need to be changed.
type CopyOnWriteBuffer struct {
	buffer []byte
	copied bool
}

// NewCopyOnWriteBuffer returns a new CopyOnWriteBuffer.
func NewCopyOnWriteBuffer(buffer []byte) CopyOnWriteBuffer {
	return CopyOnWriteBuffer{
		buffer: buffer,
		copied: false,
	}
}

// Write writes given bytes to the buffer.
// Write allocate new buffer and clears it at the first time.
func (b *CopyOnWriteBuffer) Write(value []byte) {
	if !b.copied {
		b.buffer = make([]byte, 0, len(b.buffer)+20)
		b.copied = true
	}
	b.buffer = append(b.buffer, value...)
}

// Append appends given bytes to the buffer.
// Append copy buffer at the first time.
func (b *CopyOnWriteBuffer) Append(value []byte) {
	if !b.copied {
		tmp := make([]byte, len(b.buffer), len(b.buffer)+20)
		copy(tmp, b.buffer)
		b.buffer = tmp
		b.copied = true
	}
	b.buffer = append(b.buffer, value...)
}

// WriteByte writes the given byte to the buffer.
// WriteByte allocate new buffer and clears it at the first time.
func (b *CopyOnWriteBuffer) WriteByte(c byte) {
	if !b.copied {
		b.buffer = make([]byte, 0, len(b.buffer)+20)
		b.copied = true
	}
	b.buffer = append(b.buffer, c)
}

// AppendByte appends given bytes to the buffer.
// AppendByte copy buffer at the first time.
func (b *CopyOnWriteBuffer) AppendByte(c byte) {
	if !b.copied {
		tmp := make([]byte, len(b.buffer), len(b.buffer)+20)
		copy(tmp, b.buffer)
		b.buffer = tmp
		b.copied = true
	}
	b.buffer = append(b.buffer, c)
}

// Bytes returns bytes of this buffer.
func (b *CopyOnWriteBuffer) Bytes() []byte {
	return b.buffer
}

// IsCopied returns true if buffer has been copied, otherwise false.
func (b *CopyOnWriteBuffer) IsCopied() bool {
	return b.copied
}

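As a usage sketch, separate from the vendored file: the buffer is only copied once something is written, so read-only paths stay allocation-free.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	src := []byte("hello")
	cob := util.NewCopyOnWriteBuffer(src)
	fmt.Println(cob.IsCopied()) // false: still referencing the original bytes
	cob.Append([]byte(" world"))
	fmt.Println(cob.IsCopied())      // true: Append copied the existing content first
	fmt.Println(string(cob.Bytes())) // "hello world"
	fmt.Println(string(src))         // "hello": the original slice is left untouched
}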
// IsEscapedPunctuation returns true if character at a given index i
// is an escaped punctuation, otherwise false.
func IsEscapedPunctuation(source []byte, i int) bool {
	return source[i] == '\\' && i < len(source)-1 && IsPunct(source[i+1])
}

// ReadWhile read the given source while pred is true.
func ReadWhile(source []byte, index [2]int, pred func(byte) bool) (int, bool) {
	j := index[0]
	ok := false
	for ; j < index[1]; j++ {
		c1 := source[j]
		if pred(c1) {
			ok = true
			continue
		}
		break
	}
	return j, ok
}

// IsBlank returns true if the given string is all space characters.
func IsBlank(bs []byte) bool {
	for _, b := range bs {
		if !IsSpace(b) {
			return false
		}
	}
	return true
}

// VisualizeSpaces visualize invisible space characters.
func VisualizeSpaces(bs []byte) []byte {
	bs = bytes.Replace(bs, []byte(" "), []byte("[SPACE]"), -1)
	bs = bytes.Replace(bs, []byte("\t"), []byte("[TAB]"), -1)
	bs = bytes.Replace(bs, []byte("\n"), []byte("[NEWLINE]\n"), -1)
	bs = bytes.Replace(bs, []byte("\r"), []byte("[CR]"), -1)
	return bs
}

// TabWidth calculates actual width of a tab at the given position.
func TabWidth(currentPos int) int {
	return 4 - currentPos%4
}

// IndentPosition searches an indent position with the given width for the given line.
// If the line contains tab characters, paddings may be not zero.
// currentPos==0 and width==2:
//
//     position: 0    1
//               [TAB]aaaa
//     width:    1234 5678
//
// width=2 is in the tab character. In this case, IndentPosition returns
// (pos=1, padding=2)
func IndentPosition(bs []byte, currentPos, width int) (pos, padding int) {
	if width == 0 {
		return 0, 0
	}
	w := 0
	l := len(bs)
	i := 0
	hasTab := false
	for ; i < l; i++ {
		if bs[i] == '\t' {
			w += TabWidth(currentPos + w)
			hasTab = true
		} else if bs[i] == ' ' {
			w++
		} else {
			break
		}
	}
	if w >= width {
		if !hasTab {
			return width, 0
		}
		return i, w - width
	}
	return -1, -1
}

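As a usage sketch, separate from the vendored file, checking the tab case described in the comment above:

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	line := []byte("\taaaa")
	// A tab at column 0 is 4 columns wide, so asking for an indent of width 2
	// lands inside the tab: position 1 with 2 columns of padding left over.
	pos, padding := util.IndentPosition(line, 0, 2)
	fmt.Println(pos, padding) // expected: 1 2
}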
// IndentPositionPadding searches an indent position with the given width for the given line.
// This function is mostly same as IndentPosition except this function
// takes account into additional paddings.
func IndentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, padding int) {
	if width == 0 {
		return 0, paddingv
	}
	w := 0
	i := 0
	l := len(bs)
	for ; i < l; i++ {
		if bs[i] == '\t' {
			w += TabWidth(currentPos + w)
		} else if bs[i] == ' ' {
			w++
		} else {
			break
		}
	}
	if w >= width {
		return i - paddingv, w - width
	}
	return -1, -1
}

// DedentPosition dedents lines by the given width.
func DedentPosition(bs []byte, currentPos, width int) (pos, padding int) {
	if width == 0 {
		return 0, 0
	}
	w := 0
	l := len(bs)
	i := 0
	for ; i < l; i++ {
		if bs[i] == '\t' {
			w += TabWidth(currentPos + w)
		} else if bs[i] == ' ' {
			w++
		} else {
			break
		}
	}
	if w >= width {
		return i, w - width
	}
	return i, 0
}

// DedentPositionPadding dedents lines by the given width.
// This function is mostly same as DedentPosition except this function
// takes account into additional paddings.
func DedentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, padding int) {
	if width == 0 {
		return 0, paddingv
	}

	w := 0
	i := 0
	l := len(bs)
	for ; i < l; i++ {
		if bs[i] == '\t' {
			w += TabWidth(currentPos + w)
		} else if bs[i] == ' ' {
			w++
		} else {
			break
		}
	}
	if w >= width {
		return i - paddingv, w - width
	}
	return i - paddingv, 0
}

// IndentWidth calculate an indent width for the given line.
func IndentWidth(bs []byte, currentPos int) (width, pos int) {
	l := len(bs)
	for i := 0; i < l; i++ {
		b := bs[i]
		if b == ' ' {
			width++
			pos++
		} else if b == '\t' {
			width += TabWidth(currentPos + width)
			pos++
		} else {
			break
		}
	}
	return
}

// FirstNonSpacePosition returns a position line that is a first nonspace
// character.
func FirstNonSpacePosition(bs []byte) int {
	i := 0
	for ; i < len(bs); i++ {
		c := bs[i]
		if c == ' ' || c == '\t' {
			continue
		}
		if c == '\n' {
			return -1
		}
		return i
	}
	return -1
}

// FindClosure returns a position that closes the given opener.
// If codeSpan is set true, it ignores characters in code spans.
// If allowNesting is set true, closures correspond to nested opener will be
// ignored.
func FindClosure(bs []byte, opener, closure byte, codeSpan, allowNesting bool) int {
	i := 0
	opened := 1
	codeSpanOpener := 0
	for i < len(bs) {
		c := bs[i]
		if codeSpan && codeSpanOpener != 0 && c == '`' {
			codeSpanCloser := 0
			for ; i < len(bs); i++ {
				if bs[i] == '`' {
					codeSpanCloser++
				} else {
					i--
					break
				}
			}
			if codeSpanCloser == codeSpanOpener {
				codeSpanOpener = 0
			}
		} else if codeSpanOpener == 0 && c == '\\' && i < len(bs)-1 && IsPunct(bs[i+1]) {
			i += 2
			continue
		} else if codeSpan && codeSpanOpener == 0 && c == '`' {
			for ; i < len(bs); i++ {
				if bs[i] == '`' {
					codeSpanOpener++
				} else {
					i--
					break
				}
			}
		} else if (codeSpan && codeSpanOpener == 0) || !codeSpan {
			if c == closure {
				opened--
				if opened == 0 {
					return i
				}
			} else if c == opener {
				if !allowNesting {
					return -1
				}
				opened++
			}
		}
		i++
	}
	return -1
}

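As a usage sketch, separate from the vendored file: FindClosure scans for the byte that closes an opener the caller has already consumed.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	// The caller has already consumed the '(' opener; FindClosure scans the rest.
	rest := []byte("https://gitea.com/gitea/tea) trailing text")
	end := util.FindClosure(rest, '(', ')', false, false)
	fmt.Println(end)                // expected: 27, the index of the closing ')'
	fmt.Println(string(rest[:end])) // expected: "https://gitea.com/gitea/tea"
}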
// TrimLeft trims characters in the given s from head of the source.
// bytes.TrimLeft offers same functionalities, but bytes.TrimLeft
// allocates new buffer for the result.
func TrimLeft(source, b []byte) []byte {
	i := 0
	for ; i < len(source); i++ {
		c := source[i]
		found := false
		for j := 0; j < len(b); j++ {
			if c == b[j] {
				found = true
				break
			}
		}
		if !found {
			break
		}
	}
	return source[i:]
}

// TrimRight trims characters in the given s from tail of the source.
func TrimRight(source, b []byte) []byte {
	i := len(source) - 1
	for ; i >= 0; i-- {
		c := source[i]
		found := false
		for j := 0; j < len(b); j++ {
			if c == b[j] {
				found = true
				break
			}
		}
		if !found {
			break
		}
	}
	return source[:i+1]
}

// TrimLeftLength returns a length of leading specified characters.
func TrimLeftLength(source, s []byte) int {
	return len(source) - len(TrimLeft(source, s))
}

// TrimRightLength returns a length of trailing specified characters.
func TrimRightLength(source, s []byte) int {
	return len(source) - len(TrimRight(source, s))
}

// TrimLeftSpaceLength returns a length of leading space characters.
func TrimLeftSpaceLength(source []byte) int {
	i := 0
	for ; i < len(source); i++ {
		if !IsSpace(source[i]) {
			break
		}
	}
	return i
}

// TrimRightSpaceLength returns a length of trailing space characters.
func TrimRightSpaceLength(source []byte) int {
	l := len(source)
	i := l - 1
	for ; i >= 0; i-- {
		if !IsSpace(source[i]) {
			break
		}
	}
	if i < 0 {
		return l
	}
	return l - 1 - i
}

// TrimLeftSpace returns a subslice of the given string by slicing off all leading
// space characters.
func TrimLeftSpace(source []byte) []byte {
	return TrimLeft(source, spaces)
}

// TrimRightSpace returns a subslice of the given string by slicing off all trailing
// space characters.
func TrimRightSpace(source []byte) []byte {
	return TrimRight(source, spaces)
}

// DoFullUnicodeCaseFolding performs full unicode case folding to given bytes.
func DoFullUnicodeCaseFolding(v []byte) []byte {
	var rbuf []byte
	cob := NewCopyOnWriteBuffer(v)
	n := 0
	for i := 0; i < len(v); i++ {
		c := v[i]
		if c < 0xb5 {
			if c >= 0x41 && c <= 0x5a {
				// A-Z to a-z
				cob.Write(v[n:i])
				cob.WriteByte(c + 32)
				n = i + 1
			}
			continue
		}

		if !utf8.RuneStart(c) {
			continue
		}
		r, length := utf8.DecodeRune(v[i:])
		if r == utf8.RuneError {
			continue
		}
		folded, ok := unicodeCaseFoldings[r]
		if !ok {
			continue
		}

		cob.Write(v[n:i])
		if rbuf == nil {
			rbuf = make([]byte, 4)
		}
		for _, f := range folded {
			l := utf8.EncodeRune(rbuf, f)
			cob.Write(rbuf[:l])
		}
		i += length - 1
		n = i + 1
	}
	if cob.IsCopied() {
		cob.Write(v[n:])
	}
	return cob.Bytes()
}

// ReplaceSpaces replaces sequence of spaces with the given repl.
func ReplaceSpaces(source []byte, repl byte) []byte {
	var ret []byte
	start := -1
	for i, c := range source {
		iss := IsSpace(c)
		if start < 0 && iss {
			start = i
			continue
		} else if start >= 0 && iss {
			continue
		} else if start >= 0 {
			if ret == nil {
				ret = make([]byte, 0, len(source))
				ret = append(ret, source[:start]...)
			}
			ret = append(ret, repl)
			start = -1
		}
		if ret != nil {
			ret = append(ret, c)
		}
	}
	if start >= 0 && ret != nil {
		ret = append(ret, repl)
	}
	if ret == nil {
		return source
	}
	return ret
}

// ToRune decode given bytes start at pos and returns a rune.
func ToRune(source []byte, pos int) rune {
	i := pos
	for ; i >= 0; i-- {
		if utf8.RuneStart(source[i]) {
			break
		}
	}
	r, _ := utf8.DecodeRune(source[i:])
	return r
}

// ToValidRune returns 0xFFFD if the given rune is invalid, otherwise v.
func ToValidRune(v rune) rune {
	if v == 0 || !utf8.ValidRune(v) {
		return rune(0xFFFD)
	}
	return v
}

// ToLinkReference converts given bytes into a valid link reference string.
// ToLinkReference performs unicode case folding, trims leading and trailing spaces, converts into lower
// case and replace spaces with a single space character.
func ToLinkReference(v []byte) string {
	v = TrimLeftSpace(v)
	v = TrimRightSpace(v)
	v = DoFullUnicodeCaseFolding(v)
	return string(ReplaceSpaces(v, ' '))
}

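As a usage sketch, separate from the vendored file: ToLinkReference is what lets reference-style link labels match case-insensitively; the 'ß' case assumes the generated folding table maps it to "ss".

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	// Leading/trailing space is trimmed and runs of spaces collapse to one.
	fmt.Println(util.ToLinkReference([]byte("  Foo   Bar  "))) // expected: "foo bar"
	// Full case folding is expected to turn "Straße" into "strasse".
	fmt.Println(util.ToLinkReference([]byte("Straße")))
}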
var htmlEscapeTable = [256][]byte{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&quot;"), nil, nil, nil, []byte("&amp;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&lt;"), nil, []byte("&gt;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}

// EscapeHTMLByte returns HTML escaped bytes if the given byte should be escaped,
// otherwise nil.
func EscapeHTMLByte(b byte) []byte {
	return htmlEscapeTable[b]
}

// EscapeHTML escapes characters that should be escaped in HTML text.
func EscapeHTML(v []byte) []byte {
	cob := NewCopyOnWriteBuffer(v)
	n := 0
	for i := 0; i < len(v); i++ {
		c := v[i]
		escaped := htmlEscapeTable[c]
		if escaped != nil {
			cob.Write(v[n:i])
			cob.Write(escaped)
			n = i + 1
		}
	}
	if cob.IsCopied() {
		cob.Write(v[n:])
	}
	return cob.Bytes()
}

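As a usage sketch, separate from the vendored file: only the four bytes present in htmlEscapeTable are rewritten.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	in := []byte(`<a href="x">Tom & Jerry</a>`)
	out := util.EscapeHTML(in)
	// expected: &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;
	fmt.Println(string(out))
	fmt.Println(string(util.EscapeHTMLByte('<'))) // expected: "&lt;"
}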
// UnescapePunctuations unescapes blackslash escaped punctuations.
func UnescapePunctuations(source []byte) []byte {
	cob := NewCopyOnWriteBuffer(source)
	limit := len(source)
	n := 0
	for i := 0; i < limit; {
		c := source[i]
		if i < limit-1 && c == '\\' && IsPunct(source[i+1]) {
			cob.Write(source[n:i])
			cob.WriteByte(source[i+1])
			i += 2
			n = i
			continue
		}
		i++
	}
	if cob.IsCopied() {
		cob.Write(source[n:])
	}
	return cob.Bytes()
}

// ResolveNumericReferences resolve numeric references like '&#1234;" .
func ResolveNumericReferences(source []byte) []byte {
	cob := NewCopyOnWriteBuffer(source)
	buf := make([]byte, 6, 6)
	limit := len(source)
	ok := false
	n := 0
	for i := 0; i < limit; i++ {
		if source[i] == '&' {
			pos := i
			next := i + 1
			if next < limit && source[next] == '#' {
				nnext := next + 1
				if nnext < limit {
					nc := source[nnext]
					// code point like #x22;
					if nnext < limit && nc == 'x' || nc == 'X' {
						start := nnext + 1
						i, ok = ReadWhile(source, [2]int{start, limit}, IsHexDecimal)
						if ok && i < limit && source[i] == ';' {
							v, _ := strconv.ParseUint(BytesToReadOnlyString(source[start:i]), 16, 32)
							cob.Write(source[n:pos])
							n = i + 1
							runeSize := utf8.EncodeRune(buf, ToValidRune(rune(v)))
							cob.Write(buf[:runeSize])
							continue
						}
						// code point like #1234;
					} else if nc >= '0' && nc <= '9' {
						start := nnext
						i, ok = ReadWhile(source, [2]int{start, limit}, IsNumeric)
						if ok && i < limit && i-start < 8 && source[i] == ';' {
							v, _ := strconv.ParseUint(BytesToReadOnlyString(source[start:i]), 0, 32)
							cob.Write(source[n:pos])
							n = i + 1
							runeSize := utf8.EncodeRune(buf, ToValidRune(rune(v)))
							cob.Write(buf[:runeSize])
							continue
						}
					}
				}
			}
			i = next - 1
		}
	}
	if cob.IsCopied() {
		cob.Write(source[n:])
	}
	return cob.Bytes()
}

// ResolveEntityNames resolve entity references like '&ouml;" .
func ResolveEntityNames(source []byte) []byte {
	cob := NewCopyOnWriteBuffer(source)
	limit := len(source)
	ok := false
	n := 0
	for i := 0; i < limit; i++ {
		if source[i] == '&' {
			pos := i
			next := i + 1
			if !(next < limit && source[next] == '#') {
				start := next
				i, ok = ReadWhile(source, [2]int{start, limit}, IsAlphaNumeric)
				if ok && i < limit && source[i] == ';' {
					name := BytesToReadOnlyString(source[start:i])
					entity, ok := LookUpHTML5EntityByName(name)
					if ok {
						cob.Write(source[n:pos])
						n = i + 1
						cob.Write(entity.Characters)
						continue
					}
				}
			}
			i = next - 1
		}
	}
	if cob.IsCopied() {
		cob.Write(source[n:])
	}
	return cob.Bytes()
}

var htmlSpace = []byte("%20")

// URLEscape escape the given URL.
// If resolveReference is set true:
//   1. unescape punctuations
//   2. resolve numeric references
//   3. resolve entity references
//
// URL encoded values (%xx) are kept as is.
func URLEscape(v []byte, resolveReference bool) []byte {
	if resolveReference {
		v = UnescapePunctuations(v)
		v = ResolveNumericReferences(v)
		v = ResolveEntityNames(v)
	}
	cob := NewCopyOnWriteBuffer(v)
	limit := len(v)
	n := 0

	for i := 0; i < limit; {
		c := v[i]
		if urlEscapeTable[c] == 1 {
			i++
			continue
		}
		if c == '%' && i+2 < limit && IsHexDecimal(v[i+1]) && IsHexDecimal(v[i+1]) {
			i += 3
			continue
		}
		u8len := utf8lenTable[c]
		if u8len == 99 { // invalid utf8 leading byte, skip it
			i++
			continue
		}
		if c == ' ' {
			cob.Write(v[n:i])
			cob.Write(htmlSpace)
			i++
			n = i
			continue
		}
		if int(u8len) >= len(v) {
			u8len = int8(len(v) - 1)
		}
		if u8len == 0 {
			i++
			n = i
			continue
		}
		cob.Write(v[n:i])
		stop := i + int(u8len)
		if stop > len(v) {
			i++
			n = i
			continue
		}
		cob.Write(StringToReadOnlyBytes(url.QueryEscape(string(v[i:stop]))))
		i += int(u8len)
		n = i
	}
	if cob.IsCopied() && n < limit {
		cob.Write(v[n:])
	}
	return cob.Bytes()
}

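As a usage sketch, separate from the vendored file: spaces become %20 while existing %xx sequences pass through unchanged.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	dest := []byte("https://example.com/a b/%2Fc")
	// The space becomes %20, the existing %2F is kept as is, and everything in
	// the allowed character set passes through untouched.
	fmt.Println(string(util.URLEscape(dest, true)))
	// expected: https://example.com/a%20b/%2Fc
}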
// FindURLIndex returns a stop index value if the given bytes seem an URL.
// This function is equivalent to [A-Za-z][A-Za-z0-9.+-]{1,31}:[^<>\x00-\x20]* .
func FindURLIndex(b []byte) int {
	i := 0
	if !(len(b) > 0 && urlTable[b[i]]&7 == 7) {
		return -1
	}
	i++
	for ; i < len(b); i++ {
		c := b[i]
		if urlTable[c]&4 != 4 {
			break
		}
	}
	if i == 1 || i > 33 || i >= len(b) {
		return -1
	}
	if b[i] != ':' {
		return -1
	}
	i++
	for ; i < len(b); i++ {
		c := b[i]
		if urlTable[c]&1 != 1 {
			break
		}
	}
	return i
}

var emailDomainRegexp = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*`)

// FindEmailIndex returns a stop index value if the given bytes seem an email address.
func FindEmailIndex(b []byte) int {
	// TODO: eliminate regexps
	i := 0
	for ; i < len(b); i++ {
		c := b[i]
		if emailTable[c]&1 != 1 {
			break
		}
	}
	if i == 0 {
		return -1
	}
	if i >= len(b) || b[i] != '@' {
		return -1
	}
	i++
	if i >= len(b) {
		return -1
	}
	match := emailDomainRegexp.FindSubmatchIndex(b[i:])
	if match == nil {
		return -1
	}
	return i + match[1]
}

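As a usage sketch, separate from the vendored file: these two functions drive autolink detection.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	line := []byte("https://gitea.com/gitea/tea and more")
	if stop := util.FindURLIndex(line); stop > 0 {
		fmt.Println(string(line[:stop])) // expected: "https://gitea.com/gitea/tea"
	}
	mail := []byte("someone@example.com, ...")
	if stop := util.FindEmailIndex(mail); stop > 0 {
		fmt.Println(string(mail[:stop])) // expected: "someone@example.com"
	}
}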
var spaces = []byte(" \t\n\x0b\x0c\x0d")

var spaceTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var punctTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

// a-zA-Z0-9, ;/?:@&=+$,-_.!~*'()#
var urlEscapeTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var utf8lenTable = [256]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 99, 99, 99, 99, 99, 99, 99, 99}
var urlTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 1, 0, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
var emailTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

// UTF8Len returns a byte length of the utf-8 character.
func UTF8Len(b byte) int8 {
	return utf8lenTable[b]
}

// IsPunct returns true if the given character is a punctuation, otherwise false.
func IsPunct(c byte) bool {
	return punctTable[c] == 1
}

// IsPunct returns true if the given rune is a punctuation, otherwise false.
func IsPunctRune(r rune) bool {
	return int32(r) <= 256 && IsPunct(byte(r)) || unicode.IsPunct(r)
}

// IsSpace returns true if the given character is a space, otherwise false.
func IsSpace(c byte) bool {
	return spaceTable[c] == 1
}

// IsSpace returns true if the given rune is a space, otherwise false.
func IsSpaceRune(r rune) bool {
	return int32(r) <= 256 && IsSpace(byte(r)) || unicode.IsSpace(r)
}

// IsNumeric returns true if the given character is a numeric, otherwise false.
func IsNumeric(c byte) bool {
	return c >= '0' && c <= '9'
}

// IsHexDecimal returns true if the given character is a hexdecimal, otherwise false.
func IsHexDecimal(c byte) bool {
	return c >= '0' && c <= '9' || c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F'
}

// IsAlphaNumeric returns true if the given character is a alphabet or a numeric, otherwise false.
func IsAlphaNumeric(c byte) bool {
	return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9'
}

// A BufWriter is a subset of the bufio.Writer .
type BufWriter interface {
	io.Writer
	Available() int
	Buffered() int
	Flush() error
	WriteByte(c byte) error
	WriteRune(r rune) (size int, err error)
	WriteString(s string) (int, error)
}

// A PrioritizedValue struct holds pair of an arbitrary value and a priority.
type PrioritizedValue struct {
	// Value is an arbitrary value that you want to prioritize.
	Value interface{}
	// Priority is a priority of the value.
	Priority int
}

// PrioritizedSlice is a slice of the PrioritizedValues
type PrioritizedSlice []PrioritizedValue

// Sort sorts the PrioritizedSlice in ascending order.
func (s PrioritizedSlice) Sort() {
	sort.Slice(s, func(i, j int) bool {
		return s[i].Priority < s[j].Priority
	})
}

// Remove removes the given value from this slice.
func (s PrioritizedSlice) Remove(v interface{}) PrioritizedSlice {
	i := 0
	found := false
	for ; i < len(s); i++ {
		if s[i].Value == v {
			found = true
			break
		}
	}
	if !found {
		return s
	}
	return append(s[:i], s[i+1:]...)
}

// Prioritized returns a new PrioritizedValue.
func Prioritized(v interface{}, priority int) PrioritizedValue {
	return PrioritizedValue{v, priority}
}

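As a usage sketch, separate from the vendored file: goldmark orders parsers and renderers with PrioritizedSlice, where a lower priority value sorts first.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	s := util.PrioritizedSlice{
		util.Prioritized("linkParser", 200),
		util.Prioritized("emphasisParser", 100),
	}
	s.Sort()
	fmt.Println(s[0].Value, s[0].Priority) // expected: emphasisParser 100
	s = s.Remove("linkParser")
	fmt.Println(len(s)) // expected: 1
}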
func bytesHash(b []byte) uint64 {
	var hash uint64 = 5381
	for _, c := range b {
		hash = ((hash << 5) + hash) + uint64(c)
	}
	return hash
}

// BytesFilter is a efficient data structure for checking whether bytes exist or not.
// BytesFilter is thread-safe.
type BytesFilter interface {
	// Add adds given bytes to this set.
	Add([]byte)

	// Contains return true if this set contains given bytes, otherwise false.
	Contains([]byte) bool

	// Extend copies this filter and adds given bytes to new filter.
	Extend(...[]byte) BytesFilter
}

type bytesFilter struct {
	chars     [256]uint8
	threshold int
	slots     [][][]byte
}

// NewBytesFilter returns a new BytesFilter.
func NewBytesFilter(elements ...[]byte) BytesFilter {
	s := &bytesFilter{
		threshold: 3,
		slots:     make([][][]byte, 64),
	}
	for _, element := range elements {
		s.Add(element)
	}
	return s
}

func (s *bytesFilter) Add(b []byte) {
	l := len(b)
	m := s.threshold
	if l < s.threshold {
		m = l
	}
	for i := 0; i < m; i++ {
		s.chars[b[i]] |= 1 << uint8(i)
	}
	h := bytesHash(b) % uint64(len(s.slots))
	slot := s.slots[h]
	if slot == nil {
		slot = [][]byte{}
	}
	s.slots[h] = append(slot, b)
}

func (s *bytesFilter) Extend(bs ...[]byte) BytesFilter {
	newFilter := NewBytesFilter().(*bytesFilter)
	newFilter.chars = s.chars
	newFilter.threshold = s.threshold
	for k, v := range s.slots {
		newSlot := make([][]byte, len(v))
		copy(newSlot, v)
		newFilter.slots[k] = v
	}
	for _, b := range bs {
		newFilter.Add(b)
	}
	return newFilter
}

func (s *bytesFilter) Contains(b []byte) bool {
	l := len(b)
	m := s.threshold
	if l < s.threshold {
		m = l
	}
	for i := 0; i < m; i++ {
		if (s.chars[b[i]] & (1 << uint8(i))) == 0 {
			return false
		}
	}
	h := bytesHash(b) % uint64(len(s.slots))
	slot := s.slots[h]
	if slot == nil || len(slot) == 0 {
		return false
	}
	for _, element := range slot {
		if bytes.Equal(element, b) {
			return true
		}
	}
	return false
}
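As a usage sketch, separate from the vendored file: BytesFilter gives a cheap pre-check before comparing full byte slices.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	filter := util.NewBytesFilter([]byte("http"), []byte("https"), []byte("ftp"))
	fmt.Println(filter.Contains([]byte("https")))  // expected: true
	fmt.Println(filter.Contains([]byte("mailto"))) // expected: false
	extended := filter.Extend([]byte("mailto"))
	fmt.Println(extended.Contains([]byte("mailto"))) // expected: true
}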
13
vendor/github.com/yuin/goldmark/util/util_safe.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// +build appengine js

package util

// BytesToReadOnlyString returns a string converted from given bytes.
func BytesToReadOnlyString(b []byte) string {
	return string(b)
}

// StringToReadOnlyBytes returns bytes converted from given string.
func StringToReadOnlyBytes(s string) []byte {
	return []byte(s)
}
23
vendor/github.com/yuin/goldmark/util/util_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
// +build !appengine,!js

package util

import (
	"reflect"
	"unsafe"
)

// BytesToReadOnlyString returns a string converted from given bytes.
func BytesToReadOnlyString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// StringToReadOnlyBytes returns bytes converted from given string.
func StringToReadOnlyBytes(s string) (bs []byte) {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&bs))
	bh.Data = sh.Data
	bh.Cap = sh.Len
	bh.Len = sh.Len
	return
}
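As a usage sketch, separate from the vendored files: the unsafe variant avoids an allocation, but the returned bytes alias the string's memory and must be treated as read-only.

package main

import (
	"fmt"

	"github.com/yuin/goldmark/util"
)

func main() {
	s := "reference label"
	b := util.StringToReadOnlyBytes(s)
	// Safe: read-only use such as lookups, hashing, or comparisons.
	fmt.Println(len(b), string(b[:9])) // expected: 15 "reference"
	// Not safe: mutating b would write into the string's backing memory.
}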