-rw-r--r--  README.adoc    9
-rw-r--r--  hasp/main.go  91
-rw-r--r--  hswg/main.go  75
3 files changed, 61 insertions, 114 deletions
diff --git a/README.adoc b/README.adoc
index 62a7aac..0c0fa23 100644
--- a/README.adoc
+++ b/README.adoc
@@ -189,10 +189,11 @@ An improved replacement for autocutsel in selection synchronization "mode":
Only UTF8_STRING-convertible selections are synchronized.
-hasp -- (lib)asciidoc syntax preprocessor
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Provisional tool to make libasciidoc understand more syntax, namely
-two-line/underlined titles for my Gitea projects.
+hswg -- static website generator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Wraps libasciidoc to make it understand more syntax, namely two-line/underlined
+titles, and can be run either as an AsciiDoc processor for my Gitea, or as
+a trivial wiki-like site generator.
ht -- terminal emulator
~~~~~~~~~~~~~~~~~~~~~~~
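For reference, the stdin-to-stdout "AsciiDoc processor" mode this entry describes boils down to a single libasciidoc call. A minimal standalone sketch (not part of this commit) using the same calls that appear in the code below, without the two-line-title preprocessing the tool layers on top:

// Convert an AsciiDoc document on stdin to a full HTML page on stdout.
package main

import (
	"os"

	"github.com/bytesparadise/libasciidoc"
	"github.com/bytesparadise/libasciidoc/pkg/configuration"
)

func main() {
	_, err := libasciidoc.ConvertToHTML(os.Stdin, os.Stdout,
		configuration.NewConfiguration(configuration.WithHeaderFooter(true)))
	if err != nil {
		panic(err)
	}
}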
diff --git a/hasp/main.go b/hasp/main.go
deleted file mode 100644
index dd2ba7a..0000000
--- a/hasp/main.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Program hasp is a preprocessor for libasciidoc to make it understand
-// two-line/underlined titles, intended to be used in Gitea.
-package main
-
-import (
- "bytes"
- "encoding/xml"
- "io"
- "io/ioutil"
- "os"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "github.com/bytesparadise/libasciidoc"
- "github.com/bytesparadise/libasciidoc/pkg/configuration"
-)
-
-// isTitle returns the title level if the lines seem to form a title,
-// zero otherwise. Input lines may include trailing newlines.
-func isTitle(line1, line2 []byte) int {
- // This is a very naïve method, we should target graphemes (thus at least
- // NFC normalize the lines first) and account for wide characters.
- diff := utf8.RuneCount(line1) - utf8.RuneCount(line2)
- if len(line2) < 2 || diff < -1 || diff > 1 {
- return 0
- }
-
- // "Don't be fooled by back-to-back delimited blocks."
- // Still gets fooled by other things, though.
- if bytes.IndexFunc(line1, func(r rune) bool {
- return unicode.IsLetter(r) || unicode.IsNumber(r)
- }) < 0 {
- return 0
- }
-
- // The underline must be homogeneous.
- for _, r := range bytes.TrimRight(line2, "\r\n") {
- if r != line2[0] {
- return 0
- }
- }
- return 1 + strings.IndexByte("=-~^+", line2[0])
-}
-
-func writeLine(w *io.PipeWriter, cur, next []byte) []byte {
- if level := isTitle(cur, next); level > 0 {
- w.Write(append(bytes.Repeat([]byte{'='}, level), ' '))
- next = nil
- }
- w.Write(cur)
- return next
-}
-
-// ConvertTitles converts AsciiDoc two-line (underlined) titles to single-line.
-func ConvertTitles(w *io.PipeWriter, input []byte) {
- var last []byte
- for _, cur := range bytes.SplitAfter(input, []byte{'\n'}) {
- last = writeLine(w, last, cur)
- }
- writeLine(w, last, nil)
-}
-
-func main() {
- input, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- panic(err)
- }
-
- pr, pw := io.Pipe()
- go func() {
- defer pw.Close()
- ConvertTitles(pw, input)
- }()
-
- // io.Copy(os.Stdout, pr)
- // return
-
- config := configuration.NewConfiguration(
- configuration.WithHeaderFooter(true))
- _, err = libasciidoc.ConvertToHTML(pr, os.Stdout, config)
- if err != nil {
- // Fallback: output all the text sanitized for direct inclusion.
- os.Stdout.WriteString("<pre>")
- for _, line := range bytes.Split(input, []byte{'\n'}) {
- xml.EscapeText(os.Stdout, line)
- os.Stdout.WriteString("\n")
- }
- os.Stdout.WriteString("</pre>")
- }
-}
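The two-line title handling removed here lives on in hswg, where the same ConvertTitles helper does the rewriting. A minimal sketch of what that conversion produces, written as a hypothetical test beside hswg/main.go; the file name and the sample document are illustrative only, not part of this commit:

// main_test.go (hypothetical) — exercises ConvertTitles from hswg/main.go.
package main

import (
	"io"
	"io/ioutil"
	"testing"
)

func TestConvertTitles(t *testing.T) {
	// An underlined (two-line) AsciiDoc title followed by ordinary text.
	input := []byte("Hello\n=====\n\nSome text.\n")

	pr, pw := io.Pipe()
	go func() {
		defer pw.Close()
		ConvertTitles(pw, input)
	}()

	got, err := ioutil.ReadAll(pr)
	if err != nil {
		t.Fatal(err)
	}

	// The underline is consumed and the title becomes single-line,
	// which libasciidoc understands natively.
	if want := "= Hello\n\nSome text.\n"; string(got) != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}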
diff --git a/hswg/main.go b/hswg/main.go
index 595bb4a..2a46c6e 100644
--- a/hswg/main.go
+++ b/hswg/main.go
@@ -4,6 +4,7 @@ package main
import (
"bytes"
+ "encoding/xml"
"fmt"
"io"
"io/ioutil"
@@ -67,6 +68,41 @@ func ConvertTitles(w *io.PipeWriter, input []byte) {
writeLine(w, last, nil)
}
+// Render converts an io.Reader with an AsciiDoc document to HTML. So long as
+// the file could be read at all, it will always return a non-empty document.
+func Render(doc io.Reader, config configuration.Configuration) (
+ html *bytes.Buffer, meta types.Metadata, err error) {
+ html = bytes.NewBuffer(nil)
+
+ var input []byte
+ if input, err = ioutil.ReadAll(doc); err != nil {
+ return
+ }
+
+ pr, pw := io.Pipe()
+ go func() {
+ defer pw.Close()
+ ConvertTitles(pw, input)
+ }()
+
+ // io.Copy(os.Stdout, pr)
+ // return
+
+ meta, err = libasciidoc.ConvertToHTML(pr, html, config)
+ if err != nil {
+ // Fallback: output all the text sanitized for direct inclusion.
+ html.Reset()
+
+ _, _ = html.WriteString("<pre>")
+ for _, line := range bytes.Split(input, []byte{'\n'}) {
+ _ = xml.EscapeText(html, line)
+ _, _ = html.WriteString("\n")
+ }
+ _, _ = html.WriteString("</pre>")
+ }
+ return
+}
+
// entry contains all context information about a single page.
type entry struct {
path string // path
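The Render helper added above is the piece both modes share. A usage sketch follows; renderOne is hypothetical, not part of this commit, and leans on the package's existing imports (os, log, and libasciidoc's configuration):

// renderOne (hypothetical): render a single file from disk with the same
// options main() passes further down in this diff.
func renderOne(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	i, err := f.Stat()
	if err != nil {
		return err
	}

	html, meta, err := Render(f, configuration.NewConfiguration(
		configuration.WithFilename(path),
		configuration.WithLastUpdated(i.ModTime()),
	))
	if err != nil {
		// Render has already fallen back to a sanitized <pre> dump of the
		// input, so the document is still usable; just note the failure.
		log.Println(path+":", err)
	} else if meta.Title != "" {
		log.Println("title:", meta.Title)
	}
	_, _ = os.Stdout.Write(html.Bytes())
	return nil
}

Keeping the fallback inside Render means callers can always emit something readable, which is exactly what singleFile below relies on when it logs the error and copies the buffer anyway.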
@@ -106,7 +142,23 @@ func expand(m *map[string]*entry, name string, chunk []byte) []byte {
})
}
+func singleFile() {
+ html, meta, err := Render(os.Stdin, configuration.NewConfiguration())
+ if err != nil {
+ log.Println(err)
+ } else if meta.Title != "" {
+ _, _ = os.Stdout.WriteString("<h1>")
+ _ = xml.EscapeText(os.Stdout, []byte(meta.Title))
+ _, _ = os.Stdout.WriteString("</h1>\n")
+ }
+ _, _ = io.Copy(os.Stdout, html)
+}
+
func main() {
+ if len(os.Args) < 2 {
+ singleFile()
+ return
+ }
if len(os.Args) < 3 {
log.Fatalf("usage: %s TEMPLATE GLOB...\n", os.Args[0])
}
@@ -146,32 +198,17 @@ func main() {
e.mtime = i.ModTime()
}
- input, err := ioutil.ReadAll(f)
- if err != nil {
- log.Fatalln(err)
- }
-
- pr, pw := io.Pipe()
- go func() {
- defer pw.Close()
- ConvertTitles(pw, input)
- }()
-
- config := configuration.NewConfiguration(
- configuration.WithHeaderFooter(false),
+ var html *bytes.Buffer
+ if html, e.metadata, err = Render(f, configuration.NewConfiguration(
configuration.WithFilename(e.path),
configuration.WithLastUpdated(e.mtime),
- )
-
- buf := bytes.NewBuffer(nil)
- e.metadata, err = libasciidoc.ConvertToHTML(pr, buf, config)
- if err != nil {
+ )); err != nil {
log.Fatalln(err)
}
// Expand LinkWords anywhere between <tags>.
// We want something like the inverse of Regexp.ReplaceAllStringFunc.
- raw, last, expanded := buf.Bytes(), 0, bytes.NewBuffer(nil)
+ raw, last, expanded := html.Bytes(), 0, bytes.NewBuffer(nil)
for _, where := range tagRE.FindAllIndex(raw, -1) {
_, _ = expanded.Write(expand(&entries, name, raw[last:where[0]]))
_, _ = expanded.Write(raw[where[0]:where[1]])
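The loop here transforms only the text between tag matches and copies the matches through untouched, which is the "inverse of Regexp.ReplaceAllStringFunc" the comment mentions. A self-contained sketch of the same pattern with a trivial transform; replaceBetween, its tag regexp, and the sample input are illustrative, not taken from the tool:

// Uppercase the text between <tags> while passing the tags through verbatim.
package main

import (
	"bytes"
	"fmt"
	"regexp"
)

var tagRE = regexp.MustCompile(`<[^<>]+>`)

// replaceBetween applies f to every stretch of text outside the matches of
// tagRE and leaves the matches themselves untouched — the inverse of what
// Regexp.ReplaceAllFunc does.
func replaceBetween(raw []byte, f func([]byte) []byte) []byte {
	last, out := 0, bytes.NewBuffer(nil)
	for _, where := range tagRE.FindAllIndex(raw, -1) {
		_, _ = out.Write(f(raw[last:where[0]]))  // text before this match
		_, _ = out.Write(raw[where[0]:where[1]]) // the match, verbatim
		last = where[1]
	}
	_, _ = out.Write(f(raw[last:])) // trailing text after the last match
	return out.Bytes()
}

func main() {
	raw := []byte("<p>hello <b>world</b></p>")
	fmt.Printf("%s\n", replaceBetween(raw, bytes.ToUpper))
	// Output: <p>HELLO <b>WORLD</b></p>
}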