Compare commits


19 Commits

Author SHA1 Message Date
Wim
051e6e76e9 Release v1.25.1 (#1832) 2022-05-09 23:28:02 +02:00
Wim
1e55dd47f2 Update dependencies (#1831) 2022-05-09 23:00:23 +02:00
Andy
700b95546b Improve Slack attachments formatting (slack) (#1807)
* Improve Slack attachments formatting (slack)

* Add TitleLink
* Add Footer

* Fix linter issues
2022-05-09 22:56:19 +02:00
Wim
2fa96ec0ed Add KeepQuotedReply option for matrix to fix regression (#1823)
Matrix quotes replies, and as of matterbridge 1.24.0 we strip those because they cause
issues with bridges that support threading and have PreserveThreading enabled.

Introduced via 9a8ce9b17e

But if you use, for example, mattermost or discord with webhooks, you'll need to enable
this option if you want something that looks like a reply from matrix.
See issues:
- https://github.com/42wim/matterbridge/issues/1819
- https://github.com/42wim/matterbridge/issues/1780
2022-05-06 23:32:25 +02:00
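To make the regression fix above concrete, here is a minimal, self-contained sketch of the behaviour KeepQuotedReply toggles. The standalone helper and its name are illustrative only; the actual change lives in the matrix bridge hunk further down this page.

```go
package main

import (
	"fmt"
	"strings"
)

// stripQuotedReply is a hypothetical standalone helper mirroring the matrix
// bridge change: quoted "> " lines are only stripped from a reply body when
// keepQuotedReply is false.
func stripQuotedReply(body string, keepQuotedReply bool) string {
	if keepQuotedReply {
		return body
	}
	for strings.HasPrefix(body, "> ") {
		lineIdx := strings.IndexRune(body, '\n')
		if lineIdx == -1 {
			body = ""
		} else {
			body = body[(lineIdx + 1):]
		}
	}
	return body
}

func main() {
	reply := "> earlier message\nmy answer"
	fmt.Println(stripQuotedReply(reply, false)) // prints "my answer"
	fmt.Println(stripQuotedReply(reply, true))  // keeps the quoted line intact
}
```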
Wim
81e6f75aa4 Update dependencies (#1822) 2022-05-02 00:10:54 +02:00
Wim
888c8b9a84 Add space between filename and URL (mattermost). Fixes #1820 2022-05-01 23:28:51 +02:00
Wim
e775a8a22e Revert "Clear existing IRC event handlers before connecting new ones (#1795)"
This reverts commit f044b948e2.

Fixes #1815
2022-05-01 22:28:42 +02:00
Alexander
99fbd9cae6 Fix telegram message deletion request (#1818) 2022-05-01 22:00:50 +02:00
Wim
67adad3e08 Update dependencies (#1813) 2022-04-25 23:50:10 +02:00
Wim
2fca3c7563 Add CGO_ENABLED info also to whatsappmulti build 2022-04-24 16:52:25 +02:00
Wim
c3573f1a46 Add CGO_ENABLED info to README 2022-04-24 16:51:16 +02:00
Daniil Suvorov
ee932a9f8e Fix UploadMessagesPhoto for vk community chat (vk) (#1812) 2022-04-22 23:37:09 +02:00
ValdikSS
ce18c948e6 Do not apply any markup to URL entities (telegram) (#1808)
The handleEntities code uses a simple modification offset, which does not
allow detecting whether the offset is placed before or after
the element in the already modified string.
This works fine in most cases, as the Telegram server always sorts the
elements by offset, in ascending order.
However, this is not the case when a modification, for example bold
text, is applied to a URL. In that case, the offsets of the URL and the
bold entity are equal, which triggers the issue.

This commit introduces an additional hack for this case, stripping
any entities which intersect with the URL.
2022-04-22 01:00:57 +02:00
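A toy illustration of the guard this commit adds. The entity struct below is a simplified stand-in for the real tgbotapi types and the sample entities are invented; it only shows why an entity sharing the URL's offset has to be skipped.

```go
package main

import "fmt"

// entity is a simplified stand-in for tgbotapi.MessageEntity, only here to
// illustrate the skip logic described in the commit message.
type entity struct {
	Type   string
	Offset int
	Length int
}

func main() {
	// A bold entity covers exactly the same range as a text_link entity, so
	// both report the same offset into the original message.
	entities := []entity{
		{Type: "text_link", Offset: 0, Length: 11},
		{Type: "bold", Offset: 0, Length: 11},
		{Type: "bold", Offset: 20, Length: 4},
	}

	prevLinkOffset := -1
	for _, e := range entities {
		if e.Type == "text_link" {
			// ... the real code appends " (<url>)" after the link text here ...
			prevLinkOffset = e.Offset
		}
		if e.Offset == prevLinkOffset {
			// Strip (skip) any entity that intersects the URL at the same offset.
			continue
		}
		fmt.Println("would apply markup for", e.Type, "at offset", e.Offset)
	}
	// Only the bold entity at offset 20 gets markup applied.
}
```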
Wim
7bc93c5506 Do not modify .webm files (telegram). Fixes #17** (#1802) 2022-04-17 13:35:11 +02:00
Wim
d7cad3b404 Update matterbridge/gomatrix. Fixes #1772 (#1803) 2022-04-12 00:59:30 +02:00
Wim
7740a362c9 Fix build command for latest stable 2022-04-12 00:39:06 +02:00
Wim
281ef53e7d Update dependencies (#1800) 2022-04-12 00:30:21 +02:00
Bryan Davis
f044b948e2 Clear existing IRC event handlers before connecting new ones (#1795)
Clear all existing IRC event handler registrations before registering
new handlers in case we are connecting via a BNC and are seeing
a reconnect.

Fixes #1564
2022-04-07 23:00:17 +02:00
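For background, a toy illustration of the duplicate-handler problem this (now reverted, see e775a8a22e above) change was addressing. This is not the girc API, just a generic handler registry sketch.

```go
package main

import "fmt"

// registry is a toy stand-in for an IRC client's event-handler table.
type registry struct {
	handlers map[string][]func(msg string)
}

func (r *registry) add(event string, h func(msg string)) {
	r.handlers[event] = append(r.handlers[event], h)
}

func (r *registry) clear() {
	r.handlers = map[string][]func(msg string){}
}

func (r *registry) fire(event, msg string) {
	for _, h := range r.handlers[event] {
		h(msg)
	}
}

func connect(r *registry) {
	// Clearing first is the behaviour #1795 added; without it, every reconnect
	// seen through a BNC registers a second copy of each handler.
	r.clear()
	r.add("PRIVMSG", func(msg string) { fmt.Println("relay:", msg) })
}

func main() {
	r := &registry{handlers: map[string][]func(msg string){}}
	connect(r) // initial connection
	connect(r) // reconnect via the bouncer
	r.fire("PRIVMSG", "hello")
	// With clear(): "relay: hello" is printed once.
	// Remove the clear() call and the same message would be relayed twice.
}
```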
Wim
32474a5f4d Bump version 2022-04-07 22:51:22 +02:00
718 changed files with 674832 additions and 167320 deletions


@@ -172,7 +172,7 @@ See <https://github.com/42wim/matterbridge/wiki>
 ### Binaries
-- Latest stable release [v1.25.0](https://github.com/42wim/matterbridge/releases/latest)
+- Latest stable release [v1.25.1](https://github.com/42wim/matterbridge/releases/latest)
 - Development releases (follows master) can be downloaded [here](https://github.com/42wim/matterbridge/actions) selecting the latest green build and then artifacts.
 To install or upgrade just download the latest [binary](https://github.com/42wim/matterbridge/releases/latest). On \*nix platforms you may need to make the binary executable - you can do this by running `chmod a+x` on the binary (example: `chmod a+x matterbridge-1.24.1-linux-64bit`). After downloading (and making the binary executable, if necessary), follow the instructions on the [howto](https://github.com/42wim/matterbridge/wiki/How-to-create-your-config) for a step by step walkthrough for creating your configuration.
@@ -191,12 +191,14 @@ If you really want to build from source, follow these instructions:
 Go 1.17+ is required. Make sure you have [Go](https://golang.org/doc/install) properly installed.
 Building the binary with **all** the bridges enabled needs about 3GB RAM to compile.
-You can reduce this memory requirement to 0,5GB RAM by adding the `nomsteams` tag if you don't need/use the Microsoft Teams bridge
+You can reduce this memory requirement to 0,5GB RAM by adding the `nomsteams` tag if you don't need/use the Microsoft Teams bridge.
+Matterbridge can be build without gcc/c-compiler: If you're running on windows first run `set CGO_ENABLED=0` on other platforms you prepend `CGO_ENABLED=0` to the `go build` command. (eg `CGO_ENABLED=0 go install github.com/42wim/matterbridge`)
 To install the latest stable run:
 ```bash
-go install github.com/42wim/matterbridge@17da95b094e4bb433e5fe240fa42067d94d908c1
+go install github.com/42wim/matterbridge
 ```
 To install the latest dev run:
@@ -208,7 +210,7 @@ go install github.com/42wim/matterbridge@master
 To install the latest stable run without msteams or zulip bridge:
 ```bash
-go install -tags nomsteams,nozulip github.com/42wim/matterbridge@17da95b094e4bb433e5fe240fa42067d94d908c1
+go install -tags nomsteams,nozulip github.com/42wim/matterbridge
 ```
 You should now have matterbridge binary in the ~/go/bin directory:
@@ -223,6 +225,8 @@ matterbridge
 Because the library we use for Whatsapp multidevice support includes a GPL3 library we can not provide you binaries.
 (as this would require the Matterbridge to change it license to GPL)
+Matterbridge can be build without gcc/c-compiler: If you're running on windows first run `set CGO_ENABLED=0` on other platforms you prepend `CGO_ENABLED=0` to the `go build` command. (eg `CGO_ENABLED=0 go install github.com/42wim/matterbridge`)
 So this means you have to build it yourself using the instructions below:
 ```bash


@@ -272,8 +272,6 @@ func (b *Bdiscord) Send(msg config.Message) (string, error) {
     // Handle prefix hint for unthreaded messages.
     if msg.ParentNotFound() {
         msg.ParentID = ""
-        msg.Text = strings.TrimPrefix(msg.Text, "\n")
-        msg.Text = fmt.Sprintf("> %s %s", msg.Username, msg.Text)
     }

     // Use webhook to send the message


@@ -428,12 +428,15 @@ func (b *Bmatrix) handleReply(ev *matrix.Event, rmsg config.Message) bool {
     }

     body := rmsg.Text
-    for strings.HasPrefix(body, "> ") {
-        lineIdx := strings.IndexRune(body, '\n')
-        if lineIdx == -1 {
-            body = ""
-        } else {
-            body = body[(lineIdx + 1):]
+
+    if !b.GetBool("keepquotedreply") {
+        for strings.HasPrefix(body, "> ") {
+            lineIdx := strings.IndexRune(body, '\n')
+            if lineIdx == -1 {
+                body = ""
+            } else {
+                body = body[(lineIdx + 1):]
+            }
         }
     }


@@ -183,6 +183,7 @@ func (b *Bmattermost) sendWebhook(msg config.Message) (string, error) {
     if b.GetBool("PrefixMessagesWithNick") {
         msg.Text = msg.Username + msg.Text
     }
     if msg.Extra != nil {
         // this sends a message only if we received a config.EVENT_FILE_FAILURE_SIZE
         for _, rmsg := range helper.HandleExtra(&msg, b.General) {
@@ -206,7 +207,7 @@ func (b *Bmattermost) sendWebhook(msg config.Message) (string, error) {
         for _, f := range msg.Extra["file"] {
             fi := f.(config.FileInfo)
             if fi.URL != "" {
-                msg.Text += fi.URL
+                msg.Text += " " + fi.URL
             }
         }
     }


@@ -282,6 +282,13 @@ func (b *Bslack) handleStatusEvent(ev *slack.MessageEvent, rmsg *config.Message)
     return false
 }

+func getMessageTitle(attach *slack.Attachment) string {
+    if attach.TitleLink != "" {
+        return fmt.Sprintf("[%s](%s)\n", attach.Title, attach.TitleLink)
+    }
+    return attach.Title
+}
+
 func (b *Bslack) handleAttachments(ev *slack.MessageEvent, rmsg *config.Message) {
     // File comments are set by the system (because there is no username given).
     if ev.SubType == sFileComment {
@@ -290,12 +297,15 @@ func (b *Bslack) handleAttachments(ev *slack.MessageEvent, rmsg *config.Message)
     // See if we have some text in the attachments.
     if rmsg.Text == "" {
-        for _, attach := range ev.Attachments {
+        for i, attach := range ev.Attachments {
             if attach.Text != "" {
                 if attach.Title != "" {
-                    rmsg.Text = attach.Title + "\n"
+                    rmsg.Text = getMessageTitle(&ev.Attachments[i])
                 }
                 rmsg.Text += attach.Text
+                if attach.Footer != "" {
+                    rmsg.Text += "\n\n" + attach.Footer
+                }
             } else {
                 rmsg.Text = attach.Fallback
             }
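Roughly what the new attachment formatting produces, sketched with a local stand-in struct instead of slack.Attachment and invented sample values:

```go
package main

import "fmt"

// attachment is a local stand-in for the few slack.Attachment fields used here.
type attachment struct {
	Title     string
	TitleLink string
	Text      string
	Footer    string
}

// messageTitle mirrors the new getMessageTitle helper: if a TitleLink is set,
// the title becomes a markdown link.
func messageTitle(a attachment) string {
	if a.TitleLink != "" {
		return fmt.Sprintf("[%s](%s)\n", a.Title, a.TitleLink)
	}
	return a.Title
}

func main() {
	a := attachment{
		Title:     "Build failed",
		TitleLink: "https://ci.example.com/build/42",
		Text:      "3 tests failing",
		Footer:    "CI bot",
	}

	text := messageTitle(a) + a.Text
	if a.Footer != "" {
		text += "\n\n" + a.Footer
	}
	fmt.Println(text)
	// [Build failed](https://ci.example.com/build/42)
	// 3 tests failing
	//
	// CI bot
}
```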


@@ -127,7 +127,7 @@ var (
     mentionRE = regexp.MustCompile(`<@([a-zA-Z0-9]+)>`)
     channelRE = regexp.MustCompile(`<#[a-zA-Z0-9]+\|(.+?)>`)
     variableRE = regexp.MustCompile(`<!((?:subteam\^)?[a-zA-Z0-9]+)(?:\|@?(.+?))?>`)
-    urlRE = regexp.MustCompile(`<(.*?)(\|.*?)?>`)
+    urlRE = regexp.MustCompile(`<([^<\|]+)\|([^>]+)>`)
     codeFenceRE = regexp.MustCompile(`(?m)^` + "```" + `\w+$`)
     topicOrPurposeRE = regexp.MustCompile(`(?s)(@.+) (cleared|set)(?: the)? channel (topic|purpose)(?:: (.*))?`)
 )
@@ -181,14 +181,7 @@ func (b *Bslack) replaceVariable(text string) string {
 // @see https://api.slack.com/docs/message-formatting#linking_to_urls
 func (b *Bslack) replaceURL(text string) string {
-    for _, r := range urlRE.FindAllStringSubmatch(text, -1) {
-        if len(strings.TrimSpace(r[2])) == 1 { // A display text separator was found, but the text was blank
-            text = strings.Replace(text, r[0], "", 1)
-        } else {
-            text = strings.Replace(text, r[0], r[1], 1)
-        }
-    }
-    return text
+    return urlRE.ReplaceAllString(text, "[${2}](${1})")
 }

 func (b *Bslack) replaceb0rkedMarkDown(text string) string {
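The effect of the new urlRE can be seen in isolation; the input string below is an invented example:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The new urlRE from the hunk above: only matches <url|label> pairs and
	// rewrites them as markdown links, instead of iterating over every <...> token.
	urlRE := regexp.MustCompile(`<([^<\|]+)\|([^>]+)>`)

	in := "see <https://github.com/42wim/matterbridge|matterbridge> for details"
	out := urlRE.ReplaceAllString(in, "[${2}](${1})")
	fmt.Println(out)
	// see [matterbridge](https://github.com/42wim/matterbridge) for details
}
```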


@@ -385,7 +385,7 @@ func (b *Btelegram) getDownloadInfo(id string, suffix string, urlpart bool) (str
         urlPart := strings.Split(url, "/")
         name = urlPart[len(urlPart)-1]
     }
-    if suffix != "" && !strings.HasSuffix(name, suffix) {
+    if suffix != "" && !strings.HasSuffix(name, suffix) && !strings.HasSuffix(name, ".webm") {
         name += suffix
     }
     text := " " + url
@@ -404,7 +404,7 @@ func (b *Btelegram) handleDelete(msg *config.Message, chatid int64) (string, err
     }
     cfg := tgbotapi.NewDeleteMessage(chatid, msgid)
-    _, err = b.c.Send(cfg)
+    _, err = b.c.Request(cfg)
     return "", err
 }
@@ -517,8 +517,8 @@ func (b *Btelegram) handleEntities(rmsg *config.Message, message *tgbotapi.Messa
     }
     indexMovedBy := 0
+    prevLinkOffset := -1
-    // for now only do URL replacements
     for _, e := range message.Entities {
         asRunes := utf16.Encode([]rune(rmsg.Text))
@@ -537,6 +537,11 @@ func (b *Btelegram) handleEntities(rmsg *config.Message, message *tgbotapi.Messa
         }
         rmsg.Text = string(utf16.Decode(asRunes[:offset+e.Length])) + " (" + url.String() + ")" + string(utf16.Decode(asRunes[offset+e.Length:]))
         indexMovedBy += len(url.String()) + 3
+        prevLinkOffset = e.Offset
+    }
+
+    if e.Offset == prevLinkOffset {
+        continue
     }
     if e.Type == "code" {


@@ -64,7 +64,7 @@ func (b *Bvk) Connect() error {
     go func() {
         err := b.lp.Run()
         if err != nil {
-            b.Log.Fatal("Enable longpoll in group management")
+            b.Log.WithError(err).Fatal("Enable longpoll in group management")
         }
     }()
@@ -223,7 +223,7 @@ func (b *Bvk) uploadFiles(extra map[string][]interface{}, peerID int) (string, s
     }
     a, err := b.uploadFile(fi, peerID)
     if err != nil {
-        b.Log.Error("File upload error ", fi.Name)
+        b.Log.WithError(err).Error("File upload error ", fi.Name)
     }
     attachments = append(attachments, a)
@@ -237,7 +238,8 @@ func (b *Bvk) uploadFile(file config.FileInfo, peerID int) (string, error) {
     photoRE := regexp.MustCompile(".(jpg|jpe|png)$")
     if photoRE.MatchString(file.Name) {
-        p, err := b.c.UploadMessagesPhoto(peerID, r)
+        // BUG(VK): for community chat peerID=0
+        p, err := b.c.UploadMessagesPhoto(0, r)
         if err != nil {
             return "", err
         }


@@ -1,3 +1,23 @@
+# v1.25.1
+
+## Enhancements
+
+- matrix: Add KeepQuotedReply option for matrix to fix regression (#1823)
+- slack: Improve Slack attachments formatting (slack) (#1807)
+
+## Bugfix
+
+- general: Update dependencies (#1813,#1822,#1833)
+- mattermost: Add space between filename and URL (mattermost). Fixes #1820
+- matrix: Update matterbridge/gomatrix. Fixes #1772 (#1803)
+- telegram: Do not modify .webm files (telegram). Fixes #1788 (#1802)
+- telegram: Do not apply any markup to URL entities (telegram) (#1808)
+- telegram: Fix telegram message deletion request (#1818)
+- vk: Fix UploadMessagesPhoto for vk community chat (vk) (#1812)
+
+This release couldn't exist without the following contributors:
+@bd808, @chugunov, @sas1024, @SevereCloud, @ValdikSS
+
 # v1.25.0
 ## Breaking changes

go.mod

@@ -6,56 +6,56 @@ require (
github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560 github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
github.com/SevereCloud/vksdk/v2 v2.13.1 github.com/SevereCloud/vksdk/v2 v2.14.0
github.com/bwmarrin/discordgo v0.24.0 github.com/bwmarrin/discordgo v0.25.0
github.com/d5/tengo/v2 v2.10.1 github.com/d5/tengo/v2 v2.10.1
github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew v1.1.1
github.com/fsnotify/fsnotify v1.5.1 github.com/fsnotify/fsnotify v1.5.4
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1
github.com/gomarkdown/markdown v0.0.0-20220310201231-552c6011c0b8 github.com/gomarkdown/markdown v0.0.0-20220509074759-a57bf950ab8c
github.com/google/gops v0.3.22 github.com/google/gops v0.3.23
github.com/gorilla/schema v1.2.0 github.com/gorilla/schema v1.2.0
github.com/gorilla/websocket v1.5.0 github.com/gorilla/websocket v1.5.0
github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa
github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru v0.5.4
github.com/jpillora/backoff v1.0.0 github.com/jpillora/backoff v1.0.0
github.com/keybase/go-keybase-chat-bot v0.0.0-20211201215354-ee4b23828b55 github.com/keybase/go-keybase-chat-bot v0.0.0-20220322223021-75d497527469
github.com/kyokomi/emoji/v2 v2.2.9 github.com/kyokomi/emoji/v2 v2.2.9
github.com/labstack/echo/v4 v4.7.2 github.com/labstack/echo/v4 v4.7.2
github.com/lrstanley/girc v0.0.0-20220321215535-9664730c7858 github.com/lrstanley/girc v0.0.0-20220507183218-96757fe3d2a2
github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696 github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696
github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be
github.com/matterbridge/gomatrix v0.0.0-20220205235239-607eb9ee6419 github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75 github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
github.com/matterbridge/matterclient v0.0.0-20211107234719-faca3cd42315 github.com/matterbridge/matterclient v0.0.0-20220430213656-07aca2731bc9
github.com/mattermost/mattermost-server/v5 v5.39.3 github.com/mattermost/mattermost-server/v5 v5.39.3
github.com/mattermost/mattermost-server/v6 v6.5.0 github.com/mattermost/mattermost-server/v6 v6.6.1
github.com/mattn/godown v0.0.1 github.com/mattn/godown v0.0.1
github.com/mdp/qrterminal v1.0.1 github.com/mdp/qrterminal v1.0.1
github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9 github.com/nelsonken/gomf v0.0.0-20190423072027-c65cc0469e94
github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c
github.com/rs/xid v1.3.0 github.com/rs/xid v1.4.0
github.com/russross/blackfriday v1.6.0 github.com/russross/blackfriday v1.6.0
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca
github.com/shazow/ssh-chat v1.10.1 github.com/shazow/ssh-chat v1.10.1
github.com/sirupsen/logrus v1.8.1 github.com/sirupsen/logrus v1.8.1
github.com/slack-go/slack v0.10.2 github.com/slack-go/slack v0.10.3
github.com/spf13/viper v1.10.1 github.com/spf13/viper v1.11.0
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.1
github.com/vincent-petithory/dataurl v1.0.0 github.com/vincent-petithory/dataurl v1.0.0
github.com/writeas/go-strip-markdown v2.0.1+incompatible github.com/writeas/go-strip-markdown v2.0.1+incompatible
github.com/yaegashi/msgraph.go v0.1.4 github.com/yaegashi/msgraph.go v0.1.4
github.com/zfjagann/golang-ring v0.0.0-20210116075443-7c86fdb43134 github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
go.mau.fi/whatsmeow v0.0.0-20220329131721-9f73bc00d158 go.mau.fi/whatsmeow v0.0.0-20220504135614-f1f2a9d231fb
golang.org/x/image v0.0.0-20220302094943-723b81ca9867 golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
golang.org/x/text v0.3.7 golang.org/x/text v0.3.7
gomod.garykim.dev/nc-talk v0.3.0 gomod.garykim.dev/nc-talk v0.3.0
google.golang.org/protobuf v1.27.1 google.golang.org/protobuf v1.28.0
gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376 gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376
layeh.com/gumble v0.0.0-20200818122324-146f9205029b layeh.com/gumble v0.0.0-20200818122324-146f9205029b
modernc.org/sqlite v1.15.4 modernc.org/sqlite v1.17.2
) )
require ( require (
@@ -80,10 +80,10 @@ require (
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
github.com/klauspost/compress v1.14.2 // indirect github.com/klauspost/compress v1.15.1 // indirect
github.com/klauspost/cpuid/v2 v2.0.11 // indirect github.com/klauspost/cpuid/v2 v2.0.11 // indirect
github.com/labstack/gommon v0.3.1 // indirect github.com/labstack/gommon v0.3.1 // indirect
github.com/magiconair/properties v1.8.5 // indirect github.com/magiconair/properties v1.8.6 // indirect
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect
github.com/mattermost/logr v1.0.13 // indirect github.com/mattermost/logr v1.0.13 // indirect
@@ -93,7 +93,7 @@ require (
github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.21 // indirect github.com/minio/minio-go/v7 v7.0.23 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect
@@ -105,6 +105,7 @@ require (
github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pborman/uuid v1.2.1 // indirect github.com/pborman/uuid v1.2.1 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
github.com/philhofer/fwd v1.1.1 // indirect github.com/philhofer/fwd v1.1.1 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -115,7 +116,7 @@ require (
github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4 // indirect github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4 // indirect
github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 // indirect github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 // indirect
github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 // indirect github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 // indirect
github.com/spf13/afero v1.6.0 // indirect github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.4.1 // indirect github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
@@ -128,29 +129,29 @@ require (
github.com/wiggin77/cfg v1.0.2 // indirect github.com/wiggin77/cfg v1.0.2 // indirect
github.com/wiggin77/merror v1.0.3 // indirect github.com/wiggin77/merror v1.0.3 // indirect
github.com/wiggin77/srslog v1.0.1 // indirect github.com/wiggin77/srslog v1.0.1 // indirect
go.mau.fi/libsignal v0.0.0-20220315232917-871a40435d3b // indirect go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0 // indirect
go.uber.org/atomic v1.9.0 // indirect go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.7.0 // indirect go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.17.0 // indirect go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
golang.org/x/mod v0.5.1 // indirect golang.org/x/mod v0.5.1 // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
golang.org/x/sys v0.0.0-20220207234003-57398862261d // indirect golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
golang.org/x/tools v0.1.9 // indirect golang.org/x/tools v0.1.9 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
gopkg.in/ini.v1 v1.66.3 // indirect gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
lukechampine.com/uint128 v1.1.1 // indirect lukechampine.com/uint128 v1.1.1 // indirect
modernc.org/cc/v3 v3.35.24 // indirect modernc.org/cc/v3 v3.36.0 // indirect
modernc.org/ccgo/v3 v3.15.18 // indirect modernc.org/ccgo/v3 v3.16.6 // indirect
modernc.org/libc v1.14.12 // indirect modernc.org/libc v1.16.7 // indirect
modernc.org/mathutil v1.4.1 // indirect modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.0.7 // indirect modernc.org/memory v1.1.1 // indirect
modernc.org/opt v0.1.1 // indirect modernc.org/opt v0.1.1 // indirect
modernc.org/strutil v1.1.1 // indirect modernc.org/strutil v1.1.1 // indirect
modernc.org/token v1.0.0 // indirect modernc.org/token v1.0.0 // indirect

go.sum

@@ -7,6 +7,7 @@ cloud.google.com/go v0.37.1/go.mod h1:SAbnLi6YTSPKSI0dTUEOVLCkyPfKXK8n4ibqiMoj4o
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -21,6 +22,7 @@ cloud.google.com/go v0.64.0/go.mod h1:xfORb36jGvE+6EexW71nMEtL025s3x6xvuYUKM4JLv
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
@@ -34,12 +36,16 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@@ -55,6 +61,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
code.sajari.com/docconv v1.1.1-0.20210427001343-7b3472bc323a/go.mod h1:KPNt2zuWplps1W0TpOb6ltHj4Xu+j6h7a+YkqGHrxQE= code.sajari.com/docconv v1.1.1-0.20210427001343-7b3472bc323a/go.mod h1:KPNt2zuWplps1W0TpOb6ltHj4Xu+j6h7a+YkqGHrxQE=
code.sajari.com/docconv v1.2.0/go.mod h1:r8yfCP6OKbZ9Xkd87aBa4nfpk6ud/PoyLwex3n6cXSc= code.sajari.com/docconv v1.2.0/go.mod h1:r8yfCP6OKbZ9Xkd87aBa4nfpk6ud/PoyLwex3n6cXSc=
contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw= contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw=
@@ -152,8 +159,8 @@ github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c/go.mod h1:DNS
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
github.com/RoaringBitmap/roaring v0.8.0/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I= github.com/RoaringBitmap/roaring v0.8.0/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
github.com/SevereCloud/vksdk/v2 v2.13.1 h1:D11NaP275mW01v2hRF0ycDHdJaIyZEvasZV4MSkg5Sk= github.com/SevereCloud/vksdk/v2 v2.14.0 h1:1lciJC4FWhSQIjjFb3NGyJI7x9sPKk/P6aAvR0ibh1o=
github.com/SevereCloud/vksdk/v2 v2.13.1/go.mod h1:UyOgSj/CYt2dByu3Fyf/y1yT1NoahVi4zECvvrbtPU4= github.com/SevereCloud/vksdk/v2 v2.14.0/go.mod h1:J/iPooVfldjVADo47G5aNxkvlRWAsZnMHpri8sZmck4=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
@@ -209,7 +216,7 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.67/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.67/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.42.49/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/aws/aws-sdk-go v1.43.6/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
@@ -256,7 +263,7 @@ github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blevesearch/bleve v1.0.14/go.mod h1:e/LJTr+E7EaoVdkQZTfoz7dt4KoDNvDbLb8MSKuNTLQ= github.com/blevesearch/bleve v1.0.14/go.mod h1:e/LJTr+E7EaoVdkQZTfoz7dt4KoDNvDbLb8MSKuNTLQ=
github.com/blevesearch/bleve/v2 v2.3.0/go.mod h1:egW/6gZEhM3oBvRjuHXGvGb92cKZ9867OqPZAmCG8MQ= github.com/blevesearch/bleve/v2 v2.3.1/go.mod h1:kAJuWn2L1TNSUyxtPJD4AGma2/PgMSm7GBlx61F9OBs=
github.com/blevesearch/bleve_index_api v1.0.1/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= github.com/blevesearch/bleve_index_api v1.0.1/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
github.com/blevesearch/blevex v1.0.0/go.mod h1:2rNVqoG2BZI8t1/P1awgTKnGlx5MP9ZbtEciQaNhswc= github.com/blevesearch/blevex v1.0.0/go.mod h1:2rNVqoG2BZI8t1/P1awgTKnGlx5MP9ZbtEciQaNhswc=
github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc=
@@ -273,11 +280,11 @@ github.com/blevesearch/zap/v12 v12.0.14/go.mod h1:rOnuZOiMKPQj18AEKEHJxuI14236tT
github.com/blevesearch/zap/v13 v13.0.6/go.mod h1:L89gsjdRKGyGrRN6nCpIScCvvkyxvmeDCwZRcjjPCrw= github.com/blevesearch/zap/v13 v13.0.6/go.mod h1:L89gsjdRKGyGrRN6nCpIScCvvkyxvmeDCwZRcjjPCrw=
github.com/blevesearch/zap/v14 v14.0.5/go.mod h1:bWe8S7tRrSBTIaZ6cLRbgNH4TUDaC9LZSpRGs85AsGY= github.com/blevesearch/zap/v14 v14.0.5/go.mod h1:bWe8S7tRrSBTIaZ6cLRbgNH4TUDaC9LZSpRGs85AsGY=
github.com/blevesearch/zap/v15 v15.0.3/go.mod h1:iuwQrImsh1WjWJ0Ue2kBqY83a0rFtJTqfa9fp1rbVVU= github.com/blevesearch/zap/v15 v15.0.3/go.mod h1:iuwQrImsh1WjWJ0Ue2kBqY83a0rFtJTqfa9fp1rbVVU=
github.com/blevesearch/zapx/v11 v11.3.2/go.mod h1:YzTfUm4kS3e8OmTXDHVV8OzC5MWPO/VPJZQgPNVb4Lc= github.com/blevesearch/zapx/v11 v11.3.3/go.mod h1:YzTfUm4kS3e8OmTXDHVV8OzC5MWPO/VPJZQgPNVb4Lc=
github.com/blevesearch/zapx/v12 v12.3.2/go.mod h1:RMl6lOZqF+sTxKvhQDJ5yK2LT3Mu7E2p/jGdjAaiRxs= github.com/blevesearch/zapx/v12 v12.3.3/go.mod h1:RMl6lOZqF+sTxKvhQDJ5yK2LT3Mu7E2p/jGdjAaiRxs=
github.com/blevesearch/zapx/v13 v13.3.2/go.mod h1:eppobNM35U4C22yDvTuxV9xPqo10pwfP/jugL4INWG4= github.com/blevesearch/zapx/v13 v13.3.3/go.mod h1:eppobNM35U4C22yDvTuxV9xPqo10pwfP/jugL4INWG4=
github.com/blevesearch/zapx/v14 v14.3.2/go.mod h1:zXNcVzukh0AvG57oUtT1T0ndi09H0kELNaNmekEy0jw= github.com/blevesearch/zapx/v14 v14.3.3/go.mod h1:zXNcVzukh0AvG57oUtT1T0ndi09H0kELNaNmekEy0jw=
github.com/blevesearch/zapx/v15 v15.3.2/go.mod h1:C+f/97ZzTzK6vt/7sVlZdzZxKu+5+j4SrGCvr9dJzaY= github.com/blevesearch/zapx/v15 v15.3.3/go.mod h1:C+f/97ZzTzK6vt/7sVlZdzZxKu+5+j4SrGCvr9dJzaY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
@@ -287,8 +294,8 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/bwmarrin/discordgo v0.24.0 h1:Gw4MYxqHdvhO99A3nXnSLy97z5pmIKHZVJ1JY5ZDPqY= github.com/bwmarrin/discordgo v0.25.0 h1:NXhdfHRNxtwso6FPdzW2i3uBvvU7UIQTghmV2T4nqAs=
github.com/bwmarrin/discordgo v0.24.0/go.mod h1:NJZpH+1AfhIcyQsPeuBKsUtYrRnjkyu0kIVMCHkZtRY= github.com/bwmarrin/discordgo v0.25.0/go.mod h1:NJZpH+1AfhIcyQsPeuBKsUtYrRnjkyu0kIVMCHkZtRY=
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -542,8 +549,9 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
@@ -584,7 +592,6 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-morph/morph v0.2.3-0.20220215130848-76392b135ee5/go.mod h1:XQh5WcM351wOV3z3zEWRM8RaJ65E2p4P7WWbmFAi8x4=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.2.6-0.20210915003542-8b1f7f90f6b1/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6-0.20210915003542-8b1f7f90f6b1/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -704,8 +711,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomarkdown/markdown v0.0.0-20220310201231-552c6011c0b8 h1:YVvt637ygnOO9qjLBVmPOvrUmCz/i8YECSu/8UlOQW0= github.com/gomarkdown/markdown v0.0.0-20220509074759-a57bf950ab8c h1:yGxnjZegu9T/94575b5UGf2uDDYN3elzreWYpkhw2f4=
github.com/gomarkdown/markdown v0.0.0-20220310201231-552c6011c0b8/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= github.com/gomarkdown/markdown v0.0.0-20220509074759-a57bf950ab8c/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -724,15 +731,16 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs= github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gops v0.3.22 h1:lyvhDxfPLHAOR2xIYwjPhN387qHxyU21Sk9sz/GhmhQ= github.com/google/gops v0.3.23 h1:OjsHRINl5FiIyTc8jivIg4UN0GY6Nh32SL8KRbl8GQo=
github.com/google/gops v0.3.22/go.mod h1:7diIdLsqpCihPSX3fQagksT/Ku/y4RL9LHTlKyEUDl8= github.com/google/gops v0.3.23/go.mod h1:7diIdLsqpCihPSX3fQagksT/Ku/y4RL9LHTlKyEUDl8=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -746,6 +754,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -765,14 +774,17 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gopackage/ddp v0.0.3 h1:fd0DxScoiS+ogq22ktey6DjDSDybtJPAn69geMpUtFc= github.com/gopackage/ddp v0.0.3 h1:fd0DxScoiS+ogq22ktey6DjDSDybtJPAn69geMpUtFc=
github.com/gopackage/ddp v0.0.3/go.mod h1:3hUXYG6C/6JsoxKsQaK7st09+GP9RZBFPzyAlU/0SLg= github.com/gopackage/ddp v0.0.3/go.mod h1:3hUXYG6C/6JsoxKsQaK7st09+GP9RZBFPzyAlU/0SLg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20210621113107-84c6004145de/go.mod h1:MtKwTfDNYAP5EtbQSMYjTSqvj1aXJKQRASWq3bwaP+g= github.com/gopherjs/gopherjs v0.0.0-20210621113107-84c6004145de/go.mod h1:MtKwTfDNYAP5EtbQSMYjTSqvj1aXJKQRASWq3bwaP+g=
github.com/gopherjs/gopherjs v0.0.0-20220104163920-15ed2e8cf2bd h1:D/H64OK+VY7O0guGbCQaFKwAZlU5t764R++kgIdAGog= github.com/gopherjs/gopherjs v0.0.0-20220221023154-0b2280d3ff96 h1:QJq7UBOuoynsywLk+aC75rC2Cbi2+lQRDaLaizhA+fA=
github.com/gopherjs/gopherjs v0.0.0-20220104163920-15ed2e8cf2bd/go.mod h1:cz9oNYuRUWGdHmLF2IodMLkAhcPtXeULvcBNagUrxTI= github.com/gopherjs/gopherjs v0.0.0-20220221023154-0b2280d3ff96/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -827,6 +839,7 @@ github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39
github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -867,6 +880,7 @@ github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20210316155119-a95892c5f864/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hashicorp/yamux v0.0.0-20210316155119-a95892c5f864/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
@@ -989,8 +1003,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kettek/apng v0.0.0-20191108220231-414630eed80f h1:dnCYnTSltLuPMfc7dMrkz2uBUcEf/OFBR8yRh3oRT98= github.com/kettek/apng v0.0.0-20191108220231-414630eed80f h1:dnCYnTSltLuPMfc7dMrkz2uBUcEf/OFBR8yRh3oRT98=
github.com/kettek/apng v0.0.0-20191108220231-414630eed80f/go.mod h1:x78/VRQYKuCftMWS0uK5e+F5RJ7S4gSlESRWI0Prl6Q= github.com/kettek/apng v0.0.0-20191108220231-414630eed80f/go.mod h1:x78/VRQYKuCftMWS0uK5e+F5RJ7S4gSlESRWI0Prl6Q=
github.com/keybase/go-keybase-chat-bot v0.0.0-20211201215354-ee4b23828b55 h1:4UNIfjcTO4D6RmO7QhXxh4IyM5yANg6pwBldHR4a27w= github.com/keybase/go-keybase-chat-bot v0.0.0-20220322223021-75d497527469 h1:TNT0A/iqWZhj0T82eaSxwgjtvkhUPsx4W2HMC3079Mw=
github.com/keybase/go-keybase-chat-bot v0.0.0-20211201215354-ee4b23828b55/go.mod h1:0tIbyC7O87PimRbeghiJW5JAsmdKwNmAoNWFczKDZUA= github.com/keybase/go-keybase-chat-bot v0.0.0-20220322223021-75d497527469/go.mod h1:0tIbyC7O87PimRbeghiJW5JAsmdKwNmAoNWFczKDZUA=
github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -1008,8 +1022,9 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -1062,14 +1077,15 @@ github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lrstanley/girc v0.0.0-20220321215535-9664730c7858 h1:IIbHCRHuANbPoQymk4BWGcRI47RXHOt7bDxPbxQ9rms= github.com/lrstanley/girc v0.0.0-20220507183218-96757fe3d2a2 h1:iqJKGIChW2+aPIpnofEZAKgCNwG2tqytB2a1rJS6B6w=
github.com/lrstanley/girc v0.0.0-20220321215535-9664730c7858/go.mod h1:liX5MxHPrwgHaKowoLkYGwbXfYABh1jbZ6FpElbGF1I= github.com/lrstanley/girc v0.0.0-20220507183218-96757fe3d2a2/go.mod h1:lgrnhcF8bg/Bd5HA5DOb4Z+uGqUqGnp4skr+J2GwVgI=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -1084,14 +1100,14 @@ github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696 h1
github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696/go.mod h1:c6MxwqHD+0HvtAJjsHMIdPCiAwGiQwPRPTp69ACMg8A= github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696/go.mod h1:c6MxwqHD+0HvtAJjsHMIdPCiAwGiQwPRPTp69ACMg8A=
github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be h1:zlirT+LngOJ60G6FVzI87DljGZLUnfNzmXja61EjtYM= github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be h1:zlirT+LngOJ60G6FVzI87DljGZLUnfNzmXja61EjtYM=
github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be/go.mod h1:ECDRehsR9TYTKCAsRS8/wLeOk6UUqDydw47ln7wG41Q= github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be/go.mod h1:ECDRehsR9TYTKCAsRS8/wLeOk6UUqDydw47ln7wG41Q=
github.com/matterbridge/gomatrix v0.0.0-20220205235239-607eb9ee6419 h1:dx8x2J3EsVwP3hBGNmVT/otz4b42p7TRQ6Cu4BK2910= github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27 h1:9XSppnbvvReVom+wphkeF4lbhuT6vCYIdyzpwFtW89c=
github.com/matterbridge/gomatrix v0.0.0-20220205235239-607eb9ee6419/go.mod h1:/x38AoZf70fK9yZ5gs3BNCaF7/J4QEo4ZpwtLjX95eQ= github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27/go.mod h1:/x38AoZf70fK9yZ5gs3BNCaF7/J4QEo4ZpwtLjX95eQ=
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75 h1:GslZKF7lW7oSisycGLpxPO+TnKJuA4VZuTWIfYZrClc= github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75 h1:GslZKF7lW7oSisycGLpxPO+TnKJuA4VZuTWIfYZrClc=
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75/go.mod h1:yAjnZ34DuDyPHMPHHjOsTk/FefW4JJjoMMCGt/8uuQA= github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75/go.mod h1:yAjnZ34DuDyPHMPHHjOsTk/FefW4JJjoMMCGt/8uuQA=
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba h1:XleOY4IjAEIcxAh+IFwT5JT5Ze3RHiYz6m+4ZfZ0rc0= github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba h1:XleOY4IjAEIcxAh+IFwT5JT5Ze3RHiYz6m+4ZfZ0rc0=
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba/go.mod h1:iXGEotOvwI1R1SjLxRc+BF5rUORTMtE0iMZBT2lxqAU= github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba/go.mod h1:iXGEotOvwI1R1SjLxRc+BF5rUORTMtE0iMZBT2lxqAU=
github.com/matterbridge/matterclient v0.0.0-20211107234719-faca3cd42315 h1:vkdXJ8GsuIR0JH9e4eHomVliTou08PMqAImZNBlLd/A= github.com/matterbridge/matterclient v0.0.0-20220430213656-07aca2731bc9 h1:EOwZ5yJv4o2px4TEJh15aqbZsKoY9MVunan6ngqs0Rk=
github.com/matterbridge/matterclient v0.0.0-20211107234719-faca3cd42315/go.mod h1:Gh3tFUjkcPIBBeEkfXBbGio4ONMMKNmlmGECvXLY0TE= github.com/matterbridge/matterclient v0.0.0-20220430213656-07aca2731bc9/go.mod h1:Gh3tFUjkcPIBBeEkfXBbGio4ONMMKNmlmGECvXLY0TE=
github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34= github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 h1:Khvh6waxG1cHc4Cz5ef9n3XVCxRWpAKUtqg9PJl5+y8= github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 h1:Khvh6waxG1cHc4Cz5ef9n3XVCxRWpAKUtqg9PJl5+y8=
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34= github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
@@ -1109,8 +1125,9 @@ github.com/mattermost/logr/v2 v2.0.15/go.mod h1:mpPp935r5dIkFDo2y9Q87cQWhFR/4xXp
github.com/mattermost/mattermost-server/v5 v5.39.3 h1:A5z/NlR4Xcwxx5UnlaNgUGP5hgj4KOV/CwpFg3OtlvQ= github.com/mattermost/mattermost-server/v5 v5.39.3 h1:A5z/NlR4Xcwxx5UnlaNgUGP5hgj4KOV/CwpFg3OtlvQ=
github.com/mattermost/mattermost-server/v5 v5.39.3/go.mod h1:MDmVSmsSsqwNkuZ7rQ0osuXVCzrR1IUqGR7I0QU91sY= github.com/mattermost/mattermost-server/v5 v5.39.3/go.mod h1:MDmVSmsSsqwNkuZ7rQ0osuXVCzrR1IUqGR7I0QU91sY=
github.com/mattermost/mattermost-server/v6 v6.0.0/go.mod h1:+S8CsNEPv1FOl1usaPBQ6Gu9+Sm1Cc9YdU/Qh1YMGVI= github.com/mattermost/mattermost-server/v6 v6.0.0/go.mod h1:+S8CsNEPv1FOl1usaPBQ6Gu9+Sm1Cc9YdU/Qh1YMGVI=
github.com/mattermost/mattermost-server/v6 v6.5.0 h1:9S+/vQirO4XNpT+4TC/MlHNYpd2bgFvoRnBkImRCtNQ= github.com/mattermost/mattermost-server/v6 v6.6.1 h1:jza7N9OMqFe+z7s9LZeSj1M4E/2DOV/llIUpi9VWg2U=
github.com/mattermost/mattermost-server/v6 v6.5.0/go.mod h1:JRRn3uSrynvCY45ystGEJqis/Xo8PFhCDSO4SNDMncU= github.com/mattermost/mattermost-server/v6 v6.6.1/go.mod h1:oR6UCRo+SEvnfN2FEOdzHs1UljrskyCKU8tWeKlxgMo=
github.com/mattermost/morph v0.0.0-20220401091636-39f834798da8/go.mod h1:jxM3g1bx+k2Thz7jofcHguBS8TZn5Pc+o5MGmORObhw=
github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs= github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
@@ -1147,6 +1164,7 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@@ -1176,8 +1194,8 @@ github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77Z
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.11/go.mod h1:WoyW+ySKAKjY98B9+7ZbI8z8S3jaxaisdcvj9TGlazA= github.com/minio/minio-go/v7 v7.0.11/go.mod h1:WoyW+ySKAKjY98B9+7ZbI8z8S3jaxaisdcvj9TGlazA=
github.com/minio/minio-go/v7 v7.0.21 h1:xrc4BQr1Fa4s5RwY0xfMjPZFJ1bcYBCCHYlngBdWV+k= github.com/minio/minio-go/v7 v7.0.23 h1:NleyGQvAn9VQMU+YHVrgV4CX+EPtxPt/78lHOOTncy4=
github.com/minio/minio-go/v7 v7.0.21/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do= github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
@@ -1246,8 +1264,8 @@ github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9 h1:mp6tU1r0xLostUGLkTspf/9/AiHuVD7ptyXhySkDEsE= github.com/nelsonken/gomf v0.0.0-20190423072027-c65cc0469e94 h1:GRBho7BMw6le5OVegxDeFX+Ul1WPykzQs9CVrifrjik=
github.com/nelsonken/gomf v0.0.0-20180504123937-a9dd2f9deae9/go.mod h1:A5SRAcpTemjGgIuBq6Kic2yHcoeUFWUinOAlMP/i9xo= github.com/nelsonken/gomf v0.0.0-20190423072027-c65cc0469e94/go.mod h1:UT7a3UHzG7jEEeoW/AI2/e0RuLbkntdsiY94qt6Jv7Q=
github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
@@ -1341,6 +1359,8 @@ github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrap
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0=
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/peterbourgon/diskv v0.0.0-20171120014656-2973218375c3/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/diskv v0.0.0-20171120014656-2973218375c3/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@@ -1364,6 +1384,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@@ -1442,8 +1463,9 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/rudderlabs/analytics-go v3.3.1+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30= github.com/rudderlabs/analytics-go v3.3.1+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30=
@@ -1461,7 +1483,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI= github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/satori/go.uuid v0.0.0-20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v0.0.0-20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
@@ -1522,8 +1544,8 @@ github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 h1:A7o8tOE
github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882/go.mod h1:5IwJoz9Pw7JsrCN4/skkxUtSWT7myuUPLhCgv6Q5vvQ= github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882/go.mod h1:5IwJoz9Pw7JsrCN4/skkxUtSWT7myuUPLhCgv6Q5vvQ=
github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 h1:lpEzuenPuO1XNTeikEmvqYFcU37GVLl8SRNblzyvGBE= github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 h1:lpEzuenPuO1XNTeikEmvqYFcU37GVLl8SRNblzyvGBE=
github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo= github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo=
github.com/slack-go/slack v0.10.2 h1:KMN/h2sgUninHXvQI8PrR/PHBUuWp2NPvz2Kr66tki4= github.com/slack-go/slack v0.10.3 h1:kKYwlKY73AfSrtAk9UHWCXXfitudkDztNI9GYBviLxw=
github.com/slack-go/slack v0.10.2/go.mod h1:5FLdBRv7VW/d9EBxx/eEktOptWygbA9K2QK/KW7ds1s= github.com/slack-go/slack v0.10.3/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -1542,8 +1564,9 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
@@ -1569,8 +1592,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
github.com/splitio/go-client/v6 v6.1.0/go.mod h1:CEGAEFT99Fwb32ZIRcnZoXTMXddtB6IIpTmt3RP8mnM= github.com/splitio/go-client/v6 v6.1.0/go.mod h1:CEGAEFT99Fwb32ZIRcnZoXTMXddtB6IIpTmt3RP8mnM=
github.com/splitio/go-split-commons/v3 v3.1.0/go.mod h1:29NCy20oAS4ZMy4qkwTd6277eieVDonx4V/aeDU/wUQ= github.com/splitio/go-split-commons/v3 v3.1.0/go.mod h1:29NCy20oAS4ZMy4qkwTd6277eieVDonx4V/aeDU/wUQ=
github.com/splitio/go-toolkit/v4 v4.2.0/go.mod h1:EdIHN0yzB1GTXDYQc0KdKvnjkO/jfUM2YqHVYfhD3Wo= github.com/splitio/go-toolkit/v4 v4.2.0/go.mod h1:EdIHN0yzB1GTXDYQc0KdKvnjkO/jfUM2YqHVYfhD3Wo=
@@ -1590,8 +1613,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1711,13 +1735,13 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.3.8/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.8/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.5/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg= github.com/yuin/goldmark v1.4.7/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/zfjagann/golang-ring v0.0.0-20210116075443-7c86fdb43134 h1:itYC8Ycx8aVBN7a8q1Yr187W5WmQthvYU13+f4rOWkU= github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289 h1:dEdcEes8Aki8XrgZFyrZvtazFlW4U7eNvX9NuyFJAtQ=
github.com/zfjagann/golang-ring v0.0.0-20210116075443-7c86fdb43134/go.mod h1:0MsIttMJIF/8Y7x0XjonJP7K99t3sR6bjj4m5S4JmqU= github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289/go.mod h1:0MsIttMJIF/8Y7x0XjonJP7K99t3sR6bjj4m5S4JmqU=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -1727,14 +1751,17 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
go.mau.fi/libsignal v0.0.0-20220315232917-871a40435d3b h1:BGm0ceth6OtoXgBksEyeruiCL2ngZCwt86DUmUI6TmQ= go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0 h1:3IQF2bgAyibdo77hTejwuJe4jlypj9QaE4xCQuxrThM=
go.mau.fi/libsignal v0.0.0-20220315232917-871a40435d3b/go.mod h1:XYWsswZT1LfDmguWKYDuj+OugtdGX6CP3iwTtOcAGt4= go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0/go.mod h1:kBOXTvYyDG/q1Ihgvd4J6WenGPh7wtEGvPKF6vmf5ak=
go.mau.fi/whatsmeow v0.0.0-20220329131721-9f73bc00d158 h1:FLwtX7WD+SHGaSiHZwSiCCP155A7+oYdQ+6nr1Lvhms= go.mau.fi/whatsmeow v0.0.0-20220504135614-f1f2a9d231fb h1:xI4HiJwBMmztBXFzjKWt7Ea8xmOO7LyYCYV0/ROU7kY=
go.mau.fi/whatsmeow v0.0.0-20220329131721-9f73bc00d158/go.mod h1:P7OA9XyJ/0dWHhUPKNESpC1wVOErnhY4pLEaMC1a8yg= go.mau.fi/whatsmeow v0.0.0-20220504135614-f1f2a9d231fb/go.mod h1:iUBgOLNaqShLrR17u0kIiRptIGFH+nbT1tRhaWBEX/c=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -1815,9 +1842,10 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220208233918-bba287dce954/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1846,8 +1874,8 @@ golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+o
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20210622092929-e6eecd499c2c/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20210622092929-e6eecd499c2c/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20220302094943-723b81ca9867 h1:TcHcE0vrmgzNH1v3ppjcMGbhG5+9fMuvOmUYwNEF4q4= golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9 h1:LRtI4W37N+KFebI/qV0OFiLUv4GLOWeEW5hn/KEJvxE=
golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1956,9 +1984,11 @@ golang.org/x/net v0.0.0-20211013171255-e13a2654a71e/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1982,8 +2012,10 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2094,16 +2126,17 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210326220804-49726bf1d181/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2130,8 +2163,13 @@ golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220207234003-57398862261d h1:Bm7BNOQt2Qv7ZqysjeLjgCBanX+88Z/OtdvsrEv1Djc= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220207234003-57398862261d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
@@ -2234,6 +2272,7 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
@@ -2248,8 +2287,9 @@ golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomod.garykim.dev/nc-talk v0.3.0 h1:MZxLc/gX2/+bdOw4xt6pi+qQFUQld1woGfw1hEJ0fbM= gomod.garykim.dev/nc-talk v0.3.0 h1:MZxLc/gX2/+bdOw4xt6pi+qQFUQld1woGfw1hEJ0fbM=
gomod.garykim.dev/nc-talk v0.3.0/go.mod h1:q/Adot/H7iqi+H4lANopV7/xcMf+sX3AZXUXqiITwok= gomod.garykim.dev/nc-talk v0.3.0/go.mod h1:q/Adot/H7iqi+H4lANopV7/xcMf+sX3AZXUXqiITwok=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
@@ -2298,6 +2338,10 @@ google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUb
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -2359,8 +2403,10 @@ google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -2392,7 +2438,15 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
@@ -2431,8 +2485,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -2446,8 +2500,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
@@ -2470,8 +2525,8 @@ gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.3 h1:jRskFVxYaMGAMUbN0UZ7niA9gzL9B49DOqE78vg0k3w= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
@@ -2565,10 +2620,10 @@ modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g
modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g=
modernc.org/cc/v3 v3.35.20/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.36.0 h1:0kmRkTmqNidmu3c7BNDSdVHCxXCkWLmWmCIVX4LUboo=
modernc.org/cc/v3 v3.35.22/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.35.24 h1:vlCqjhVwX15t1uwlMPpOpNRC7JTjMZ9lT9DYHKQTFuA= modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
modernc.org/cc/v3 v3.35.24/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo=
modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60= modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60=
modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw= modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw=
@@ -2605,18 +2660,13 @@ modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3
modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY= modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY=
modernc.org/ccgo/v3 v3.12.84/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w= modernc.org/ccgo/v3 v3.12.84/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w=
modernc.org/ccgo/v3 v3.12.86/go.mod h1:dN7S26DLTgVSni1PVA3KxxHTcykyDurf3OgUzNqTSrU= modernc.org/ccgo/v3 v3.12.86/go.mod h1:dN7S26DLTgVSni1PVA3KxxHTcykyDurf3OgUzNqTSrU=
modernc.org/ccgo/v3 v3.12.88/go.mod h1:0MFzUHIuSIthpVZyMWiFYMwjiFnhrN5MkvBrUwON+ZM=
modernc.org/ccgo/v3 v3.12.90/go.mod h1:obhSc3CdivCRpYZmrvO88TXlW0NvoSVvdh/ccRjJYko= modernc.org/ccgo/v3 v3.12.90/go.mod h1:obhSc3CdivCRpYZmrvO88TXlW0NvoSVvdh/ccRjJYko=
modernc.org/ccgo/v3 v3.12.92/go.mod h1:5yDdN7ti9KWPi5bRVWPl8UNhpEAtCjuEE7ayQnzzqHA= modernc.org/ccgo/v3 v3.12.92/go.mod h1:5yDdN7ti9KWPi5bRVWPl8UNhpEAtCjuEE7ayQnzzqHA=
modernc.org/ccgo/v3 v3.13.1/go.mod h1:aBYVOUfIlcSnrsRVU8VRS35y2DIfpgkmVkYZ0tpIXi4= modernc.org/ccgo/v3 v3.12.95/go.mod h1:ZcLyvtocXYi8uF+9Ebm3G8EF8HNY5hGomBqthDp4eC8=
modernc.org/ccgo/v3 v3.15.9/go.mod h1:md59wBwDT2LznX/OTCPoVS6KIsdRgY8xqQwBV+hkTH0= modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.15.10/go.mod h1:wQKxoFn0ynxMuCLfFD09c8XPUCc8obfchoVR9Cn0fI8= modernc.org/ccgo/v3 v3.16.6 h1:3l18poV+iUemQ98O3X5OMr97LOqlzis+ytivU4NqGhA=
modernc.org/ccgo/v3 v3.15.12/go.mod h1:VFePOWoCd8uDGRJpq/zfJ29D0EVzMSyID8LCMWYbX6I= modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.15.14/go.mod h1:144Sz2iBCKogb9OKwsu7hQEub3EVgOlyI8wMUPGKUXQ=
modernc.org/ccgo/v3 v3.15.15/go.mod h1:z5qltXjU4PJl0pE5nhYQCvA9DhPHiWsl5GWl89+NSYE=
modernc.org/ccgo/v3 v3.15.16/go.mod h1:XbKRMeMWMdq712Tr5ECgATYMrzJ+g9zAZEj2ktzBe24=
modernc.org/ccgo/v3 v3.15.17/go.mod h1:bofnFkpRFf5gLY+mBZIyTW6FEcp26xi2lgOFk2Rlvs0=
modernc.org/ccgo/v3 v3.15.18 h1:X5ym656Ye7/ubL+wox0SeF9aRX5od1UDFn1tAbQR+90=
modernc.org/ccgo/v3 v3.15.18/go.mod h1:/2lv3WjHyanEr2sAPdGKRC38n6f0werut9BRXUjjX+A=
modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
@@ -2627,6 +2677,7 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM=
modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
@@ -2664,19 +2715,15 @@ modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI=
modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE= modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE=
modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY= modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY=
modernc.org/libc v1.11.88/go.mod h1:h3oIVe8dxmTcchcFuCcJ4nAWaoiwzKCdv82MM0oiIdQ= modernc.org/libc v1.11.88/go.mod h1:h3oIVe8dxmTcchcFuCcJ4nAWaoiwzKCdv82MM0oiIdQ=
modernc.org/libc v1.11.90/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c=
modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c= modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c=
modernc.org/libc v1.11.99/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI= modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
modernc.org/libc v1.12.0/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ= modernc.org/libc v1.11.104/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ=
modernc.org/libc v1.14.1/go.mod h1:npFeGWjmZTjFeWALQLrvklVmAxv4m80jnG3+xI8FdJk= modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
modernc.org/libc v1.14.2/go.mod h1:MX1GBLnRLNdvmK9azU9LCxZ5lMyhrbEMK8rG3X/Fe34= modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
modernc.org/libc v1.14.3/go.mod h1:GPIvQVOVPizzlqyRX3l756/3ppsAgg1QgPxjr5Q4agQ= modernc.org/libc v1.16.7 h1:qzQtHhsZNpVPpeCu+aMIQldXeV1P0vRhSqCL0nOIJOA=
modernc.org/libc v1.14.6/go.mod h1:2PJHINagVxO4QW/5OQdRrvMYo+bm5ClpUFfyXCYl9ak= modernc.org/libc v1.16.7/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
modernc.org/libc v1.14.7/go.mod h1:f8xfWXW8LW41qb4X5+huVQo5dcfPlq7Cbny2TDheMv0=
modernc.org/libc v1.14.8/go.mod h1:9+JCLb1MWSY23smyOpIPbd5ED+rSS/ieiDWUpdyO3mo=
modernc.org/libc v1.14.10/go.mod h1:y1MtIWhwpJFpLYm6grAThtuXJKEsY6xkdZmXbRngIdo=
modernc.org/libc v1.14.11/go.mod h1:l5/Mz/GrZwOqzwRHA3abgSCnSeJzzTl+Ify0bAwKbAw=
modernc.org/libc v1.14.12 h1:pUBZTYoISfbb4pCf4PECENpbvwDBxeKc+/dS9LyOWFM=
modernc.org/libc v1.14.12/go.mod h1:fJdoe23MHu2ruPQkFPPqCpToDi5cckzsbmkI6Ez0LqQ=
modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
@@ -2686,28 +2733,30 @@ modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM= modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM=
modernc.org/memory v1.0.6/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/memory v1.0.7 h1:UE3cxTRFa5tfUibAV7Jqq8P7zRY0OlJg+yWVIIaluEE= modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/memory v1.0.7/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A=
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
modernc.org/sqlite v1.15.4 h1:pr3EA3Rety3j1c/9pCyGAe5d3vjF6wQwusHdgGCjIqc= modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y=
modernc.org/sqlite v1.15.4/go.mod h1:Jwe13ItpESZ+78K5WS6+AjXsUg+JvirsjN3iIDO4C8k= modernc.org/sqlite v1.17.2 h1:TjmF36Wi5QcPYqRoAacV1cAyJ7xB/CD0ExpVUEMebnw=
modernc.org/sqlite v1.17.2/go.mod h1:GOQmuiXd6pTTes1Fi2s9apiCcD/wbKQtBZ0Nw6/etjM=
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs= modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
modernc.org/tcl v1.11.2 h1:mXpsx3AZqJt83uDiFu9UYQVBjNjaWKGCF1YDSlpCL6Y= modernc.org/tcl v1.9.2/go.mod h1:aw7OnlIoiuJgu1gwbTZtrKnGpDqH9wyH++jZcxdqNsg=
modernc.org/tcl v1.11.2/go.mod h1:BRzgpajcGdS2qTxniOx9c/dcxjlbA7p12eJNmiriQYo= modernc.org/tcl v1.13.1 h1:npxzTwFTZYM8ghWicVIX1cRWzj7Nd8i6AqqX2p+IYao=
modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
modernc.org/z v1.3.2 h1:4GWBVMa48UDC7KQ9tnaggN/yTlXg+CdCX9bhgHPQ9AM= modernc.org/z v1.2.20/go.mod h1:zU9FiF4PbHdOTUxw+IF8j7ArBMRPsHgq10uVPt6xTzo=
modernc.org/z v1.3.2/go.mod h1:PEU2oK2OEA1CfzDTd+8E908qEXhC9s0MfyKp5LZsd+k= modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM=
modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo=


@@ -1328,6 +1328,15 @@ HTMLDisable=false
# UseUserName shows the username instead of the server nickname # UseUserName shows the username instead of the server nickname
UseUserName=false UseUserName=false
# Matrix quotes replies, and as of matterbridge 1.24.0 we strip those quotes because they cause
# issues with bridges that support threading and have PreserveThreading enabled.
# But if you use, for example, mattermost or discord with webhooks, you'll need to enable
# this (and keep PreserveThreading disabled) if you want something that looks like a reply from matrix.
# See issues:
# - https://github.com/42wim/matterbridge/issues/1819
# - https://github.com/42wim/matterbridge/issues/1780
KeepQuotedReply=false
#Nicks you want to ignore. #Nicks you want to ignore.
#Regular expressions supported #Regular expressions supported
#Messages from those users will not be sent to other bridges. #Messages from those users will not be sent to other bridges.


@@ -1,7 +1,7 @@
# VK SDK for Golang # VK SDK for Golang
[![PkgGoDev](https://pkg.go.dev/badge/github.com/SevereCloud/vksdk/v2/v2)](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2?tab=subdirectories) [![PkgGoDev](https://pkg.go.dev/badge/github.com/SevereCloud/vksdk/v2/v2)](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2?tab=subdirectories)
[![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.com/dev/) [![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://dev.vk.com/)
[![codecov](https://codecov.io/gh/SevereCloud/vksdk/branch/master/graph/badge.svg)](https://codecov.io/gh/SevereCloud/vksdk) [![codecov](https://codecov.io/gh/SevereCloud/vksdk/branch/master/graph/badge.svg)](https://codecov.io/gh/SevereCloud/vksdk)
[![VK chat](https://img.shields.io/badge/VK%20chat-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.me/join/AJQ1d6Or8Q00Y_CSOESfbqGt) [![VK chat](https://img.shields.io/badge/VK%20chat-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.me/join/AJQ1d6Or8Q00Y_CSOESfbqGt)
[![release](https://img.shields.io/github/v/tag/SevereCloud/vksdk?label=release)](https://github.com/SevereCloud/vksdk/releases) [![release](https://img.shields.io/github/v/tag/SevereCloud/vksdk?label=release)](https://github.com/SevereCloud/vksdk/releases)


@@ -141,6 +141,8 @@ func (vk *VK) AccountSetInfo(params Params) (response int, err error) {
// AccountSetNameInMenu sets an application screen name // AccountSetNameInMenu sets an application screen name
// (up to 17 characters), that is shown to the user in the left menu. // (up to 17 characters), that is shown to the user in the left menu.
// //
// Deprecated: This method may be disabled soon, please avoid using it.
//
// https://vk.com/dev/account.setNameInMenu // https://vk.com/dev/account.setNameInMenu
func (vk *VK) AccountSetNameInMenu(params Params) (response int, err error) { func (vk *VK) AccountSetNameInMenu(params Params) (response int, err error) {
err = vk.RequestUnmarshal("account.setNameInMenu", &response, params) err = vk.RequestUnmarshal("account.setNameInMenu", &response, params)


@@ -1,4 +1,4 @@
package api // import "github.com/SevereCloud/vksdk/api" package api // import "github.com/SevereCloud/vksdk/v2/api"
import ( import (
"github.com/SevereCloud/vksdk/v2/object" "github.com/SevereCloud/vksdk/v2/object"


@@ -235,6 +235,17 @@ func (vk *VK) VideoGetCommentsExtended(params Params) (response VideoGetComments
return return
} }
// VideoLiveGetCategoriesResponse struct.
type VideoLiveGetCategoriesResponse []object.VideoLiveCategory
// VideoLiveGetCategories method.
//
// https://vk.com/dev/video.liveGetCategories
func (vk *VK) VideoLiveGetCategories(params Params) (response VideoLiveGetCategoriesResponse, err error) {
err = vk.RequestUnmarshal("video.liveGetCategories", &response, params)
return
}
// VideoRemoveFromAlbum allows you to remove the video from the album. // VideoRemoveFromAlbum allows you to remove the video from the album.
// //
// https://vk.com/dev/video.removeFromAlbum // https://vk.com/dev/video.removeFromAlbum
@@ -336,3 +347,27 @@ func (vk *VK) VideoSearchExtended(params Params) (response VideoSearchExtendedRe
return return
} }
// VideoStartStreamingResponse struct.
type VideoStartStreamingResponse object.VideoLive
// VideoStartStreaming method.
//
// https://vk.com/dev/video.startStreaming
func (vk *VK) VideoStartStreaming(params Params) (response VideoStartStreamingResponse, err error) {
err = vk.RequestUnmarshal("video.startStreaming", &response, params)
return
}
// VideoStopStreamingResponse struct.
type VideoStopStreamingResponse struct {
UniqueViewers int `json:"unique_viewers"`
}
// VideoStopStreaming method.
//
// https://vk.com/dev/video.stopStreaming
func (vk *VK) VideoStopStreaming(params Params) (response VideoStopStreamingResponse, err error) {
err = vk.RequestUnmarshal("video.stopStreaming", &response, params)
return
}
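The new live-streaming wrappers above follow the same RequestUnmarshal pattern as the rest of the vksdk API surface, and their response types map onto the VideoLive/VideoLiveStream objects added later in this changeset. A minimal usage sketch; the access token, parameter names, and error handling below are illustrative assumptions, not part of the diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/SevereCloud/vksdk/v2/api"
)

func main() {
	vk := api.NewVK("ACCESS_TOKEN") // placeholder token

	// Start a stream; the response carries the RTMP endpoint and key.
	live, err := vk.VideoStartStreaming(api.Params{
		"name":        "Test stream", // assumed parameter name
		"description": "Started via vksdk",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("publish to:", live.Stream.URL, "with key:", live.Stream.Key)

	// ...broadcast...

	// Stop the stream and read the viewer counter returned by the API.
	stats, err := vk.VideoStopStreaming(api.Params{"video_id": live.VideoID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("unique viewers:", stats.UniqueViewers)
}
```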


@@ -7,6 +7,6 @@ package vksdk
// Module constants. // Module constants.
const ( const (
Version = "2.13.1" Version = "2.14.0"
API = "5.131" API = "5.131"
) )


@@ -15,3 +15,8 @@ func GroupIDFromContext(ctx context.Context) int {
func EventIDFromContext(ctx context.Context) string { func EventIDFromContext(ctx context.Context) string {
return ctx.Value(internal.EventIDKey).(string) return ctx.Value(internal.EventIDKey).(string)
} }
// VersionFromContext returns the version from context.
func VersionFromContext(ctx context.Context) string {
return ctx.Value(internal.EventVersionKey).(string)
}
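With the event version now stored on the context (see the GroupEvent and FuncList changes below), handlers can read it the same way they already read the group and event IDs. A minimal sketch; the helper name logEventMeta is made up for illustration, and it assumes the context was produced by FuncList.Handler:

```go
package example

import (
	"context"
	"fmt"

	"github.com/SevereCloud/vksdk/v2/events"
)

// logEventMeta prints the metadata that FuncList.Handler stores on the
// context before dispatching to the registered event handlers.
func logEventMeta(ctx context.Context) {
	fmt.Println("group id:   ", events.GroupIDFromContext(ctx))
	fmt.Println("event id:   ", events.EventIDFromContext(ctx))
	fmt.Println("api version:", events.VersionFromContext(ctx)) // new helper
}
```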


@@ -81,6 +81,7 @@ type GroupEvent struct {
Object json.RawMessage `json:"object"` Object json.RawMessage `json:"object"`
GroupID int `json:"group_id"` GroupID int `json:"group_id"`
EventID string `json:"event_id"` EventID string `json:"event_id"`
V string `json:"v"`
Secret string `json:"secret"` Secret string `json:"secret"`
} }
@@ -158,6 +159,7 @@ func NewFuncList() *FuncList {
func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:gocyclo func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:gocyclo
ctx = context.WithValue(ctx, internal.GroupIDKey, e.GroupID) ctx = context.WithValue(ctx, internal.GroupIDKey, e.GroupID)
ctx = context.WithValue(ctx, internal.EventIDKey, e.EventID) ctx = context.WithValue(ctx, internal.EventIDKey, e.EventID)
ctx = context.WithValue(ctx, internal.EventVersionKey, e.V)
if sliceFunc, ok := fl.special[e.Type]; ok { if sliceFunc, ok := fl.special[e.Type]; ok {
for _, f := range sliceFunc { for _, f := range sliceFunc {


@@ -28,6 +28,7 @@ const (
CallbackRetryCounterKey CallbackRetryCounterKey
CallbackRetryAfterKey CallbackRetryAfterKey
CallbackRemove CallbackRemove
EventVersionKey
) )
// ContextClient return *http.Client. // ContextClient return *http.Client.


@@ -84,7 +84,6 @@ type AccountAccountCounters struct {
// AccountInfo struct. // AccountInfo struct.
type AccountInfo struct { type AccountInfo struct {
// Country code. // Country code.
Country string `json:"country"` Country string `json:"country"`


@@ -409,7 +409,6 @@ type MessageContentSource struct {
Type string `json:"type"` Type string `json:"type"`
MessageContentSourceMessage // type message MessageContentSourceMessage // type message
MessageContentSourceURL // type url MessageContentSourceURL // type url
} }
// NewMessageContentSourceMessage ... // NewMessageContentSourceMessage ...


@@ -24,110 +24,113 @@ const (
// UsersUser struct. // UsersUser struct.
type UsersUser struct { type UsersUser struct {
ID int `json:"id"` ID int `json:"id"`
FirstName string `json:"first_name"` FirstName string `json:"first_name"`
LastName string `json:"last_name"` LastName string `json:"last_name"`
FirstNameNom string `json:"first_name_nom"` FirstNameNom string `json:"first_name_nom"`
FirstNameGen string `json:"first_name_gen"` FirstNameGen string `json:"first_name_gen"`
FirstNameDat string `json:"first_name_dat"` FirstNameDat string `json:"first_name_dat"`
FirstNameAcc string `json:"first_name_acc"` FirstNameAcc string `json:"first_name_acc"`
FirstNameIns string `json:"first_name_ins"` FirstNameIns string `json:"first_name_ins"`
FirstNameAbl string `json:"first_name_abl"` FirstNameAbl string `json:"first_name_abl"`
LastNameNom string `json:"last_name_nom"` LastNameNom string `json:"last_name_nom"`
LastNameGen string `json:"last_name_gen"` LastNameGen string `json:"last_name_gen"`
LastNameDat string `json:"last_name_dat"` LastNameDat string `json:"last_name_dat"`
LastNameAcc string `json:"last_name_acc"` LastNameAcc string `json:"last_name_acc"`
LastNameIns string `json:"last_name_ins"` LastNameIns string `json:"last_name_ins"`
LastNameAbl string `json:"last_name_abl"` LastNameAbl string `json:"last_name_abl"`
MaidenName string `json:"maiden_name"` MaidenName string `json:"maiden_name"`
Sex int `json:"sex"` Sex int `json:"sex"`
Nickname string `json:"nickname"` Nickname string `json:"nickname"`
Domain string `json:"domain"` Domain string `json:"domain"`
ScreenName string `json:"screen_name"` ScreenName string `json:"screen_name"`
Bdate string `json:"bdate"` Bdate string `json:"bdate"`
City BaseObject `json:"city"` City BaseObject `json:"city"`
Country BaseObject `json:"country"` Country BaseObject `json:"country"`
Photo50 string `json:"photo_50"` Photo50 string `json:"photo_50"`
Photo100 string `json:"photo_100"` Photo100 string `json:"photo_100"`
Photo200 string `json:"photo_200"` Photo200 string `json:"photo_200"`
PhotoMax string `json:"photo_max"` PhotoMax string `json:"photo_max"`
Photo200Orig string `json:"photo_200_orig"` Photo200Orig string `json:"photo_200_orig"`
Photo400Orig string `json:"photo_400_orig"` Photo400Orig string `json:"photo_400_orig"`
PhotoMaxOrig string `json:"photo_max_orig"` PhotoMaxOrig string `json:"photo_max_orig"`
PhotoID string `json:"photo_id"` PhotoID string `json:"photo_id"`
FriendStatus int `json:"friend_status"` // see FriendStatus const FriendStatus int `json:"friend_status"` // see FriendStatus const
OnlineApp int `json:"online_app"` OnlineApp int `json:"online_app"`
Online BaseBoolInt `json:"online"` Online BaseBoolInt `json:"online"`
OnlineMobile BaseBoolInt `json:"online_mobile"` OnlineMobile BaseBoolInt `json:"online_mobile"`
HasPhoto BaseBoolInt `json:"has_photo"` HasPhoto BaseBoolInt `json:"has_photo"`
HasMobile BaseBoolInt `json:"has_mobile"` HasMobile BaseBoolInt `json:"has_mobile"`
IsClosed BaseBoolInt `json:"is_closed"` IsClosed BaseBoolInt `json:"is_closed"`
IsFriend BaseBoolInt `json:"is_friend"` IsFriend BaseBoolInt `json:"is_friend"`
IsFavorite BaseBoolInt `json:"is_favorite"` IsFavorite BaseBoolInt `json:"is_favorite"`
IsHiddenFromFeed BaseBoolInt `json:"is_hidden_from_feed"` IsHiddenFromFeed BaseBoolInt `json:"is_hidden_from_feed"`
CanAccessClosed BaseBoolInt `json:"can_access_closed"` CanAccessClosed BaseBoolInt `json:"can_access_closed"`
CanBeInvitedGroup BaseBoolInt `json:"can_be_invited_group"` CanBeInvitedGroup BaseBoolInt `json:"can_be_invited_group"`
CanPost BaseBoolInt `json:"can_post"` CanPost BaseBoolInt `json:"can_post"`
CanSeeAllPosts BaseBoolInt `json:"can_see_all_posts"` CanSeeAllPosts BaseBoolInt `json:"can_see_all_posts"`
CanSeeAudio BaseBoolInt `json:"can_see_audio"` CanSeeAudio BaseBoolInt `json:"can_see_audio"`
CanWritePrivateMessage BaseBoolInt `json:"can_write_private_message"` CanWritePrivateMessage BaseBoolInt `json:"can_write_private_message"`
CanSendFriendRequest BaseBoolInt `json:"can_send_friend_request"` CanSendFriendRequest BaseBoolInt `json:"can_send_friend_request"`
CanCallFromGroup BaseBoolInt `json:"can_call_from_group"` CanCallFromGroup BaseBoolInt `json:"can_call_from_group"`
Verified BaseBoolInt `json:"verified"` Verified BaseBoolInt `json:"verified"`
Trending BaseBoolInt `json:"trending"` Trending BaseBoolInt `json:"trending"`
Blacklisted BaseBoolInt `json:"blacklisted"` Blacklisted BaseBoolInt `json:"blacklisted"`
BlacklistedByMe BaseBoolInt `json:"blacklisted_by_me"` BlacklistedByMe BaseBoolInt `json:"blacklisted_by_me"`
Facebook string `json:"facebook"` // Deprecated: Facebook and Instagram are banned in Russia; Meta has been designated an extremist organization...
FacebookName string `json:"facebook_name"` Facebook string `json:"facebook"`
Twitter string `json:"twitter"` // Deprecated: Facebook and Instagram are banned in Russia; Meta has been designated an extremist organization...
Instagram string `json:"instagram"` FacebookName string `json:"facebook_name"`
Site string `json:"site"` // Deprecated: Facebook and Instagram are banned in Russia; Meta has been designated an extremist organization...
Status string `json:"status"` Instagram string `json:"instagram"`
StatusAudio AudioAudio `json:"status_audio"` Twitter string `json:"twitter"`
LastSeen UsersLastSeen `json:"last_seen"` Site string `json:"site"`
CropPhoto UsersCropPhoto `json:"crop_photo"` Status string `json:"status"`
FollowersCount int `json:"followers_count"` StatusAudio AudioAudio `json:"status_audio"`
CommonCount int `json:"common_count"` LastSeen UsersLastSeen `json:"last_seen"`
Occupation UsersOccupation `json:"occupation"` CropPhoto UsersCropPhoto `json:"crop_photo"`
Career []UsersCareer `json:"career"` FollowersCount int `json:"followers_count"`
Military []UsersMilitary `json:"military"` CommonCount int `json:"common_count"`
University int `json:"university"` Occupation UsersOccupation `json:"occupation"`
UniversityName string `json:"university_name"` Career []UsersCareer `json:"career"`
Faculty int `json:"faculty"` Military []UsersMilitary `json:"military"`
FacultyName string `json:"faculty_name"` University int `json:"university"`
Graduation int `json:"graduation"` UniversityName string `json:"university_name"`
EducationForm string `json:"education_form"` Faculty int `json:"faculty"`
EducationStatus string `json:"education_status"` FacultyName string `json:"faculty_name"`
HomeTown string `json:"home_town"` Graduation int `json:"graduation"`
Relation int `json:"relation"` EducationForm string `json:"education_form"`
Personal UsersPersonal `json:"personal"` EducationStatus string `json:"education_status"`
Interests string `json:"interests"` HomeTown string `json:"home_town"`
Music string `json:"music"` Relation int `json:"relation"`
Activities string `json:"activities"` Personal UsersPersonal `json:"personal"`
Movies string `json:"movies"` Interests string `json:"interests"`
Tv string `json:"tv"` Music string `json:"music"`
Books string `json:"books"` Activities string `json:"activities"`
Games string `json:"games"` Movies string `json:"movies"`
Universities []UsersUniversity `json:"universities"` Tv string `json:"tv"`
Schools []UsersSchool `json:"schools"` Books string `json:"books"`
About string `json:"about"` Games string `json:"games"`
Relatives []UsersRelative `json:"relatives"` Universities []UsersUniversity `json:"universities"`
Quotes string `json:"quotes"` Schools []UsersSchool `json:"schools"`
Lists []int `json:"lists"` About string `json:"about"`
Deactivated string `json:"deactivated"` Relatives []UsersRelative `json:"relatives"`
WallDefault string `json:"wall_default"` Quotes string `json:"quotes"`
Timezone int `json:"timezone"` Lists []int `json:"lists"`
Exports UsersExports `json:"exports"` Deactivated string `json:"deactivated"`
Counters UsersUserCounters `json:"counters"` WallDefault string `json:"wall_default"`
MobilePhone string `json:"mobile_phone"` Timezone int `json:"timezone"`
HomePhone string `json:"home_phone"` Exports UsersExports `json:"exports"`
FoundWith int `json:"found_with"` // TODO: check it Counters UsersUserCounters `json:"counters"`
OnlineInfo UsersOnlineInfo `json:"online_info"` MobilePhone string `json:"mobile_phone"`
Mutual FriendsRequestsMutual `json:"mutual"` HomePhone string `json:"home_phone"`
TrackCode string `json:"track_code"` FoundWith int `json:"found_with"` // TODO: check it
RelationPartner UsersUserMin `json:"relation_partner"` OnlineInfo UsersOnlineInfo `json:"online_info"`
Type string `json:"type"` Mutual FriendsRequestsMutual `json:"mutual"`
Skype string `json:"skype"` TrackCode string `json:"track_code"`
RelationPartner UsersUserMin `json:"relation_partner"`
Type string `json:"type"`
Skype string `json:"skype"`
} }
// ToMention return mention. // ToMention return mention.


@@ -297,3 +297,27 @@ type VideoVideoImage struct {
BaseImage BaseImage
WithPadding BaseBoolInt `json:"with_padding"` WithPadding BaseBoolInt `json:"with_padding"`
} }
// VideoLive struct.
type VideoLive struct {
OwnerID int `json:"owner_id"`
VideoID int `json:"video_id"`
Name string `json:"name"`
Description string `json:"description"`
AccessKey string `json:"access_key"`
Stream VideoLiveStream `json:"stream"`
}
// VideoLiveStream struct.
type VideoLiveStream struct {
URL string `json:"url"`
Key string `json:"key"`
OKMPURL string `json:"okmp_url"`
}
// VideoLiveCategory struct.
type VideoLiveCategory struct {
ID int `json:"id"`
Label string `json:"label"`
Sublist []VideoLiveCategory `json:"sublist,omitempty"`
}


@@ -4,6 +4,8 @@ go:
- 1.14.x - 1.14.x
- 1.15.x - 1.15.x
- 1.16.x - 1.16.x
- 1.17.x
- 1.18.x
env: env:
- GO111MODULE=on - GO111MODULE=on
install: install:


@@ -70,7 +70,7 @@ type ActionsRow struct {
func (r ActionsRow) MarshalJSON() ([]byte, error) { func (r ActionsRow) MarshalJSON() ([]byte, error) {
type actionsRow ActionsRow type actionsRow ActionsRow
return json.Marshal(struct { return Marshal(struct {
actionsRow actionsRow
Type ComponentType `json:"type"` Type ComponentType `json:"type"`
}{ }{
@@ -145,7 +145,7 @@ func (b Button) MarshalJSON() ([]byte, error) {
b.Style = PrimaryButton b.Style = PrimaryButton
} }
return json.Marshal(struct { return Marshal(struct {
button button
Type ComponentType `json:"type"` Type ComponentType `json:"type"`
}{ }{
@@ -192,7 +192,7 @@ func (SelectMenu) Type() ComponentType {
func (m SelectMenu) MarshalJSON() ([]byte, error) { func (m SelectMenu) MarshalJSON() ([]byte, error) {
type selectMenu SelectMenu type selectMenu SelectMenu
return json.Marshal(struct { return Marshal(struct {
selectMenu selectMenu
Type ComponentType `json:"type"` Type ComponentType `json:"type"`
}{ }{
@@ -208,7 +208,7 @@ type TextInput struct {
Style TextInputStyle `json:"style"` Style TextInputStyle `json:"style"`
Placeholder string `json:"placeholder,omitempty"` Placeholder string `json:"placeholder,omitempty"`
Value string `json:"value,omitempty"` Value string `json:"value,omitempty"`
Required bool `json:"required,omitempty"` Required bool `json:"required"`
MinLength int `json:"min_length,omitempty"` MinLength int `json:"min_length,omitempty"`
MaxLength int `json:"max_length,omitempty"` MaxLength int `json:"max_length,omitempty"`
} }
@@ -222,7 +222,7 @@ func (TextInput) Type() ComponentType {
func (m TextInput) MarshalJSON() ([]byte, error) { func (m TextInput) MarshalJSON() ([]byte, error) {
type inputText TextInput type inputText TextInput
return json.Marshal(struct { return Marshal(struct {
inputText inputText
Type ComponentType `json:"type"` Type ComponentType `json:"type"`
}{ }{


@@ -20,7 +20,7 @@ import (
) )
// VERSION of DiscordGo, follows Semantic Versioning. (http://semver.org/) // VERSION of DiscordGo, follows Semantic Versioning. (http://semver.org/)
const VERSION = "0.24.0" const VERSION = "0.25.0"
// New creates a new Discord session with provided token. // New creates a new Discord session with provided token.
// If the token is for a bot, it must be prefixed with "Bot " // If the token is for a bot, it must be prefixed with "Bot "
@@ -36,6 +36,7 @@ func New(token string) (s *Session, err error) {
StateEnabled: true, StateEnabled: true,
Compress: true, Compress: true,
ShouldReconnectOnError: true, ShouldReconnectOnError: true,
ShouldRetryOnRateLimit: true,
ShardID: 0, ShardID: 0,
ShardCount: 1, ShardCount: 1,
MaxRestRetries: 3, MaxRestRetries: 3,
@@ -49,7 +50,6 @@ func New(token string) (s *Session, err error) {
// These can be modified prior to calling Open() // These can be modified prior to calling Open()
s.Identify.Compress = true s.Identify.Compress = true
s.Identify.LargeThreshold = 250 s.Identify.LargeThreshold = 250
s.Identify.GuildSubscriptions = true
s.Identify.Properties.OS = runtime.GOOS s.Identify.Properties.OS = runtime.GOOS
s.Identify.Properties.Browser = "DiscordGo v" + VERSION s.Identify.Properties.Browser = "DiscordGo v" + VERSION
s.Identify.Intents = IntentsAllWithoutPrivileged s.Identify.Intents = IntentsAllWithoutPrivileged
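The Session defaults above now set ShouldRetryOnRateLimit and no longer set the GuildSubscriptions identify field. A minimal sketch of creating a session against these defaults; the wrapper function is illustrative, not part of discordgo:

```go
package example

import "github.com/bwmarrin/discordgo"

// newSession builds a bot session; the "Bot " prefix is required for bot
// tokens, as the New doc comment notes.
func newSession(token string) (*discordgo.Session, error) {
	s, err := discordgo.New("Bot " + token)
	if err != nil {
		return nil, err
	}
	// ShouldRetryOnRateLimit defaults to true as of this change; callers that
	// prefer to handle HTTP 429 responses themselves can switch it off before
	// calling s.Open().
	s.ShouldRetryOnRateLimit = false
	return s, nil
}
```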


@@ -23,15 +23,16 @@ var (
EndpointSmActive = EndpointSm + "active.json" EndpointSmActive = EndpointSm + "active.json"
EndpointSmUpcoming = EndpointSm + "upcoming.json" EndpointSmUpcoming = EndpointSm + "upcoming.json"
EndpointDiscord = "https://discord.com/" EndpointDiscord = "https://discord.com/"
EndpointAPI = EndpointDiscord + "api/v" + APIVersion + "/" EndpointAPI = EndpointDiscord + "api/v" + APIVersion + "/"
EndpointGuilds = EndpointAPI + "guilds/" EndpointGuilds = EndpointAPI + "guilds/"
EndpointChannels = EndpointAPI + "channels/" EndpointChannels = EndpointAPI + "channels/"
EndpointUsers = EndpointAPI + "users/" EndpointUsers = EndpointAPI + "users/"
EndpointGateway = EndpointAPI + "gateway" EndpointGateway = EndpointAPI + "gateway"
EndpointGatewayBot = EndpointGateway + "/bot" EndpointGatewayBot = EndpointGateway + "/bot"
EndpointWebhooks = EndpointAPI + "webhooks/" EndpointWebhooks = EndpointAPI + "webhooks/"
EndpointStickers = EndpointAPI + "stickers/" EndpointStickers = EndpointAPI + "stickers/"
EndpointStageInstances = EndpointAPI + "stage-instances"
EndpointCDN = "https://cdn.discordapp.com/" EndpointCDN = "https://cdn.discordapp.com/"
EndpointCDNAttachments = EndpointCDN + "attachments/" EndpointCDNAttachments = EndpointCDN + "attachments/"
@@ -72,6 +73,7 @@ var (
EndpointGuildPreview = func(gID string) string { return EndpointGuilds + gID + "/preview" } EndpointGuildPreview = func(gID string) string { return EndpointGuilds + gID + "/preview" }
EndpointGuildChannels = func(gID string) string { return EndpointGuilds + gID + "/channels" } EndpointGuildChannels = func(gID string) string { return EndpointGuilds + gID + "/channels" }
EndpointGuildMembers = func(gID string) string { return EndpointGuilds + gID + "/members" } EndpointGuildMembers = func(gID string) string { return EndpointGuilds + gID + "/members" }
EndpointGuildMembersSearch = func(gID string) string { return EndpointGuildMembers(gID) + "/search" }
EndpointGuildMember = func(gID, uID string) string { return EndpointGuilds + gID + "/members/" + uID } EndpointGuildMember = func(gID, uID string) string { return EndpointGuilds + gID + "/members/" + uID }
EndpointGuildMemberRole = func(gID, uID, rID string) string { return EndpointGuilds + gID + "/members/" + uID + "/roles/" + rID } EndpointGuildMemberRole = func(gID, uID, rID string) string { return EndpointGuilds + gID + "/members/" + uID + "/roles/" + rID }
EndpointGuildBans = func(gID string) string { return EndpointGuilds + gID + "/bans" } EndpointGuildBans = func(gID string) string { return EndpointGuilds + gID + "/bans" }
@@ -94,6 +96,7 @@ var (
EndpointGuildBanner = func(gID, hash string) string { return EndpointCDNBanners + gID + "/" + hash + ".png" } EndpointGuildBanner = func(gID, hash string) string { return EndpointCDNBanners + gID + "/" + hash + ".png" }
EndpointGuildStickers = func(gID string) string { return EndpointGuilds + gID + "/stickers" } EndpointGuildStickers = func(gID string) string { return EndpointGuilds + gID + "/stickers" }
EndpointGuildSticker = func(gID, sID string) string { return EndpointGuilds + gID + "/stickers/" + sID } EndpointGuildSticker = func(gID, sID string) string { return EndpointGuilds + gID + "/stickers/" + sID }
EndpointStageInstance = func(cID string) string { return EndpointStageInstances + "/" + cID }
EndpointGuildScheduledEvents = func(gID string) string { return EndpointGuilds + gID + "/scheduled-events" } EndpointGuildScheduledEvents = func(gID string) string { return EndpointGuilds + gID + "/scheduled-events" }
EndpointGuildScheduledEvent = func(gID, eID string) string { return EndpointGuilds + gID + "/scheduled-events/" + eID } EndpointGuildScheduledEvent = func(gID, eID string) string { return EndpointGuilds + gID + "/scheduled-events/" + eID }
EndpointGuildScheduledEventUsers = func(gID, eID string) string { return EndpointGuildScheduledEvent(gID, eID) + "/users" } EndpointGuildScheduledEventUsers = func(gID, eID string) string { return EndpointGuildScheduledEvent(gID, eID) + "/users" }


@@ -7,62 +7,67 @@ package discordgo
// Event type values are used to match the events returned by Discord. // Event type values are used to match the events returned by Discord.
// EventTypes surrounded by __ are synthetic and are internal to DiscordGo. // EventTypes surrounded by __ are synthetic and are internal to DiscordGo.
const ( const (
channelCreateEventType = "CHANNEL_CREATE" channelCreateEventType = "CHANNEL_CREATE"
channelDeleteEventType = "CHANNEL_DELETE" channelDeleteEventType = "CHANNEL_DELETE"
channelPinsUpdateEventType = "CHANNEL_PINS_UPDATE" channelPinsUpdateEventType = "CHANNEL_PINS_UPDATE"
channelUpdateEventType = "CHANNEL_UPDATE" channelUpdateEventType = "CHANNEL_UPDATE"
connectEventType = "__CONNECT__" connectEventType = "__CONNECT__"
disconnectEventType = "__DISCONNECT__" disconnectEventType = "__DISCONNECT__"
eventEventType = "__EVENT__" eventEventType = "__EVENT__"
guildBanAddEventType = "GUILD_BAN_ADD" guildBanAddEventType = "GUILD_BAN_ADD"
guildBanRemoveEventType = "GUILD_BAN_REMOVE" guildBanRemoveEventType = "GUILD_BAN_REMOVE"
guildCreateEventType = "GUILD_CREATE" guildCreateEventType = "GUILD_CREATE"
guildDeleteEventType = "GUILD_DELETE" guildDeleteEventType = "GUILD_DELETE"
guildEmojisUpdateEventType = "GUILD_EMOJIS_UPDATE" guildEmojisUpdateEventType = "GUILD_EMOJIS_UPDATE"
guildIntegrationsUpdateEventType = "GUILD_INTEGRATIONS_UPDATE" guildIntegrationsUpdateEventType = "GUILD_INTEGRATIONS_UPDATE"
guildMemberAddEventType = "GUILD_MEMBER_ADD" guildMemberAddEventType = "GUILD_MEMBER_ADD"
guildMemberRemoveEventType = "GUILD_MEMBER_REMOVE" guildMemberRemoveEventType = "GUILD_MEMBER_REMOVE"
guildMemberUpdateEventType = "GUILD_MEMBER_UPDATE" guildMemberUpdateEventType = "GUILD_MEMBER_UPDATE"
guildMembersChunkEventType = "GUILD_MEMBERS_CHUNK" guildMembersChunkEventType = "GUILD_MEMBERS_CHUNK"
guildRoleCreateEventType = "GUILD_ROLE_CREATE" guildRoleCreateEventType = "GUILD_ROLE_CREATE"
guildRoleDeleteEventType = "GUILD_ROLE_DELETE" guildRoleDeleteEventType = "GUILD_ROLE_DELETE"
guildRoleUpdateEventType = "GUILD_ROLE_UPDATE" guildRoleUpdateEventType = "GUILD_ROLE_UPDATE"
guildUpdateEventType = "GUILD_UPDATE" guildStageInstanceCreateEventType = "STAGE_INSTANCE_CREATE"
guildScheduledEventCreateEventType = "GUILD_SCHEDULED_EVENT_CREATE" guildStageInstanceUpdateEventType = "STAGE_INSTANCE_UPDATE"
guildScheduledEventUpdateEventType = "GUILD_SCHEDULED_EVENT_UPDATE" guildStageInstanceDeleteEventType = "STAGE_INSTANCE_DELETE"
guildScheduledEventDeleteEventType = "GUILD_SCHEDULED_EVENT_DELETE" guildScheduledEventCreateEventType = "GUILD_SCHEDULED_EVENT_CREATE"
interactionCreateEventType = "INTERACTION_CREATE" guildScheduledEventDeleteEventType = "GUILD_SCHEDULED_EVENT_DELETE"
inviteCreateEventType = "INVITE_CREATE" guildScheduledEventUpdateEventType = "GUILD_SCHEDULED_EVENT_UPDATE"
inviteDeleteEventType = "INVITE_DELETE" guildScheduledEventUserAddEventType = "GUILD_SCHEDULED_EVENT_USER_ADD"
messageAckEventType = "MESSAGE_ACK" guildScheduledEventUserRemoveEventType = "GUILD_SCHEDULED_EVENT_USER_REMOVE"
messageCreateEventType = "MESSAGE_CREATE" guildUpdateEventType = "GUILD_UPDATE"
messageDeleteEventType = "MESSAGE_DELETE" interactionCreateEventType = "INTERACTION_CREATE"
messageDeleteBulkEventType = "MESSAGE_DELETE_BULK" inviteCreateEventType = "INVITE_CREATE"
messageReactionAddEventType = "MESSAGE_REACTION_ADD" inviteDeleteEventType = "INVITE_DELETE"
messageReactionRemoveEventType = "MESSAGE_REACTION_REMOVE" messageAckEventType = "MESSAGE_ACK"
messageReactionRemoveAllEventType = "MESSAGE_REACTION_REMOVE_ALL" messageCreateEventType = "MESSAGE_CREATE"
messageUpdateEventType = "MESSAGE_UPDATE" messageDeleteEventType = "MESSAGE_DELETE"
presenceUpdateEventType = "PRESENCE_UPDATE" messageDeleteBulkEventType = "MESSAGE_DELETE_BULK"
presencesReplaceEventType = "PRESENCES_REPLACE" messageReactionAddEventType = "MESSAGE_REACTION_ADD"
rateLimitEventType = "__RATE_LIMIT__" messageReactionRemoveEventType = "MESSAGE_REACTION_REMOVE"
readyEventType = "READY" messageReactionRemoveAllEventType = "MESSAGE_REACTION_REMOVE_ALL"
relationshipAddEventType = "RELATIONSHIP_ADD" messageUpdateEventType = "MESSAGE_UPDATE"
relationshipRemoveEventType = "RELATIONSHIP_REMOVE" presenceUpdateEventType = "PRESENCE_UPDATE"
resumedEventType = "RESUMED" presencesReplaceEventType = "PRESENCES_REPLACE"
threadCreateEventType = "THREAD_CREATE" rateLimitEventType = "__RATE_LIMIT__"
threadDeleteEventType = "THREAD_DELETE" readyEventType = "READY"
threadListSyncEventType = "THREAD_LIST_SYNC" relationshipAddEventType = "RELATIONSHIP_ADD"
threadMemberUpdateEventType = "THREAD_MEMBER_UPDATE" relationshipRemoveEventType = "RELATIONSHIP_REMOVE"
threadMembersUpdateEventType = "THREAD_MEMBERS_UPDATE" resumedEventType = "RESUMED"
threadUpdateEventType = "THREAD_UPDATE" threadCreateEventType = "THREAD_CREATE"
typingStartEventType = "TYPING_START" threadDeleteEventType = "THREAD_DELETE"
userGuildSettingsUpdateEventType = "USER_GUILD_SETTINGS_UPDATE" threadListSyncEventType = "THREAD_LIST_SYNC"
userNoteUpdateEventType = "USER_NOTE_UPDATE" threadMemberUpdateEventType = "THREAD_MEMBER_UPDATE"
userSettingsUpdateEventType = "USER_SETTINGS_UPDATE" threadMembersUpdateEventType = "THREAD_MEMBERS_UPDATE"
userUpdateEventType = "USER_UPDATE" threadUpdateEventType = "THREAD_UPDATE"
voiceServerUpdateEventType = "VOICE_SERVER_UPDATE" typingStartEventType = "TYPING_START"
voiceStateUpdateEventType = "VOICE_STATE_UPDATE" userGuildSettingsUpdateEventType = "USER_GUILD_SETTINGS_UPDATE"
webhooksUpdateEventType = "WEBHOOKS_UPDATE" userNoteUpdateEventType = "USER_NOTE_UPDATE"
userSettingsUpdateEventType = "USER_SETTINGS_UPDATE"
userUpdateEventType = "USER_UPDATE"
voiceServerUpdateEventType = "VOICE_SERVER_UPDATE"
voiceStateUpdateEventType = "VOICE_STATE_UPDATE"
webhooksUpdateEventType = "WEBHOOKS_UPDATE"
) )
// channelCreateEventHandler is an event handler for ChannelCreate events. // channelCreateEventHandler is an event handler for ChannelCreate events.
@@ -310,66 +315,6 @@ func (eh guildIntegrationsUpdateEventHandler) Handle(s *Session, i interface{})
} }
} }
// guildScheduledEventCreateEventHandler is an event handler for GuildScheduledEventCreate events.
type guildScheduledEventCreateEventHandler func(*Session, *GuildScheduledEventCreate)
// Type returns the event type for GuildScheduledEventCreate events.
func (eh guildScheduledEventCreateEventHandler) Type() string {
return guildScheduledEventCreateEventType
}
// New returns a new instance of GuildScheduledEventCreate.
func (eh guildScheduledEventCreateEventHandler) New() interface{} {
return &GuildScheduledEventCreate{}
}
// Handle is the handler for GuildScheduledEventCreate events.
func (eh guildScheduledEventCreateEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventCreate); ok {
eh(s, t)
}
}
// guildScheduledEventUpdateEventHandler is an event handler for GuildScheduledEventUpdate events.
type guildScheduledEventUpdateEventHandler func(*Session, *GuildScheduledEventUpdate)
// Type returns the event type for GuildScheduledEventUpdate events.
func (eh guildScheduledEventUpdateEventHandler) Type() string {
return guildScheduledEventUpdateEventType
}
// New returns a new instance of GuildScheduledEventUpdate.
func (eh guildScheduledEventUpdateEventHandler) New() interface{} {
return &GuildScheduledEventUpdate{}
}
// Handle is the handler for GuildScheduledEventUpdate events.
func (eh guildScheduledEventUpdateEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventUpdate); ok {
eh(s, t)
}
}
// guildScheduledEventDeleteEventHandler is an event handler for GuildScheduledEventDelete events.
type guildScheduledEventDeleteEventHandler func(*Session, *GuildScheduledEventDelete)
// Type returns the event type for GuildScheduledEventDelete events.
func (eh guildScheduledEventDeleteEventHandler) Type() string {
return guildScheduledEventDeleteEventType
}
// New returns a new instance of GuildScheduledEventDelete.
func (eh guildScheduledEventDeleteEventHandler) New() interface{} {
return &GuildScheduledEventDelete{}
}
// Handle is the handler for GuildScheduledEventDelete events.
func (eh guildScheduledEventDeleteEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventDelete); ok {
eh(s, t)
}
}
// guildMemberAddEventHandler is an event handler for GuildMemberAdd events. // guildMemberAddEventHandler is an event handler for GuildMemberAdd events.
type guildMemberAddEventHandler func(*Session, *GuildMemberAdd) type guildMemberAddEventHandler func(*Session, *GuildMemberAdd)
@@ -510,6 +455,166 @@ func (eh guildRoleUpdateEventHandler) Handle(s *Session, i interface{}) {
} }
} }
// guildStageInstanceEventCreateHandler is an event handler for StageInstanceEventCreate events.
type guildStageInstanceEventCreateHandler func(*Session, *StageInstanceEventCreate)
// Type returns the event type for StageInstanceEventCreate events.
func (eh guildStageInstanceEventCreateHandler) Type() string {
return guildStageInstanceCreateEventType
}
// New returns a new instance of StageInstanceEventCreate.
func (eh guildStageInstanceEventCreateHandler) New() interface{} {
return &StageInstanceEventCreate{}
}
// Handle is the handler for StageInstanceEventCreate events.
func (eh guildStageInstanceEventCreateHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*StageInstanceEventCreate); ok {
eh(s, t)
}
}
// guildStageInstanceEventUpdateHandler is an event handler for StageInstanceEventUpdate events.
type guildStageInstanceEventUpdateHandler func(*Session, *StageInstanceEventUpdate)
// Type returns the event type for StageInstanceEventUpdate events.
func (eh guildStageInstanceEventUpdateHandler) Type() string {
return guildStageInstanceCreateEventType
}
// New returns a new instance of StageInstanceEventUpdate.
func (eh guildStageInstanceEventUpdateHandler) New() interface{} {
return &StageInstanceEventUpdate{}
}
// Handle is the handler for StageInstanceEventUpdate events.
func (eh guildStageInstanceEventUpdateHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*StageInstanceEventUpdate); ok {
eh(s, t)
}
}
// guildStageInstanceEventDeleteHandler is an event handler for StageInstanceEventDelete events.
type guildStageInstanceEventDeleteHandler func(*Session, *StageInstanceEventDelete)
// Type returns the event type for StageInstanceEventDelete events.
func (eh guildStageInstanceEventDeleteHandler) Type() string {
return guildStageInstanceCreateEventType
}
// New returns a new instance of StageInstanceEventDelete.
func (eh guildStageInstanceEventDeleteHandler) New() interface{} {
return &StageInstanceEventDelete{}
}
// Handle is the handler for StageInstanceEventDelete events.
func (eh guildStageInstanceEventDeleteHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*StageInstanceEventDelete); ok {
eh(s, t)
}
}
// guildScheduledEventCreateEventHandler is an event handler for GuildScheduledEventCreate events.
type guildScheduledEventCreateEventHandler func(*Session, *GuildScheduledEventCreate)
// Type returns the event type for GuildScheduledEventCreate events.
func (eh guildScheduledEventCreateEventHandler) Type() string {
return guildScheduledEventCreateEventType
}
// New returns a new instance of GuildScheduledEventCreate.
func (eh guildScheduledEventCreateEventHandler) New() interface{} {
return &GuildScheduledEventCreate{}
}
// Handle is the handler for GuildScheduledEventCreate events.
func (eh guildScheduledEventCreateEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventCreate); ok {
eh(s, t)
}
}
// guildScheduledEventDeleteEventHandler is an event handler for GuildScheduledEventDelete events.
type guildScheduledEventDeleteEventHandler func(*Session, *GuildScheduledEventDelete)
// Type returns the event type for GuildScheduledEventDelete events.
func (eh guildScheduledEventDeleteEventHandler) Type() string {
return guildScheduledEventDeleteEventType
}
// New returns a new instance of GuildScheduledEventDelete.
func (eh guildScheduledEventDeleteEventHandler) New() interface{} {
return &GuildScheduledEventDelete{}
}
// Handle is the handler for GuildScheduledEventDelete events.
func (eh guildScheduledEventDeleteEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventDelete); ok {
eh(s, t)
}
}
// guildScheduledEventUpdateEventHandler is an event handler for GuildScheduledEventUpdate events.
type guildScheduledEventUpdateEventHandler func(*Session, *GuildScheduledEventUpdate)
// Type returns the event type for GuildScheduledEventUpdate events.
func (eh guildScheduledEventUpdateEventHandler) Type() string {
return guildScheduledEventUpdateEventType
}
// New returns a new instance of GuildScheduledEventUpdate.
func (eh guildScheduledEventUpdateEventHandler) New() interface{} {
return &GuildScheduledEventUpdate{}
}
// Handle is the handler for GuildScheduledEventUpdate events.
func (eh guildScheduledEventUpdateEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventUpdate); ok {
eh(s, t)
}
}
// guildScheduledEventUserAddEventHandler is an event handler for GuildScheduledEventUserAdd events.
type guildScheduledEventUserAddEventHandler func(*Session, *GuildScheduledEventUserAdd)
// Type returns the event type for GuildScheduledEventUserAdd events.
func (eh guildScheduledEventUserAddEventHandler) Type() string {
return guildScheduledEventUserAddEventType
}
// New returns a new instance of GuildScheduledEventUserAdd.
func (eh guildScheduledEventUserAddEventHandler) New() interface{} {
return &GuildScheduledEventUserAdd{}
}
// Handle is the handler for GuildScheduledEventUserAdd events.
func (eh guildScheduledEventUserAddEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventUserAdd); ok {
eh(s, t)
}
}
// guildScheduledEventUserRemoveEventHandler is an event handler for GuildScheduledEventUserRemove events.
type guildScheduledEventUserRemoveEventHandler func(*Session, *GuildScheduledEventUserRemove)
// Type returns the event type for GuildScheduledEventUserRemove events.
func (eh guildScheduledEventUserRemoveEventHandler) Type() string {
return guildScheduledEventUserRemoveEventType
}
// New returns a new instance of GuildScheduledEventUserRemove.
func (eh guildScheduledEventUserRemoveEventHandler) New() interface{} {
return &GuildScheduledEventUserRemove{}
}
// Handle is the handler for GuildScheduledEventUserRemove events.
func (eh guildScheduledEventUserRemoveEventHandler) Handle(s *Session, i interface{}) {
if t, ok := i.(*GuildScheduledEventUserRemove); ok {
eh(s, t)
}
}
// guildUpdateEventHandler is an event handler for GuildUpdate events. // guildUpdateEventHandler is an event handler for GuildUpdate events.
type guildUpdateEventHandler func(*Session, *GuildUpdate) type guildUpdateEventHandler func(*Session, *GuildUpdate)
@@ -1195,12 +1300,6 @@ func handlerForInterface(handler interface{}) EventHandler {
return guildEmojisUpdateEventHandler(v) return guildEmojisUpdateEventHandler(v)
case func(*Session, *GuildIntegrationsUpdate): case func(*Session, *GuildIntegrationsUpdate):
return guildIntegrationsUpdateEventHandler(v) return guildIntegrationsUpdateEventHandler(v)
case func(*Session, *GuildScheduledEventCreate):
return guildScheduledEventCreateEventHandler(v)
case func(*Session, *GuildScheduledEventUpdate):
return guildScheduledEventUpdateEventHandler(v)
case func(*Session, *GuildScheduledEventDelete):
return guildScheduledEventDeleteEventHandler(v)
case func(*Session, *GuildMemberAdd): case func(*Session, *GuildMemberAdd):
return guildMemberAddEventHandler(v) return guildMemberAddEventHandler(v)
case func(*Session, *GuildMemberRemove): case func(*Session, *GuildMemberRemove):
@@ -1215,6 +1314,16 @@ func handlerForInterface(handler interface{}) EventHandler {
return guildRoleDeleteEventHandler(v) return guildRoleDeleteEventHandler(v)
case func(*Session, *GuildRoleUpdate): case func(*Session, *GuildRoleUpdate):
return guildRoleUpdateEventHandler(v) return guildRoleUpdateEventHandler(v)
case func(*Session, *GuildScheduledEventCreate):
return guildScheduledEventCreateEventHandler(v)
case func(*Session, *GuildScheduledEventDelete):
return guildScheduledEventDeleteEventHandler(v)
case func(*Session, *GuildScheduledEventUpdate):
return guildScheduledEventUpdateEventHandler(v)
case func(*Session, *GuildScheduledEventUserAdd):
return guildScheduledEventUserAddEventHandler(v)
case func(*Session, *GuildScheduledEventUserRemove):
return guildScheduledEventUserRemoveEventHandler(v)
case func(*Session, *GuildUpdate): case func(*Session, *GuildUpdate):
return guildUpdateEventHandler(v) return guildUpdateEventHandler(v)
case func(*Session, *InteractionCreate): case func(*Session, *InteractionCreate):
@@ -1297,9 +1406,6 @@ func init() {
registerInterfaceProvider(guildDeleteEventHandler(nil)) registerInterfaceProvider(guildDeleteEventHandler(nil))
registerInterfaceProvider(guildEmojisUpdateEventHandler(nil)) registerInterfaceProvider(guildEmojisUpdateEventHandler(nil))
registerInterfaceProvider(guildIntegrationsUpdateEventHandler(nil)) registerInterfaceProvider(guildIntegrationsUpdateEventHandler(nil))
registerInterfaceProvider(guildScheduledEventCreateEventHandler(nil))
registerInterfaceProvider(guildScheduledEventUpdateEventHandler(nil))
registerInterfaceProvider(guildScheduledEventDeleteEventHandler(nil))
registerInterfaceProvider(guildMemberAddEventHandler(nil)) registerInterfaceProvider(guildMemberAddEventHandler(nil))
registerInterfaceProvider(guildMemberRemoveEventHandler(nil)) registerInterfaceProvider(guildMemberRemoveEventHandler(nil))
registerInterfaceProvider(guildMemberUpdateEventHandler(nil)) registerInterfaceProvider(guildMemberUpdateEventHandler(nil))
@@ -1307,6 +1413,11 @@ func init() {
registerInterfaceProvider(guildRoleCreateEventHandler(nil)) registerInterfaceProvider(guildRoleCreateEventHandler(nil))
registerInterfaceProvider(guildRoleDeleteEventHandler(nil)) registerInterfaceProvider(guildRoleDeleteEventHandler(nil))
registerInterfaceProvider(guildRoleUpdateEventHandler(nil)) registerInterfaceProvider(guildRoleUpdateEventHandler(nil))
registerInterfaceProvider(guildScheduledEventCreateEventHandler(nil))
registerInterfaceProvider(guildScheduledEventDeleteEventHandler(nil))
registerInterfaceProvider(guildScheduledEventUpdateEventHandler(nil))
registerInterfaceProvider(guildScheduledEventUserAddEventHandler(nil))
registerInterfaceProvider(guildScheduledEventUserRemoveEventHandler(nil))
registerInterfaceProvider(guildUpdateEventHandler(nil)) registerInterfaceProvider(guildUpdateEventHandler(nil))
registerInterfaceProvider(interactionCreateEventHandler(nil)) registerInterfaceProvider(interactionCreateEventHandler(nil))
registerInterfaceProvider(inviteCreateEventHandler(nil)) registerInterfaceProvider(inviteCreateEventHandler(nil))
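With the new handler types wired into handlerForInterface and init above, Session.AddHandler accepts callbacks with these signatures. A minimal registration sketch (not part of the diff), assuming the fork import path used by matterbridge and a bot token in the environment:

package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/matterbridge/discordgo"
)

func main() {
	dg, err := discordgo.New("Bot " + os.Getenv("DISCORD_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}
	// The handler types added in this diff are what let AddHandler route these signatures.
	dg.AddHandler(func(s *discordgo.Session, e *discordgo.GuildScheduledEventCreate) {
		log.Printf("scheduled event %q created in guild %s", e.Name, e.GuildID)
	})
	dg.AddHandler(func(s *discordgo.Session, e *discordgo.GuildScheduledEventUserAdd) {
		log.Printf("user %s subscribed to scheduled event %s", e.UserID, e.GuildScheduledEventID)
	})
	dg.AddHandler(func(s *discordgo.Session, e *discordgo.StageInstanceEventCreate) {
		log.Printf("stage %q started in channel %s", e.Topic, e.ChannelID)
	})
	if err := dg.Open(); err != nil {
		log.Fatal(err)
	}
	defer dg.Close()
	// Wait for CTRL-C.
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, os.Interrupt)
	<-sc
}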

View File

@@ -191,7 +191,9 @@ type GuildMembersChunk struct {
Members []*Member `json:"members"` Members []*Member `json:"members"`
ChunkIndex int `json:"chunk_index"` ChunkIndex int `json:"chunk_index"`
ChunkCount int `json:"chunk_count"` ChunkCount int `json:"chunk_count"`
NotFound []string `json:"not_found,omitempty"`
Presences []*Presence `json:"presences,omitempty"` Presences []*Presence `json:"presences,omitempty"`
Nonce string `json:"nonce,omitempty"`
} }
// GuildIntegrationsUpdate is the data for a GuildIntegrationsUpdate event. // GuildIntegrationsUpdate is the data for a GuildIntegrationsUpdate event.
@@ -199,6 +201,21 @@ type GuildIntegrationsUpdate struct {
GuildID string `json:"guild_id"` GuildID string `json:"guild_id"`
} }
// StageInstanceEventCreate is the data for a StageInstanceEventCreate event.
type StageInstanceEventCreate struct {
*StageInstance
}
// StageInstanceEventUpdate is the data for a StageInstanceEventUpdate event.
type StageInstanceEventUpdate struct {
*StageInstance
}
// StageInstanceEventDelete is the data for a StageInstanceEventDelete event.
type StageInstanceEventDelete struct {
*StageInstance
}
// GuildScheduledEventCreate is the data for a GuildScheduledEventCreate event. // GuildScheduledEventCreate is the data for a GuildScheduledEventCreate event.
type GuildScheduledEventCreate struct { type GuildScheduledEventCreate struct {
*GuildScheduledEvent *GuildScheduledEvent
@@ -214,6 +231,20 @@ type GuildScheduledEventDelete struct {
*GuildScheduledEvent *GuildScheduledEvent
} }
// GuildScheduledEventUserAdd is the data for a GuildScheduledEventUserAdd event.
type GuildScheduledEventUserAdd struct {
GuildScheduledEventID string `json:"guild_scheduled_event_id"`
UserID string `json:"user_id"`
GuildID string `json:"guild_id"`
}
// GuildScheduledEventUserRemove is the data for a GuildScheduledEventUserRemove event.
type GuildScheduledEventUserRemove struct {
GuildScheduledEventID string `json:"guild_scheduled_event_id"`
UserID string `json:"user_id"`
GuildID string `json:"guild_id"`
}
// MessageAck is the data for a MessageAck event. // MessageAck is the data for a MessageAck event.
type MessageAck struct { type MessageAck struct {
MessageID string `json:"message_id"` MessageID string `json:"message_id"`

View File

@@ -35,12 +35,14 @@ type ApplicationCommand struct {
Version string `json:"version,omitempty"` Version string `json:"version,omitempty"`
Type ApplicationCommandType `json:"type,omitempty"` Type ApplicationCommandType `json:"type,omitempty"`
Name string `json:"name"` Name string `json:"name"`
NameLocalizations *map[Locale]string `json:"name_localizations,omitempty"`
DefaultPermission *bool `json:"default_permission,omitempty"` DefaultPermission *bool `json:"default_permission,omitempty"`
// NOTE: Chat commands only. Otherwise it mustn't be set. // NOTE: Chat commands only. Otherwise it mustn't be set.
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
Options []*ApplicationCommandOption `json:"options"` DescriptionLocalizations *map[Locale]string `json:"description_localizations,omitempty"`
Options []*ApplicationCommandOption `json:"options"`
} }
// ApplicationCommandOptionType indicates the type of a slash command's option. // ApplicationCommandOptionType indicates the type of a slash command's option.
@@ -91,9 +93,11 @@ func (t ApplicationCommandOptionType) String() string {
// ApplicationCommandOption represents an option/subcommand/subcommands group. // ApplicationCommandOption represents an option/subcommand/subcommands group.
type ApplicationCommandOption struct { type ApplicationCommandOption struct {
Type ApplicationCommandOptionType `json:"type"` Type ApplicationCommandOptionType `json:"type"`
Name string `json:"name"` Name string `json:"name"`
Description string `json:"description,omitempty"` NameLocalizations map[Locale]string `json:"name_localizations,omitempty"`
Description string `json:"description,omitempty"`
DescriptionLocalizations map[Locale]string `json:"description_localizations,omitempty"`
// NOTE: This feature was on the API, but at some point developers decided to remove it. // NOTE: This feature was on the API, but at some point developers decided to remove it.
// So I commented it, until it will be officially on the docs. // So I commented it, until it will be officially on the docs.
// Default bool `json:"default"` // Default bool `json:"default"`
@@ -113,8 +117,9 @@ type ApplicationCommandOption struct {
// ApplicationCommandOptionChoice represents a slash command option choice. // ApplicationCommandOptionChoice represents a slash command option choice.
type ApplicationCommandOptionChoice struct { type ApplicationCommandOptionChoice struct {
Name string `json:"name"` Name string `json:"name"`
Value interface{} `json:"value"` NameLocalizations map[Locale]string `json:"name_localizations,omitempty"`
Value interface{} `json:"value"`
} }
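A sketch of filling the new localization fields when registering a command (not part of the diff; it assumes Locale is a string-derived type as in upstream discordgo, so locale codes can be given as literal keys, and that dg, appID and guildID already exist):

cmd := &discordgo.ApplicationCommand{
	Name:        "ping",
	Description: "Replies with pong",
	NameLocalizations: &map[discordgo.Locale]string{
		"fr": "ping",
	},
	DescriptionLocalizations: &map[discordgo.Locale]string{
		"fr": "Répond avec pong",
	},
	Options: []*discordgo.ApplicationCommandOption{
		{
			Type:                     discordgo.ApplicationCommandOptionString,
			Name:                     "text",
			Description:              "Text to echo",
			NameLocalizations:        map[discordgo.Locale]string{"fr": "texte"},
			DescriptionLocalizations: map[discordgo.Locale]string{"fr": "Texte à renvoyer"},
		},
	},
}
_, err := dg.ApplicationCommandCreate(appID, guildID, cmd)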
// ApplicationCommandPermissions represents a single user or role permission for a command. // ApplicationCommandPermissions represents a single user or role permission for a command.
@@ -175,6 +180,7 @@ func (t InteractionType) String() string {
// Interaction represents data of an interaction. // Interaction represents data of an interaction.
type Interaction struct { type Interaction struct {
ID string `json:"id"` ID string `json:"id"`
AppID string `json:"application_id"`
Type InteractionType `json:"type"` Type InteractionType `json:"type"`
Data InteractionData `json:"data"` Data InteractionData `json:"data"`
GuildID string `json:"guild_id"` GuildID string `json:"guild_id"`
@@ -509,7 +515,7 @@ type InteractionResponseData struct {
TTS bool `json:"tts"` TTS bool `json:"tts"`
Content string `json:"content"` Content string `json:"content"`
Components []MessageComponent `json:"components"` Components []MessageComponent `json:"components"`
Embeds []*MessageEmbed `json:"embeds,omitempty"` Embeds []*MessageEmbed `json:"embeds"`
AllowedMentions *MessageAllowedMentions `json:"allowed_mentions,omitempty"` AllowedMentions *MessageAllowedMentions `json:"allowed_mentions,omitempty"`
Flags uint64 `json:"flags,omitempty"` Flags uint64 `json:"flags,omitempty"`
Files []*File `json:"-"` Files []*File `json:"-"`

View File

@@ -199,7 +199,9 @@ const (
MessageFlagsCrossPosted MessageFlags = 1 << 0 MessageFlagsCrossPosted MessageFlags = 1 << 0
// MessageFlagsIsCrossPosted this message originated from a message in another channel (via Channel Following). // MessageFlagsIsCrossPosted this message originated from a message in another channel (via Channel Following).
MessageFlagsIsCrossPosted MessageFlags = 1 << 1 MessageFlagsIsCrossPosted MessageFlags = 1 << 1
// MessageFlagsSupressEmbeds do not include any embeds when serializing this message. // MessageFlagsSuppressEmbeds do not include any embeds when serializing this message.
MessageFlagsSuppressEmbeds MessageFlags = 1 << 2
// TODO: deprecated, remove when compatibility is not needed
MessageFlagsSupressEmbeds MessageFlags = 1 << 2 MessageFlagsSupressEmbeds MessageFlags = 1 << 2
// MessageFlagsSourceMessageDeleted the source message for this crosspost has been deleted (via Channel Following). // MessageFlagsSourceMessageDeleted the source message for this crosspost has been deleted (via Channel Following).
MessageFlagsSourceMessageDeleted MessageFlags = 1 << 3 MessageFlagsSourceMessageDeleted MessageFlags = 1 << 3
@@ -225,7 +227,7 @@ type File struct {
// MessageSend stores all parameters you can send with ChannelMessageSendComplex. // MessageSend stores all parameters you can send with ChannelMessageSendComplex.
type MessageSend struct { type MessageSend struct {
Content string `json:"content,omitempty"` Content string `json:"content,omitempty"`
Embeds []*MessageEmbed `json:"embeds,omitempty"` Embeds []*MessageEmbed `json:"embeds"`
TTS bool `json:"tts"` TTS bool `json:"tts"`
Components []MessageComponent `json:"components"` Components []MessageComponent `json:"components"`
Files []*File `json:"-"` Files []*File `json:"-"`
@@ -244,8 +246,9 @@ type MessageSend struct {
type MessageEdit struct { type MessageEdit struct {
Content *string `json:"content,omitempty"` Content *string `json:"content,omitempty"`
Components []MessageComponent `json:"components"` Components []MessageComponent `json:"components"`
Embeds []*MessageEmbed `json:"embeds,omitempty"` Embeds []*MessageEmbed `json:"embeds"`
AllowedMentions *MessageAllowedMentions `json:"allowed_mentions,omitempty"` AllowedMentions *MessageAllowedMentions `json:"allowed_mentions,omitempty"`
Flags MessageFlags `json:"flags,omitempty"`
ID string ID string
Channel string Channel string
@@ -342,7 +345,7 @@ type MessageEmbedFooter struct {
// MessageEmbedImage is a part of a MessageEmbed struct. // MessageEmbedImage is a part of a MessageEmbed struct.
type MessageEmbedImage struct { type MessageEmbedImage struct {
URL string `json:"url,omitempty"` URL string `json:"url"`
ProxyURL string `json:"proxy_url,omitempty"` ProxyURL string `json:"proxy_url,omitempty"`
Width int `json:"width,omitempty"` Width int `json:"width,omitempty"`
Height int `json:"height,omitempty"` Height int `json:"height,omitempty"`
@@ -350,7 +353,7 @@ type MessageEmbedImage struct {
// MessageEmbedThumbnail is a part of a MessageEmbed struct. // MessageEmbedThumbnail is a part of a MessageEmbed struct.
type MessageEmbedThumbnail struct { type MessageEmbedThumbnail struct {
URL string `json:"url,omitempty"` URL string `json:"url"`
ProxyURL string `json:"proxy_url,omitempty"` ProxyURL string `json:"proxy_url,omitempty"`
Width int `json:"width,omitempty"` Width int `json:"width,omitempty"`
Height int `json:"height,omitempty"` Height int `json:"height,omitempty"`
@@ -372,7 +375,7 @@ type MessageEmbedProvider struct {
// MessageEmbedAuthor is a part of a MessageEmbed struct. // MessageEmbedAuthor is a part of a MessageEmbed struct.
type MessageEmbedAuthor struct { type MessageEmbedAuthor struct {
URL string `json:"url,omitempty"` URL string `json:"url,omitempty"`
Name string `json:"name,omitempty"` Name string `json:"name"`
IconURL string `json:"icon_url,omitempty"` IconURL string `json:"icon_url,omitempty"`
ProxyIconURL string `json:"proxy_icon_url,omitempty"` ProxyIconURL string `json:"proxy_icon_url,omitempty"`
} }

View File

@@ -39,6 +39,59 @@ var (
ErrUnauthorized = errors.New("HTTP request was unauthorized. This could be because the provided token was not a bot token. Please add \"Bot \" to the start of your token. https://discord.com/developers/docs/reference#authentication-example-bot-token-authorization-header") ErrUnauthorized = errors.New("HTTP request was unauthorized. This could be because the provided token was not a bot token. Please add \"Bot \" to the start of your token. https://discord.com/developers/docs/reference#authentication-example-bot-token-authorization-header")
) )
var (
// Marshal defines function used to encode JSON payloads
Marshal func(v interface{}) ([]byte, error) = json.Marshal
// Unmarshal defines function used to decode JSON payloads
Unmarshal func(src []byte, v interface{}) error = json.Unmarshal
)
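These exported hooks default to encoding/json but let an application swap the JSON codec globally; a sketch (not part of the diff) wiring in json-iterator, assuming that module is available:

package main

import (
	jsoniter "github.com/json-iterator/go"

	"github.com/matterbridge/discordgo"
)

func init() {
	// Both method values match the hook signatures declared above.
	discordgo.Marshal = jsoniter.ConfigCompatibleWithStandardLibrary.Marshal
	discordgo.Unmarshal = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal
}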
// RESTError stores error information about a request with a bad response code.
// Message is not always present, there are cases where api calls can fail
// without returning a json message.
type RESTError struct {
Request *http.Request
Response *http.Response
ResponseBody []byte
Message *APIErrorMessage // Message may be nil.
}
// newRestError returns a new REST API error.
func newRestError(req *http.Request, resp *http.Response, body []byte) *RESTError {
restErr := &RESTError{
Request: req,
Response: resp,
ResponseBody: body,
}
// Attempt to decode the error and assume no message was provided if it fails
var msg *APIErrorMessage
err := Unmarshal(body, &msg)
if err == nil {
restErr.Message = msg
}
return restErr
}
// Error returns a Rest API Error with its status code and body.
func (r RESTError) Error() string {
return "HTTP " + r.Response.Status + ", " + string(r.ResponseBody)
}
// RateLimitError is returned when a request exceeds a rate limit
// and ShouldRetryOnRateLimit is false. The request may be manually
// retried after waiting the duration specified by RetryAfter.
type RateLimitError struct {
*RateLimit
}
// Error returns a rate limit error with rate limited endpoint and retry time.
func (e RateLimitError) Error() string {
return "Rate limit exceeded on " + e.URL + ", retry after " + e.RetryAfter.String()
}
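Setting the new ShouldRetryOnRateLimit flag to false makes a 429 surface as a *RateLimitError instead of being slept on and retried internally; a handling sketch (not part of the diff; s is a *discordgo.Session, imports of errors and time elided):

s.ShouldRetryOnRateLimit = false // opt out of the built-in sleep-and-retry loop

_, err := s.ChannelMessageSend(channelID, "hello from matterbridge")
var rlErr *discordgo.RateLimitError
if errors.As(err, &rlErr) {
	// The caller now owns the policy: wait the advertised duration and retry, or drop the message.
	time.Sleep(rlErr.RetryAfter)
	_, err = s.ChannelMessageSend(channelID, "hello from matterbridge")
}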
// Request is the same as RequestWithBucketID but the bucket id is the same as the urlStr // Request is the same as RequestWithBucketID but the bucket id is the same as the urlStr
func (s *Session) Request(method, urlStr string, data interface{}) (response []byte, err error) { func (s *Session) Request(method, urlStr string, data interface{}) (response []byte, err error) {
return s.RequestWithBucketID(method, urlStr, data, strings.SplitN(urlStr, "?", 2)[0]) return s.RequestWithBucketID(method, urlStr, data, strings.SplitN(urlStr, "?", 2)[0])
@@ -48,7 +101,7 @@ func (s *Session) Request(method, urlStr string, data interface{}) (response []b
func (s *Session) RequestWithBucketID(method, urlStr string, data interface{}, bucketID string) (response []byte, err error) { func (s *Session) RequestWithBucketID(method, urlStr string, data interface{}, bucketID string) (response []byte, err error) {
var body []byte var body []byte
if data != nil { if data != nil {
body, err = json.Marshal(data) body, err = Marshal(data)
if err != nil { if err != nil {
return return
} }
@@ -108,7 +161,7 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b
} }
defer func() { defer func() {
err2 := resp.Body.Close() err2 := resp.Body.Close()
if err2 != nil { if s.Debug && err2 != nil {
log.Println("error closing resp body") log.Println("error closing resp body")
} }
}() }()
@@ -147,19 +200,24 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b
} }
case 429: // TOO MANY REQUESTS - Rate limiting case 429: // TOO MANY REQUESTS - Rate limiting
rl := TooManyRequests{} rl := TooManyRequests{}
err = json.Unmarshal(response, &rl) err = Unmarshal(response, &rl)
if err != nil { if err != nil {
s.log(LogError, "rate limit unmarshal error, %s", err) s.log(LogError, "rate limit unmarshal error, %s", err)
return return
} }
s.log(LogInformational, "Rate Limiting %s, retry in %v", urlStr, rl.RetryAfter)
s.handleEvent(rateLimitEventType, &RateLimit{TooManyRequests: &rl, URL: urlStr})
time.Sleep(rl.RetryAfter) if s.ShouldRetryOnRateLimit {
// we can make the above smarter s.log(LogInformational, "Rate Limiting %s, retry in %v", urlStr, rl.RetryAfter)
// this method can cause longer delays than required s.handleEvent(rateLimitEventType, &RateLimit{TooManyRequests: &rl, URL: urlStr})
response, err = s.RequestWithLockedBucket(method, urlStr, contentType, b, s.Ratelimiter.LockBucketObject(bucket), sequence) time.Sleep(rl.RetryAfter)
// we can make the above smarter
// this method can cause longer delays than required
response, err = s.RequestWithLockedBucket(method, urlStr, contentType, b, s.Ratelimiter.LockBucketObject(bucket), sequence)
} else {
err = &RateLimitError{&RateLimit{TooManyRequests: &rl, URL: urlStr}}
}
case http.StatusUnauthorized: case http.StatusUnauthorized:
if strings.Index(s.Token, "Bot ") != 0 { if strings.Index(s.Token, "Bot ") != 0 {
s.log(LogInformational, ErrUnauthorized.Error()) s.log(LogInformational, ErrUnauthorized.Error())
@@ -174,7 +232,7 @@ func (s *Session) RequestWithLockedBucket(method, urlStr, contentType string, b
} }
func unmarshal(data []byte, v interface{}) error { func unmarshal(data []byte, v interface{}) error {
err := json.Unmarshal(data, v) err := Unmarshal(data, v)
if err != nil { if err != nil {
return fmt.Errorf("%w: %s", ErrJSONUnmarshal, err) return fmt.Errorf("%w: %s", ErrJSONUnmarshal, err)
} }
@@ -438,6 +496,19 @@ func (s *Session) Guild(guildID string) (st *Guild, err error) {
return return
} }
// GuildWithCounts returns a Guild structure of a specific Guild with approximate member and presence counts.
// guildID : The ID of a Guild
func (s *Session) GuildWithCounts(guildID string) (st *Guild, err error) {
body, err := s.RequestWithBucketID("GET", EndpointGuild(guildID)+"?with_counts=true", nil, EndpointGuild(guildID))
if err != nil {
return
}
err = unmarshal(body, &st)
return
}
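Usage sketch for the new helper (not part of the diff; it assumes the Guild struct carries ApproximateMemberCount/ApproximatePresenceCount as in upstream discordgo, which are only populated on this code path):

g, err := s.GuildWithCounts(guildID)
if err != nil {
	return err
}
fmt.Printf("%s: ~%d members, ~%d online\n", g.Name, g.ApproximateMemberCount, g.ApproximatePresenceCount)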
// GuildPreview returns a GuildPreview structure of a specific public Guild. // GuildPreview returns a GuildPreview structure of a specific public Guild.
// guildID : The ID of a Guild // guildID : The ID of a Guild
func (s *Session) GuildPreview(guildID string) (st *GuildPreview, err error) { func (s *Session) GuildPreview(guildID string) (st *GuildPreview, err error) {
@@ -481,7 +552,7 @@ func (s *Session) GuildEdit(guildID string, g GuildParams) (st *Guild, err error
} }
} }
//Bounds checking for regions // Bounds checking for regions
if g.Region != "" { if g.Region != "" {
isValid := false isValid := false
regions, _ := s.VoiceRegions() regions, _ := s.VoiceRegions()
@@ -530,12 +601,30 @@ func (s *Session) GuildLeave(guildID string) (err error) {
return return
} }
// GuildBans returns an array of GuildBan structures for all bans of a // GuildBans returns an array of GuildBan structures for bans in the given guild.
// given guild. // guildID : The ID of a Guild
// guildID : The ID of a Guild. // limit : Max number of bans to return (max 1000)
func (s *Session) GuildBans(guildID string) (st []*GuildBan, err error) { // beforeID : If not empty all returned users will be after the given id
// afterID : If not empty all returned users will be before the given id
func (s *Session) GuildBans(guildID string, limit int, beforeID, afterID string) (st []*GuildBan, err error) {
uri := EndpointGuildBans(guildID)
body, err := s.RequestWithBucketID("GET", EndpointGuildBans(guildID), nil, EndpointGuildBans(guildID)) v := url.Values{}
if limit != 0 {
v.Set("limit", strconv.Itoa(limit))
}
if beforeID != "" {
v.Set("before", beforeID)
}
if afterID != "" {
v.Set("after", afterID)
}
if len(v) > 0 {
uri += "?" + v.Encode()
}
body, err := s.RequestWithBucketID("GET", uri, nil, EndpointGuildBans(guildID))
if err != nil { if err != nil {
return return
} }
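The reworked signature takes paging parameters; a sketch listing up to one page of bans (not part of the diff):

bans, err := s.GuildBans(guildID, 1000, "", "") // limit 1000, no before/after cursor
if err != nil {
	return err
}
for _, b := range bans {
	fmt.Printf("%s: %s\n", b.User.Username, b.Reason)
}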
@@ -631,6 +720,29 @@ func (s *Session) GuildMembers(guildID string, after string, limit int) (st []*M
return return
} }
// GuildMembersSearch returns a list of guild member objects whose username or nickname starts with a provided string
// guildID : The ID of a Guild
// query : Query string to match username(s) and nickname(s) against
// limit : Max number of members to return (default 1, min 1, max 1000)
func (s *Session) GuildMembersSearch(guildID, query string, limit int) (st []*Member, err error) {
uri := EndpointGuildMembersSearch(guildID)
queryParams := url.Values{}
queryParams.Set("query", query)
if limit > 1 {
queryParams.Set("limit", strconv.Itoa(limit))
}
body, err := s.RequestWithBucketID("GET", uri+"?"+queryParams.Encode(), nil, uri)
if err != nil {
return
}
err = unmarshal(body, &st)
return
}
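Usage sketch for the new search endpoint (not part of the diff):

// Up to 25 members whose username or nickname starts with "wim".
members, err := s.GuildMembersSearch(guildID, "wim", 25)
if err != nil {
	return err
}
for _, m := range members {
	fmt.Println(m.User.ID, m.User.Username, m.Nick)
}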
// GuildMember returns a member of a guild. // GuildMember returns a member of a guild.
// guildID : The ID of a Guild. // guildID : The ID of a Guild.
// userID : The ID of a User // userID : The ID of a User
@@ -710,6 +822,21 @@ func (s *Session) GuildMemberEdit(guildID, userID string, roles []string) (err e
return return
} }
// GuildMemberEditComplex edits the nickname and roles of a member.
// guildID : The ID of a Guild.
// userID : The ID of a User.
// data : A GuildMemberParams struct with the new nickname and roles
func (s *Session) GuildMemberEditComplex(guildID, userID string, data GuildMemberParams) (st *Member, err error) {
var body []byte
body, err = s.RequestWithBucketID("PATCH", EndpointGuildMember(guildID, userID), data, EndpointGuildMember(guildID, ""))
if err != nil {
return nil, err
}
err = unmarshal(body, &st)
return
}
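A sketch of the new complex edit (not part of the diff; modRoleID is a placeholder role ID). The pointer-typed Roles field of GuildMemberParams distinguishes "leave roles untouched" (nil) from "set exactly this list":

roles := []string{modRoleID}
member, err := s.GuildMemberEditComplex(guildID, userID, discordgo.GuildMemberParams{
	Nick:  "bridge",
	Roles: &roles,
})
if err != nil {
	return err
}
fmt.Println("nick is now", member.Nick)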
// GuildMemberMove moves a guild member from one voice channel to another/none // GuildMemberMove moves a guild member from one voice channel to another/none
// guildID : The ID of a Guild. // guildID : The ID of a Guild.
// userID : The ID of a User. // userID : The ID of a User.
@@ -1218,6 +1345,20 @@ func (s *Session) GuildEmojis(guildID string) (emoji []*Emoji, err error) {
return return
} }
// GuildEmoji returns specified emoji.
// guildID : The ID of a Guild
// emojiID : The ID of an Emoji to retrieve
func (s *Session) GuildEmoji(guildID, emojiID string) (emoji *Emoji, err error) {
var body []byte
body, err = s.RequestWithBucketID("GET", EndpointGuildEmoji(guildID, emojiID), nil, EndpointGuildEmoji(guildID, emojiID))
if err != nil {
return
}
err = unmarshal(body, &emoji)
return
}
// GuildEmojiCreate creates a new emoji // GuildEmojiCreate creates a new emoji
// guildID : The ID of a Guild. // guildID : The ID of a Guild.
// name : The Name of the Emoji. // name : The Name of the Emoji.
@@ -1244,12 +1385,12 @@ func (s *Session) GuildEmojiCreate(guildID, name, image string, roles []string)
// guildID : The ID of a Guild. // guildID : The ID of a Guild.
// emojiID : The ID of an Emoji. // emojiID : The ID of an Emoji.
// name : The Name of the Emoji. // name : The Name of the Emoji.
// roles : The roles for which this emoji will be whitelisted, can be nil. // roles : The roles for which this emoji will be whitelisted, if nil or empty the roles will be reset.
func (s *Session) GuildEmojiEdit(guildID, emojiID, name string, roles []string) (emoji *Emoji, err error) { func (s *Session) GuildEmojiEdit(guildID, emojiID, name string, roles []string) (emoji *Emoji, err error) {
data := struct { data := struct {
Name string `json:"name"` Name string `json:"name"`
Roles []string `json:"roles,omitempty"` Roles []string `json:"roles"`
}{name, roles} }{name, roles}
body, err := s.RequestWithBucketID("PATCH", EndpointGuildEmoji(guildID, emojiID), data, EndpointGuildEmojis(guildID)) body, err := s.RequestWithBucketID("PATCH", EndpointGuildEmoji(guildID, emojiID), data, EndpointGuildEmojis(guildID))
@@ -1851,6 +1992,37 @@ func (s *Session) InviteWithCounts(inviteID string) (st *Invite, err error) {
return return
} }
// InviteComplex returns an Invite structure of the given invite including specified fields.
// inviteID : The invite code
// guildScheduledEventID : If specified, includes specified guild scheduled event.
// withCounts : Whether to include approximate member counts or not
// withExpiration : Whether to include expiration time or not
func (s *Session) InviteComplex(inviteID, guildScheduledEventID string, withCounts, withExpiration bool) (st *Invite, err error) {
endpoint := EndpointInvite(inviteID)
v := url.Values{}
if guildScheduledEventID != "" {
v.Set("guild_scheduled_event_id", guildScheduledEventID)
}
if withCounts {
v.Set("with_counts", "true")
}
if withExpiration {
v.Set("with_expiration", "true")
}
if len(v) != 0 {
endpoint += "?" + v.Encode()
}
body, err := s.RequestWithBucketID("GET", endpoint, nil, EndpointInvite(""))
if err != nil {
return
}
err = unmarshal(body, &st)
return
}
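Usage sketch (not part of the diff; the invite code is a placeholder):

inv, err := s.InviteComplex("discord-code", "", true, true)
if err != nil {
	return err
}
fmt.Printf("invite to %s, ~%d members\n", inv.Guild.Name, inv.ApproximateMemberCount)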
// InviteDelete deletes an existing invite // InviteDelete deletes an existing invite
// inviteID : the code of an invite // inviteID : the code of an invite
func (s *Session) InviteDelete(inviteID string) (st *Invite, err error) { func (s *Session) InviteDelete(inviteID string) (st *Invite, err error) {
@@ -2158,7 +2330,7 @@ func (s *Session) WebhookMessage(webhookID, token, messageID string) (message *M
return return
} }
err = json.Unmarshal(body, &message) err = Unmarshal(body, &message)
return return
} }
@@ -2207,7 +2379,7 @@ func (s *Session) WebhookMessageDelete(webhookID, token, messageID string) (err
// MessageReactionAdd creates an emoji reaction to a message. // MessageReactionAdd creates an emoji reaction to a message.
// channelID : The channel ID. // channelID : The channel ID.
// messageID : The message ID. // messageID : The message ID.
// emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier. // emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier in name:id format (e.g. "hello:1234567654321")
func (s *Session) MessageReactionAdd(channelID, messageID, emojiID string) error { func (s *Session) MessageReactionAdd(channelID, messageID, emojiID string) error {
// emoji such as #⃣ need to have # escaped // emoji such as #⃣ need to have # escaped
@@ -2687,10 +2859,9 @@ func (s *Session) ApplicationCommandPermissionsBatchEdit(appID, guildID string,
} }
// InteractionRespond creates the response to an interaction. // InteractionRespond creates the response to an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
// resp : Response message data. // resp : Response message data.
func (s *Session) InteractionRespond(interaction *Interaction, resp *InteractionResponse) (err error) { func (s *Session) InteractionRespond(interaction *Interaction, resp *InteractionResponse) error {
endpoint := EndpointInteractionResponse(interaction.ID, interaction.Token) endpoint := EndpointInteractionResponse(interaction.ID, interaction.Token)
if resp.Data != nil && len(resp.Data.Files) > 0 { if resp.Data != nil && len(resp.Data.Files) > 0 {
@@ -2700,32 +2871,30 @@ func (s *Session) InteractionRespond(interaction *Interaction, resp *Interaction
} }
_, err = s.request("POST", endpoint, contentType, body, endpoint, 0) _, err = s.request("POST", endpoint, contentType, body, endpoint, 0)
} else { return err
_, err = s.RequestWithBucketID("POST", endpoint, *resp, endpoint)
} }
_, err := s.RequestWithBucketID("POST", endpoint, *resp, endpoint)
return err return err
} }
// InteractionResponse gets the response to an interaction. // InteractionResponse gets the response to an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
func (s *Session) InteractionResponse(appID string, interaction *Interaction) (*Message, error) { func (s *Session) InteractionResponse(interaction *Interaction) (*Message, error) {
return s.WebhookMessage(appID, interaction.Token, "@original") return s.WebhookMessage(interaction.AppID, interaction.Token, "@original")
} }
// InteractionResponseEdit edits the response to an interaction. // InteractionResponseEdit edits the response to an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
// newresp : Updated response message data. // newresp : Updated response message data.
func (s *Session) InteractionResponseEdit(appID string, interaction *Interaction, newresp *WebhookEdit) (*Message, error) { func (s *Session) InteractionResponseEdit(interaction *Interaction, newresp *WebhookEdit) (*Message, error) {
return s.WebhookMessageEdit(appID, interaction.Token, "@original", newresp) return s.WebhookMessageEdit(interaction.AppID, interaction.Token, "@original", newresp)
} }
// InteractionResponseDelete deletes the response to an interaction. // InteractionResponseDelete deletes the response to an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
func (s *Session) InteractionResponseDelete(appID string, interaction *Interaction) error { func (s *Session) InteractionResponseDelete(interaction *Interaction) error {
endpoint := EndpointInteractionResponseActions(appID, interaction.Token) endpoint := EndpointInteractionResponseActions(interaction.AppID, interaction.Token)
_, err := s.RequestWithBucketID("DELETE", endpoint, nil, endpoint) _, err := s.RequestWithBucketID("DELETE", endpoint, nil, endpoint)
@@ -2733,29 +2902,76 @@ func (s *Session) InteractionResponseDelete(appID string, interaction *Interacti
} }
// FollowupMessageCreate creates the followup message for an interaction. // FollowupMessageCreate creates the followup message for an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
// wait : Waits for server confirmation of message send and ensures that the return struct is populated (it is nil otherwise) // wait : Waits for server confirmation of message send and ensures that the return struct is populated (it is nil otherwise)
// data : Data of the message to send. // data : Data of the message to send.
func (s *Session) FollowupMessageCreate(appID string, interaction *Interaction, wait bool, data *WebhookParams) (*Message, error) { func (s *Session) FollowupMessageCreate(interaction *Interaction, wait bool, data *WebhookParams) (*Message, error) {
return s.WebhookExecute(appID, interaction.Token, wait, data) return s.WebhookExecute(interaction.AppID, interaction.Token, wait, data)
} }
// FollowupMessageEdit edits a followup message of an interaction. // FollowupMessageEdit edits a followup message of an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
// messageID : The followup message ID. // messageID : The followup message ID.
// data : Data to update the message // data : Data to update the message
func (s *Session) FollowupMessageEdit(appID string, interaction *Interaction, messageID string, data *WebhookEdit) (*Message, error) { func (s *Session) FollowupMessageEdit(interaction *Interaction, messageID string, data *WebhookEdit) (*Message, error) {
return s.WebhookMessageEdit(appID, interaction.Token, messageID, data) return s.WebhookMessageEdit(interaction.AppID, interaction.Token, messageID, data)
} }
// FollowupMessageDelete deletes a followup message of an interaction. // FollowupMessageDelete deletes a followup message of an interaction.
// appID : The application ID.
// interaction : Interaction instance. // interaction : Interaction instance.
// messageID : The followup message ID. // messageID : The followup message ID.
func (s *Session) FollowupMessageDelete(appID string, interaction *Interaction, messageID string) error { func (s *Session) FollowupMessageDelete(interaction *Interaction, messageID string) error {
return s.WebhookMessageDelete(appID, interaction.Token, messageID) return s.WebhookMessageDelete(interaction.AppID, interaction.Token, messageID)
}
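Since the application ID now comes from Interaction.AppID, responding and following up no longer needs an explicit appID argument; a handler sketch (not part of the diff):

dg.AddHandler(func(s *discordgo.Session, i *discordgo.InteractionCreate) {
	err := s.InteractionRespond(i.Interaction, &discordgo.InteractionResponse{
		Type: discordgo.InteractionResponseChannelMessageWithSource,
		Data: &discordgo.InteractionResponseData{Content: "pong"},
	})
	if err != nil {
		return
	}
	// Follow-ups reuse the token/AppID pair carried by the interaction itself.
	_, _ = s.FollowupMessageCreate(i.Interaction, true, &discordgo.WebhookParams{Content: "pong again"})
})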
// ------------------------------------------------------------------------------------------------
// Functions specific to stage instances
// ------------------------------------------------------------------------------------------------
// StageInstanceCreate creates and returns a new Stage instance associated to a Stage channel.
// data : Parameters needed to create the Stage instance
func (s *Session) StageInstanceCreate(data *StageInstanceParams) (si *StageInstance, err error) {
body, err := s.RequestWithBucketID("POST", EndpointStageInstances, data, EndpointStageInstances)
if err != nil {
return
}
err = unmarshal(body, &si)
return
}
// StageInstance will retrieve a Stage instance by ID of the Stage channel.
// channelID : The ID of the Stage channel
func (s *Session) StageInstance(channelID string) (si *StageInstance, err error) {
body, err := s.RequestWithBucketID("GET", EndpointStageInstance(channelID), nil, EndpointStageInstance(channelID))
if err != nil {
return
}
err = unmarshal(body, &si)
return
}
// StageInstanceEdit will edit a Stage instance by ID of the Stage channel.
// channelID : The ID of the Stage channel
// data : The data to edit the Stage instance
func (s *Session) StageInstanceEdit(channelID string, data *StageInstanceParams) (si *StageInstance, err error) {
body, err := s.RequestWithBucketID("PATCH", EndpointStageInstance(channelID), data, EndpointStageInstance(channelID))
if err != nil {
return
}
err = unmarshal(body, &si)
return
}
// StageInstanceDelete will delete a Stage instance by ID of the Stage channel.
// channelID : The ID of the Stage channel
func (s *Session) StageInstanceDelete(channelID string) (err error) {
_, err = s.RequestWithBucketID("DELETE", EndpointStageInstance(channelID), nil, EndpointStageInstance(channelID))
return
} }
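A sketch tying the new Stage-instance calls together (not part of the diff; stageChannelID is a placeholder):

si, err := s.StageInstanceCreate(&discordgo.StageInstanceParams{
	ChannelID:             stageChannelID,
	Topic:                 "Matterbridge Q&A",
	PrivacyLevel:          discordgo.StageInstancePrivacyLevelGuildOnly,
	SendStartNotification: true,
})
if err != nil {
	return err
}
fmt.Println("stage live:", si.ID)

// ... later, end the Stage by deleting the instance on the same channel.
if err := s.StageInstanceDelete(stageChannelID); err != nil {
	return err
}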
// ------------------------------------------------------------------------------------------------ // ------------------------------------------------------------------------------------------------

View File

@@ -979,8 +979,9 @@ func (s *State) OnInterface(se *Session, i interface{}) (err error) {
err = s.GuildRemove(t.Guild) err = s.GuildRemove(t.Guild)
case *GuildMemberAdd: case *GuildMemberAdd:
var guild *Guild
// Updates the MemberCount of the guild. // Updates the MemberCount of the guild.
guild, err := s.Guild(t.Member.GuildID) guild, err = s.Guild(t.Member.GuildID)
if err != nil { if err != nil {
return err return err
} }
@@ -995,8 +996,9 @@ func (s *State) OnInterface(se *Session, i interface{}) (err error) {
err = s.MemberAdd(t.Member) err = s.MemberAdd(t.Member)
} }
case *GuildMemberRemove: case *GuildMemberRemove:
var guild *Guild
// Updates the MemberCount of the guild. // Updates the MemberCount of the guild.
guild, err := s.Guild(t.Member.GuildID) guild, err = s.Guild(t.Member.GuildID)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -43,6 +43,9 @@ type Session struct {
// Should the session reconnect the websocket on errors. // Should the session reconnect the websocket on errors.
ShouldReconnectOnError bool ShouldReconnectOnError bool
// Should the session retry requests when rate limited.
ShouldRetryOnRateLimit bool
// Identify is sent during initial handshake with the discord gateway. // Identify is sent during initial handshake with the discord gateway.
// https://discord.com/developers/docs/topics/gateway#identify // https://discord.com/developers/docs/topics/gateway#identify
Identify Identify Identify Identify
@@ -260,6 +263,7 @@ const (
ChannelTypeGuildNewsThread ChannelType = 10 ChannelTypeGuildNewsThread ChannelType = 10
ChannelTypeGuildPublicThread ChannelType = 11 ChannelTypeGuildPublicThread ChannelType = 11
ChannelTypeGuildPrivateThread ChannelType = 12 ChannelTypeGuildPrivateThread ChannelType = 12
ChannelTypeGuildStageVoice ChannelType = 13
) )
// A Channel holds all data related to an individual Discord channel. // A Channel holds all data related to an individual Discord channel.
@@ -360,7 +364,7 @@ type ChannelEdit struct {
UserLimit int `json:"user_limit,omitempty"` UserLimit int `json:"user_limit,omitempty"`
PermissionOverwrites []*PermissionOverwrite `json:"permission_overwrites,omitempty"` PermissionOverwrites []*PermissionOverwrite `json:"permission_overwrites,omitempty"`
ParentID string `json:"parent_id,omitempty"` ParentID string `json:"parent_id,omitempty"`
RateLimitPerUser int `json:"rate_limit_per_user,omitempty"` RateLimitPerUser *int `json:"rate_limit_per_user,omitempty"`
// NOTE: threads only // NOTE: threads only
@@ -552,6 +556,17 @@ const (
ExplicitContentFilterAllMembers ExplicitContentFilterLevel = 2 ExplicitContentFilterAllMembers ExplicitContentFilterLevel = 2
) )
// GuildNSFWLevel type definition
type GuildNSFWLevel int
// Constants for GuildNSFWLevel levels from 0 to 3 inclusive
const (
GuildNSFWLevelDefault GuildNSFWLevel = 0
GuildNSFWLevelExplicit GuildNSFWLevel = 1
GuildNSFWLevelSafe GuildNSFWLevel = 2
GuildNSFWLevelAgeRestricted GuildNSFWLevel = 3
)
// MfaLevel type definition // MfaLevel type definition
type MfaLevel int type MfaLevel int
@@ -675,6 +690,9 @@ type Guild struct {
// The explicit content filter level // The explicit content filter level
ExplicitContentFilter ExplicitContentFilterLevel `json:"explicit_content_filter"` ExplicitContentFilter ExplicitContentFilterLevel `json:"explicit_content_filter"`
// The NSFW Level of the guild
NSFWLevel GuildNSFWLevel `json:"nsfw_level"`
// The list of enabled guild features // The list of enabled guild features
Features []string `json:"features"` Features []string `json:"features"`
@@ -731,6 +749,9 @@ type Guild struct {
// Permissions of our user // Permissions of our user
Permissions int64 `json:"permissions,string"` Permissions int64 `json:"permissions,string"`
// Stage instances in the guild
StageInstances []*StageInstance `json:"stage_instances"`
} }
// A GuildPreview holds data related to a specific public Discord Guild, even if the user is not in the guild. // A GuildPreview holds data related to a specific public Discord Guild, even if the user is not in the guild.
@@ -757,16 +778,31 @@ type GuildPreview struct {
// The list of enabled guild features // The list of enabled guild features
Features []string `json:"features"` Features []string `json:"features"`
// Approximate number of members in this guild, returned from the GET /guild/<id> endpoint when with_counts is true // Approximate number of members in this guild
// NOTE: this field is only filled when using GuildWithCounts
ApproximateMemberCount int `json:"approximate_member_count"` ApproximateMemberCount int `json:"approximate_member_count"`
// Approximate number of non-offline members in this guild, returned from the GET /guild/<id> endpoint when with_counts is true // Approximate number of non-offline members in this guild
// NOTE: this field is only filled when using GuildWithCounts
ApproximatePresenceCount int `json:"approximate_presence_count"` ApproximatePresenceCount int `json:"approximate_presence_count"`
// the description for the guild // the description for the guild
Description string `json:"description"` Description string `json:"description"`
} }
// IconURL returns a URL to the guild's icon.
func (g *GuildPreview) IconURL() string {
if g.Icon == "" {
return ""
}
if strings.HasPrefix(g.Icon, "a_") {
return EndpointGuildIconAnimated(g.ID, g.Icon)
}
return EndpointGuildIcon(g.ID, g.Icon)
}
// GuildScheduledEvent is a representation of a scheduled event in a guild. Only for retrieval of the data. // GuildScheduledEvent is a representation of a scheduled event in a guild. Only for retrieval of the data.
// https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event // https://discord.com/developers/docs/resources/guild-scheduled-event#guild-scheduled-event
type GuildScheduledEvent struct { type GuildScheduledEvent struct {
@@ -842,7 +878,7 @@ func (p GuildScheduledEventParams) MarshalJSON() ([]byte, error) {
type guildScheduledEventParams GuildScheduledEventParams type guildScheduledEventParams GuildScheduledEventParams
if p.EntityType == GuildScheduledEventEntityTypeExternal && p.ChannelID == "" { if p.EntityType == GuildScheduledEventEntityTypeExternal && p.ChannelID == "" {
return json.Marshal(struct { return Marshal(struct {
guildScheduledEventParams guildScheduledEventParams
ChannelID json.RawMessage `json:"channel_id"` ChannelID json.RawMessage `json:"channel_id"`
}{ }{
@@ -851,7 +887,7 @@ func (p GuildScheduledEventParams) MarshalJSON() ([]byte, error) {
}) })
} }
return json.Marshal(guildScheduledEventParams(p)) return Marshal(guildScheduledEventParams(p))
} }
// GuildScheduledEventEntityMetadata holds additional metadata for guild scheduled event. // GuildScheduledEventEntityMetadata holds additional metadata for guild scheduled event.
@@ -1093,7 +1129,7 @@ func (t *TimeStamps) UnmarshalJSON(b []byte) error {
End float64 `json:"end,omitempty"` End float64 `json:"end,omitempty"`
Start float64 `json:"start,omitempty"` Start float64 `json:"start,omitempty"`
}{} }{}
err := json.Unmarshal(b, &temp) err := Unmarshal(b, &temp)
if err != nil { if err != nil {
return err return err
} }
@@ -1231,7 +1267,7 @@ func (t *TooManyRequests) UnmarshalJSON(b []byte) error {
Message string `json:"message"` Message string `json:"message"`
RetryAfter float64 `json:"retry_after"` RetryAfter float64 `json:"retry_after"`
}{} }{}
err := json.Unmarshal(b, &u) err := Unmarshal(b, &u)
if err != nil { if err != nil {
return err return err
} }
@@ -1566,6 +1602,15 @@ type UserGuildSettingsEdit struct {
ChannelOverrides map[string]*UserGuildSettingsChannelOverride `json:"channel_overrides"` ChannelOverrides map[string]*UserGuildSettingsChannelOverride `json:"channel_overrides"`
} }
// GuildMemberParams stores data needed to update a member
// https://discord.com/developers/docs/resources/guild#modify-guild-member
type GuildMemberParams struct {
// Value to set user's nickname to
Nick string `json:"nick,omitempty"`
// Array of role ids the member is assigned
Roles *[]string `json:"roles,omitempty"`
}
// An APIErrorMessage is an api error message returned from discord // An APIErrorMessage is an api error message returned from discord
type APIErrorMessage struct { type APIErrorMessage struct {
Code int `json:"code"` Code int `json:"code"`
@@ -1642,7 +1687,7 @@ func (activity *Activity) UnmarshalJSON(b []byte) error {
Instance bool `json:"instance,omitempty"` Instance bool `json:"instance,omitempty"`
Flags int `json:"flags,omitempty"` Flags int `json:"flags,omitempty"`
}{} }{}
err := json.Unmarshal(b, &temp) err := Unmarshal(b, &temp)
if err != nil { if err != nil {
return err return err
} }
@@ -1695,14 +1740,13 @@ const (
// Identify is sent during initial handshake with the discord gateway. // Identify is sent during initial handshake with the discord gateway.
// https://discord.com/developers/docs/topics/gateway#identify // https://discord.com/developers/docs/topics/gateway#identify
type Identify struct { type Identify struct {
Token string `json:"token"` Token string `json:"token"`
Properties IdentifyProperties `json:"properties"` Properties IdentifyProperties `json:"properties"`
Compress bool `json:"compress"` Compress bool `json:"compress"`
LargeThreshold int `json:"large_threshold"` LargeThreshold int `json:"large_threshold"`
Shard *[2]int `json:"shard,omitempty"` Shard *[2]int `json:"shard,omitempty"`
Presence GatewayStatusUpdate `json:"presence,omitempty"` Presence GatewayStatusUpdate `json:"presence,omitempty"`
GuildSubscriptions bool `json:"guild_subscriptions"` Intents Intent `json:"intents"`
Intents Intent `json:"intents"`
} }
// IdentifyProperties contains the "properties" portion of an Identify packet // IdentifyProperties contains the "properties" portion of an Identify packet
@@ -1715,6 +1759,49 @@ type IdentifyProperties struct {
ReferringDomain string `json:"$referring_domain"` ReferringDomain string `json:"$referring_domain"`
} }
// StageInstance holds information about a live stage.
// https://discord.com/developers/docs/resources/stage-instance#stage-instance-resource
type StageInstance struct {
// The id of this Stage instance
ID string `json:"id"`
// The guild id of the associated Stage channel
GuildID string `json:"guild_id"`
// The id of the associated Stage channel
ChannelID string `json:"channel_id"`
// The topic of the Stage instance (1-120 characters)
Topic string `json:"topic"`
// The privacy level of the Stage instance
// https://discord.com/developers/docs/resources/stage-instance#stage-instance-object-privacy-level
PrivacyLevel StageInstancePrivacyLevel `json:"privacy_level"`
// Whether or not Stage Discovery is disabled (deprecated)
DiscoverableDisabled bool `json:"discoverable_disabled"`
// The id of the scheduled event for this Stage instance
GuildScheduledEventID string `json:"guild_scheduled_event_id"`
}
// StageInstanceParams represents the parameters needed to create or edit a stage instance
type StageInstanceParams struct {
// ChannelID represents the id of the Stage channel
ChannelID string `json:"channel_id,omitempty"`
// Topic of the Stage instance (1-120 characters)
Topic string `json:"topic,omitempty"`
// PrivacyLevel of the Stage instance (default GUILD_ONLY)
PrivacyLevel StageInstancePrivacyLevel `json:"privacy_level,omitempty"`
// SendStartNotification will notify @everyone that a Stage instance has started
SendStartNotification bool `json:"send_start_notification,omitempty"`
}
// StageInstancePrivacyLevel represents the privacy level of a Stage instance
// https://discord.com/developers/docs/resources/stage-instance#stage-instance-object-privacy-level
type StageInstancePrivacyLevel int
const (
// StageInstancePrivacyLevelPublic The Stage instance is visible publicly. (deprecated)
StageInstancePrivacyLevelPublic StageInstancePrivacyLevel = 1
// StageInstancePrivacyLevelGuildOnly The Stage instance is visible to only guild members.
StageInstancePrivacyLevelGuildOnly StageInstancePrivacyLevel = 2
)
// Constants for the different bit offsets of text channel permissions // Constants for the different bit offsets of text channel permissions
const ( const (
// Deprecated: PermissionReadMessages has been replaced with PermissionViewChannel for text and voice channels // Deprecated: PermissionReadMessages has been replaced with PermissionViewChannel for text and voice channels
@@ -1731,6 +1818,7 @@ const (
PermissionManageThreads = 0x0000000400000000 PermissionManageThreads = 0x0000000400000000
PermissionCreatePublicThreads = 0x0000000800000000 PermissionCreatePublicThreads = 0x0000000800000000
PermissionCreatePrivateThreads = 0x0000001000000000 PermissionCreatePrivateThreads = 0x0000001000000000
PermissionUseExternalStickers = 0x0000002000000000
PermissionSendMessagesInThreads = 0x0000004000000000 PermissionSendMessagesInThreads = 0x0000004000000000
) )
@@ -1745,6 +1833,7 @@ const (
PermissionVoiceMoveMembers = 0x0000000001000000 PermissionVoiceMoveMembers = 0x0000000001000000
PermissionVoiceUseVAD = 0x0000000002000000 PermissionVoiceUseVAD = 0x0000000002000000
PermissionVoiceRequestToSpeak = 0x0000000100000000 PermissionVoiceRequestToSpeak = 0x0000000100000000
PermissionUseActivities = 0x0000008000000000
) )
// Constants for general management. // Constants for general management.
@@ -1754,6 +1843,7 @@ const (
PermissionManageRoles = 0x0000000010000000 PermissionManageRoles = 0x0000000010000000
PermissionManageWebhooks = 0x0000000020000000 PermissionManageWebhooks = 0x0000000020000000
PermissionManageEmojis = 0x0000000040000000 PermissionManageEmojis = 0x0000000040000000
PermissionManageEvents = 0x0000000200000000
) )
// Constants for the different bit offsets of general permissions // Constants for the different bit offsets of general permissions

View File

@@ -1,47 +0,0 @@
// Discordgo - Discord bindings for Go
// Available at https://github.com/bwmarrin/discordgo
// Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains custom types, currently only a timestamp wrapper.
package discordgo
import (
"encoding/json"
"net/http"
)
// RESTError stores error information about a request with a bad response code.
// Message is not always present, there are cases where api calls can fail
// without returning a json message.
type RESTError struct {
Request *http.Request
Response *http.Response
ResponseBody []byte
Message *APIErrorMessage // Message may be nil.
}
func newRestError(req *http.Request, resp *http.Response, body []byte) *RESTError {
restErr := &RESTError{
Request: req,
Response: resp,
ResponseBody: body,
}
// Attempt to decode the error and assume no message was provided if it fails
var msg *APIErrorMessage
err := json.Unmarshal(body, &msg)
if err == nil {
restErr.Message = msg
}
return restErr
}
func (r RESTError) Error() string {
return "HTTP " + r.Response.Status + ", " + string(r.ResponseBody)
}

View File

@@ -2,7 +2,6 @@ package discordgo
import ( import (
"bytes" "bytes"
"encoding/json"
"fmt" "fmt"
"io" "io"
"mime/multipart" "mime/multipart"
@@ -30,7 +29,7 @@ func MultipartBodyWithJSON(data interface{}, files []*File) (requestContentType
body := &bytes.Buffer{} body := &bytes.Buffer{}
bodywriter := multipart.NewWriter(body) bodywriter := multipart.NewWriter(body)
payload, err := json.Marshal(data) payload, err := Marshal(data)
if err != nil { if err != nil {
return return
} }

View File

@@ -409,10 +409,13 @@ func (s *Session) UpdateStatusComplex(usd UpdateStatusData) (err error) {
} }
type requestGuildMembersData struct { type requestGuildMembersData struct {
GuildIDs []string `json:"guild_id"` // TODO: Deprecated. Use string instead of []string
Query string `json:"query"` GuildIDs []string `json:"guild_id"`
Limit int `json:"limit"` Query *string `json:"query,omitempty"`
Presences bool `json:"presences"` UserIDs *[]string `json:"user_ids,omitempty"`
Limit int `json:"limit"`
Nonce string `json:"nonce,omitempty"`
Presences bool `json:"presences"`
} }
type requestGuildMembersOp struct { type requestGuildMembersOp struct {
@@ -425,16 +428,21 @@ type requestGuildMembersOp struct {
// guildID : Single Guild ID to request members of // guildID : Single Guild ID to request members of
// query : String that username starts with, leave empty to return all members // query : String that username starts with, leave empty to return all members
// limit : Max number of items to return, or 0 to request all members matched // limit : Max number of items to return, or 0 to request all members matched
// nonce : Nonce to identify the Guild Members Chunk response
// presences : Whether to request presences of guild members // presences : Whether to request presences of guild members
func (s *Session) RequestGuildMembers(guildID string, query string, limit int, presences bool) (err error) { func (s *Session) RequestGuildMembers(guildID, query string, limit int, nonce string, presences bool) error {
data := requestGuildMembersData{ return s.RequestGuildMembersBatch([]string{guildID}, query, limit, nonce, presences)
GuildIDs: []string{guildID}, }
Query: query,
Limit: limit, // RequestGuildMembersList requests guild members from the gateway
Presences: presences, // The gateway responds with GuildMembersChunk events
} // guildID : Single Guild ID to request members of
err = s.requestGuildMembers(data) // userIDs : IDs of users to fetch
return // limit : Max number of items to return, or 0 to request all members matched
// nonce : Nonce to identify the Guild Members Chunk response
// presences : Whether to request presences of guild members
func (s *Session) RequestGuildMembersList(guildID string, userIDs []string, limit int, nonce string, presences bool) error {
return s.RequestGuildMembersBatchList([]string{guildID}, userIDs, limit, nonce, presences)
} }
// RequestGuildMembersBatch requests guild members from the gateway // RequestGuildMembersBatch requests guild members from the gateway
@@ -442,12 +450,37 @@ func (s *Session) RequestGuildMembers(guildID string, query string, limit int, p
// guildID : Slice of guild IDs to request members of // guildID : Slice of guild IDs to request members of
// query : String that username starts with, leave empty to return all members // query : String that username starts with, leave empty to return all members
// limit : Max number of items to return, or 0 to request all members matched // limit : Max number of items to return, or 0 to request all members matched
// nonce : Nonce to identify the Guild Members Chunk response
// presences : Whether to request presences of guild members // presences : Whether to request presences of guild members
func (s *Session) RequestGuildMembersBatch(guildIDs []string, query string, limit int, presences bool) (err error) { //
// NOTE: this function is deprecated, please use RequestGuildMembers instead
func (s *Session) RequestGuildMembersBatch(guildIDs []string, query string, limit int, nonce string, presences bool) (err error) {
data := requestGuildMembersData{ data := requestGuildMembersData{
GuildIDs: guildIDs, GuildIDs: guildIDs,
Query: query, Query: &query,
Limit: limit, Limit: limit,
Nonce: nonce,
Presences: presences,
}
err = s.requestGuildMembers(data)
return
}
// RequestGuildMembersBatchList requests guild members from the gateway
// The gateway responds with GuildMembersChunk events
// guildID : Slice of guild IDs to request members of
// userIDs : IDs of users to fetch
// limit : Max number of items to return, or 0 to request all members matched
// nonce : Nonce to identify the Guild Members Chunk response
// presences : Whether to request presences of guild members
//
// NOTE: this function is deprecated, please use RequestGuildMembersList instead
func (s *Session) RequestGuildMembersBatchList(guildIDs []string, userIDs []string, limit int, nonce string, presences bool) (err error) {
data := requestGuildMembersData{
GuildIDs: guildIDs,
UserIDs: &userIDs,
Limit: limit,
Nonce: nonce,
Presences: presences, Presences: presences,
} }
err = s.requestGuildMembers(data) err = s.requestGuildMembers(data)
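
A minimal sketch of the updated call shape (assumptions: the upstream `github.com/bwmarrin/discordgo` import path, that the GuildMembersChunk event echoes the nonce back, and that the privileged members intent is enabled; identifiers are placeholders):

```go
package main

import (
	"log"
	"os"

	"github.com/bwmarrin/discordgo"
)

func main() {
	s, err := discordgo.New("Bot " + os.Getenv("DISCORD_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}
	// The gateway answers with GuildMembersChunk events; matching on the
	// nonce ties responses back to this particular request.
	s.AddHandler(func(_ *discordgo.Session, c *discordgo.GuildMembersChunk) {
		log.Printf("nonce %s: %d members", c.Nonce, len(c.Members))
	})
	if err := s.Open(); err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	// Empty query plus limit 0 requests every member; presences disabled.
	if err := s.RequestGuildMembers(os.Getenv("GUILD_ID"), "", 0, "members-sync", false); err != nil {
		log.Fatal(err)
	}
	select {} // keep the connection open long enough to receive chunks
}
```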

View File

@@ -7,9 +7,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] ## [Unreleased]
## [1.5.4] - 2022-04-25
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
## [1.5.3] - 2022-04-22
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
## [1.5.2] - 2022-04-21
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
## [1.5.1] - 2021-08-24 ## [1.5.1] - 2021-08-24
* Revert Add AddRaw to not follow symlinks * Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
## [1.5.0] - 2021-08-20 ## [1.5.0] - 2021-08-20

View File

@@ -48,18 +48,6 @@ fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Win
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
* When you're done, you will want to halt or destroy the Vagrant boxes.
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
### Maintainers ### Maintainers
Help maintaining fsnotify is welcome. To be a maintainer: Help maintaining fsnotify is welcome. To be a maintainer:
@@ -67,11 +55,6 @@ Help maintaining fsnotify is welcome. To be a maintainer:
* Submit a pull request and sign the CLA as above. * Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD. * You must be able to run the test suite on Mac, Windows, Linux and BSD.
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
All code changes should be internal pull requests. All code changes should be internal pull requests.
Releases are tagged using [Semantic Versioning](http://semver.org/). Releases are tagged using [Semantic Versioning](http://semver.org/).
[hub]: https://github.com/github/hub
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

View File

@@ -1,12 +1,8 @@
# File system notifications for Go # File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
```console
go get -u golang.org/x/sys/...
```
Cross platform: Windows, Linux, BSD and macOS. Cross platform: Windows, Linux, BSD and macOS.
@@ -16,22 +12,20 @@ Cross platform: Windows, Linux, BSD and macOS.
| kqueue | BSD, macOS, iOS\* | Supported | | kqueue | BSD, macOS, iOS\* | Supported |
| ReadDirectoryChangesW | Windows | Supported | | ReadDirectoryChangesW | Windows | Supported |
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | | FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | | FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | | fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | | USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | | Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
\* Android and iOS are untested. \* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability ## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Usage ## Usage
@@ -84,10 +78,6 @@ func main() {
Please refer to [CONTRIBUTING][] before opening an issue or pull request. Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
## FAQ ## FAQ
**When a file is moved to another directory is it still being watched?** **When a file is moved to another directory is it still being watched?**
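
Since the hunk above drops the README's pointer to `example_test.go`, here is a minimal watcher loop for reference (a sketch; the watched path is a placeholder):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```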

View File

@@ -0,0 +1,36 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
package fsnotify
import (
"fmt"
"runtime"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct{}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

View File

@@ -163,6 +163,19 @@ func (w *Watcher) Remove(name string) error {
return nil return nil
} }
// WatchList returns the directories and files that are being monitored.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
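
A short sketch of calling the new method (paths are placeholders, not from the source):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// logWatches prints every path currently registered with the watcher,
// using the WatchList method added in this change.
func logWatches(w *fsnotify.Watcher) {
	for _, p := range w.WatchList() {
		log.Println("watching:", p)
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	logWatches(w)
}
```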
type watch struct { type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)

View File

@@ -38,7 +38,6 @@ func newFdPoller(fd int) (*fdPoller, error) {
poller.close() poller.close()
} }
}() }()
poller.fd = fd
// Create epoll fd // Create epoll fd
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)

View File

@@ -148,6 +148,19 @@ func (w *Watcher) Remove(name string) error {
return nil return nil
} }
// WatchList returns the directories and files that are being monitored.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME

View File

@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"reflect"
"runtime" "runtime"
"sync" "sync"
"syscall" "syscall"
@@ -96,6 +97,21 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply return <-in.reply
} }
// WatchList returns the directories and files that are being monitored.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
entries = append(entries, watchEntry.path)
}
}
return entries
}
const ( const (
// Options for AddWatch // Options for AddWatch
sysFSONESHOT = 0x80000000 sysFSONESHOT = 0x80000000
@@ -452,8 +468,16 @@ func (w *Watcher) readEvents() {
// Point "raw" to the event in the buffer // Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) // TODO: Consider using unsafe.Slice that is available from go1.17
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
size := int(raw.FileNameLength / 2)
var buf []uint16
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
sh.Len = size
sh.Cap = size
name := syscall.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name) fullname := filepath.Join(watch.path, name)
var mask uint64 var mask uint64
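
The TODO above mentions unsafe.Slice as the Go 1.17+ alternative to the reflect.SliceHeader construction; a sketch of what that variant could look like (illustrative only, package and function names are made up):

```go
//go:build windows

// Package watchsketch is illustrative only.
package watchsketch

import (
	"syscall"
	"unsafe"
)

// utf16Name converts the variable-length FileName field of a
// FILE_NOTIFY_INFORMATION record (FileNameLength is in bytes) to a string.
func utf16Name(raw *syscall.FileNotifyInformation) string {
	size := int(raw.FileNameLength / 2)
	buf := unsafe.Slice((*uint16)(unsafe.Pointer(&raw.FileName)), size)
	return syscall.UTF16ToString(buf)
}
```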

View File

@@ -1,6 +1,6 @@
# Markdown Parser and HTML Renderer for Go # Markdown Parser and HTML Renderer for Go
[![pkg.go.dev](https://pkg.go.dev/badge/github.com/gomarkdown/markdown)](https://pkg.go.dev/badge/github.com/gomarkdown/markdown) [![pkg.go.dev](https://pkg.go.dev/badge/github.com/gomarkdown/markdown)](https://pkg.go.dev/github.com/gomarkdown/markdown)
Package `github.com/gomarkdown/markdown` is a very fast Go library for parsing [Markdown](https://daringfireball.net/projects/markdown/) documents and rendering them to HTML. Package `github.com/gomarkdown/markdown` is a very fast Go library for parsing [Markdown](https://daringfireball.net/projects/markdown/) documents and rendering them to HTML.

View File

@@ -26,8 +26,8 @@ links or code blocks.
// a very dummy render hook that will output "code_replacements" instead of // a very dummy render hook that will output "code_replacements" instead of
// <code>${content}</code> emitted by html.Renderer // <code>${content}</code> emitted by html.Renderer
func renderHookCodeBlock(w io.Writer, node *ast.Node, entering bool) (ast.WalkStatus, bool) { func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
_, ok := node.Data.(*ast.CodeBlockData) _, ok := node.(*ast.CodeBlock)
if !ok { if !ok {
return ast.GoToNext, false return ast.GoToNext, false
} }
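
A compile-ready sketch of wiring such a hook into the HTML renderer (the input string is arbitrary; the options shown are the standard html.RendererOptions fields):

```go
package main

import (
	"fmt"
	"io"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/ast"
	"github.com/gomarkdown/markdown/html"
)

// renderHookCodeBlock follows the updated doc snippet above: hooks receive
// ast.Node directly and type-assert on the concrete node type.
func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
	if _, ok := node.(*ast.CodeBlock); !ok {
		return ast.GoToNext, false
	}
	io.WriteString(w, "code_replacement")
	return ast.GoToNext, true
}

func main() {
	opts := html.RendererOptions{RenderNodeHook: renderHookCodeBlock}
	renderer := html.NewRenderer(opts)
	// A heading followed by an indented code block, which the hook replaces.
	out := markdown.ToHTML([]byte("# hi\n\n\tx := 1\n"), nil, renderer)
	fmt.Println(string(out))
}
```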

View File

@@ -858,12 +858,9 @@ func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker
return 0, "" return 0, ""
} }
// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here // if just read the beginning marker, read the syntax
// into one, always get the syntax, and discard it if the caller doesn't care. if oldmarker == "" {
if syntax != nil {
syn := 0
i = skipChar(data, i, ' ') i = skipChar(data, i, ' ')
if i >= n { if i >= n {
if i == n { if i == n {
return i, marker return i, marker
@@ -871,41 +868,15 @@ func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker
return 0, "" return 0, ""
} }
syntaxStart := i syntaxStart, syntaxLen := syntaxRange(data, &i)
if syntaxStart == 0 && syntaxLen == 0 {
if data[i] == '{' { return 0, ""
i++
syntaxStart++
for i < n && data[i] != '}' && data[i] != '\n' {
syn++
i++
}
if i >= n || data[i] != '}' {
return 0, ""
}
// strip all whitespace at the beginning and the end
// of the {} block
for syn > 0 && isSpace(data[syntaxStart]) {
syntaxStart++
syn--
}
for syn > 0 && isSpace(data[syntaxStart+syn-1]) {
syn--
}
i++
} else {
for i < n && !isSpace(data[i]) {
syn++
i++
}
} }
*syntax = string(data[syntaxStart : syntaxStart+syn]) // caller wants the syntax
if syntax != nil {
*syntax = string(data[syntaxStart : syntaxStart+syntaxLen])
}
} }
i = skipChar(data, i, ' ') i = skipChar(data, i, ' ')
@@ -918,6 +889,47 @@ func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker
return i + 1, marker // Take newline into account. return i + 1, marker // Take newline into account.
} }
func syntaxRange(data []byte, iout *int) (int, int) {
n := len(data)
syn := 0
i := *iout
syntaxStart := i
if data[i] == '{' {
i++
syntaxStart++
for i < n && data[i] != '}' && data[i] != '\n' {
syn++
i++
}
if i >= n || data[i] != '}' {
return 0, 0
}
// strip all whitespace at the beginning and the end
// of the {} block
for syn > 0 && isSpace(data[syntaxStart]) {
syntaxStart++
syn--
}
for syn > 0 && isSpace(data[syntaxStart+syn-1]) {
syn--
}
i++
} else {
for i < n && !isSpace(data[i]) {
syn++
i++
}
}
*iout = i
return syntaxStart, syn
}
// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, // fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. // or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
// If doRender is true, a final newline is mandatory to recognize the fenced code block. // If doRender is true, a final newline is mandatory to recognize the fenced code block.
@@ -1416,7 +1428,7 @@ gatherlines:
// we need to add leadingWhiteSpaces + 1 spaces in the beginning of the chunk // we need to add leadingWhiteSpaces + 1 spaces in the beginning of the chunk
if indentIndex >= 4 && p.dliPrefix(chunk) <= 0 { if indentIndex >= 4 && p.dliPrefix(chunk) <= 0 {
leadingWhiteSpaces := skipChar(chunk, 0, ' ') leadingWhiteSpaces := skipChar(chunk, 0, ' ')
chunk = data[ line+indentIndex - (leadingWhiteSpaces + 1) : i] chunk = data[line+indentIndex-(leadingWhiteSpaces+1) : i]
} }
// to be a nested list, it must be indented more // to be a nested list, it must be indented more

View File

@@ -131,7 +131,11 @@ func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) {
// find the next delimiter // find the next delimiter
i, end := 0, 0 i, end := 0, 0
hasLFBeforeDelimiter := false
for end = nb; end < len(data) && i < nb; end++ { for end = nb; end < len(data) && i < nb; end++ {
if data[end] == '\n' {
hasLFBeforeDelimiter = true
}
if data[end] == '`' { if data[end] == '`' {
i++ i++
} else { } else {
@@ -144,6 +148,18 @@ func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) {
return 0, nil return 0, nil
} }
// If there are non-space chars after the ending delimiter and before a '\n',
// flag that this is not a well formed fenced code block.
hasCharsAfterDelimiter := false
for j := end; j < len(data); j++ {
if data[j] == '\n' {
break
}
if !isSpace(data[j]) {
hasCharsAfterDelimiter = true
}
}
// trim outside whitespace // trim outside whitespace
fBegin := nb fBegin := nb
for fBegin < end && data[fBegin] == ' ' { for fBegin < end && data[fBegin] == ' ' {
@@ -155,14 +171,31 @@ func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) {
fEnd-- fEnd--
} }
// render the code span if fBegin == fEnd {
if fBegin != fEnd { return end, nil
code := &ast.Code{}
code.Literal = data[fBegin:fEnd]
return end, code
} }
return end, nil // if delimiter has 3 backticks
if nb == 3 {
i := fBegin
syntaxStart, syntaxLen := syntaxRange(data, &i)
// If we found a '\n' before the end marker and there are only spaces
// after the end marker, then this is a code block.
if hasLFBeforeDelimiter && !hasCharsAfterDelimiter {
codeblock := &ast.CodeBlock{
IsFenced: true,
Info: data[syntaxStart : syntaxStart+syntaxLen],
}
codeblock.Literal = data[i:fEnd]
return end, codeblock
}
}
// render the code span
code := &ast.Code{}
code.Literal = data[fBegin:fEnd]
return end, code
} }
// newline preceded by two spaces becomes <br> // newline preceded by two spaces becomes <br>
@@ -781,7 +814,7 @@ func entity(p *Parser, data []byte, offset int) (int, ast.Node) {
codepoint, err = strconv.ParseUint(string(ent[2:len(ent)-1]), 10, 64) codepoint, err = strconv.ParseUint(string(ent[2:len(ent)-1]), 10, 64)
} }
if err == nil { // only if conversion was valid return here. if err == nil { // only if conversion was valid return here.
return end, newTextNode([]byte(string(codepoint))) return end, newTextNode([]byte(string(rune(codepoint))))
} }
return end, newTextNode(ent) return end, newTextNode(ent)
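
The right-hand side converts the parsed number through rune before building the string; a quick illustration of why (the code point value is arbitrary):

```go
package main

import "fmt"

func main() {
	var codepoint uint64 = 0x263A // arbitrary example: WHITE SMILING FACE
	// Converting through rune makes the "integer is a Unicode code point"
	// intent explicit; converting an integer type straight to string yields
	// the same rune for valid code points but is flagged by go vet since Go 1.15.
	fmt.Println(string(rune(codepoint))) // ☺
}
```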

View File

@@ -303,7 +303,8 @@ func (a *API) startPipes() (err error) {
cmd := a.runOpts.Command("chat", "notification-settings", fmt.Sprintf("-disable-typing=%v", !a.runOpts.EnableTyping)) cmd := a.runOpts.Command("chat", "notification-settings", fmt.Sprintf("-disable-typing=%v", !a.runOpts.EnableTyping))
if err = cmd.Run(); err != nil { if err = cmd.Run(); err != nil {
return fmt.Errorf("unable to set notifiation settings %v", err) // This is a performance optimization but isn't a fatal error.
a.Debug("unable to set notification settings %v", err)
} }
a.apiCmd = a.runOpts.Command("chat", "api") a.apiCmd = a.runOpts.Command("chat", "api")
@@ -312,7 +313,7 @@ func (a *API) startPipes() (err error) {
} }
output, err := a.apiCmd.StdoutPipe() output, err := a.apiCmd.StdoutPipe()
if err != nil { if err != nil {
return fmt.Errorf("unabel to get api stdout: %v", err) return fmt.Errorf("unable to get api stdout: %v", err)
} }
if runtime.GOOS != "windows" { if runtime.GOOS != "windows" {
a.apiCmd.ExtraFiles = []*os.File{output.(*os.File)} a.apiCmd.ExtraFiles = []*os.File{output.(*os.File)}

View File

@@ -17,6 +17,42 @@ This package provides various compression algorithms.
# changelog # changelog
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
<details>
<summary>See Details</summary>
Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
While the release has been extensively tested, additional testing is recommended when upgrading.
</details>
* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
* Jan 25, 2022 (v1.14.2)
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
* Jan 11, 2022 (v1.14.1) * Jan 11, 2022 (v1.14.1)
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
@@ -53,6 +89,9 @@ This package provides various compression algorithms.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
<details>
<summary>See changes to v1.12.x</summary>
* May 25, 2021 (v1.12.3) * May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +113,10 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
</details>
<details> <details>
<summary>See changes prior to v1.12.1</summary> <summary>See changes to v1.11.x</summary>
* Mar 26, 2021 (v1.11.13) * Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +175,7 @@ This package provides various compression algorithms.
</details> </details>
<details> <details>
<summary>See changes prior to v1.11.0</summary> <summary>See changes to v1.10.x</summary>
* July 8, 2020 (v1.10.11) * July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -297,11 +337,6 @@ This package provides various compression algorithms.
# deflate usage # deflate usage
* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation | old import | new import | Documentation
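
As an aside to the drop-in note above, swapping an import path is all that is needed; a minimal sketch using the gzip replacement (output size will vary):

```go
package main

import (
	"bytes"
	"fmt"

	// Standard library equivalent would be "compress/gzip"; the API is the same.
	gzip "github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, drop-in gzip")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "compressed bytes")
}
```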
@@ -323,6 +358,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below. the stateless compress described below.
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
# Stateless compression # Stateless compression
This package offers stateless compression as a special option for gzip/deflate. This package offers stateless compression as a special option for gzip/deflate.

View File

@@ -0,0 +1,5 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

View File

@@ -8,115 +8,10 @@ package huff0
import ( import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt"
"io" "io"
) )
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
in []byte
off uint // next byte to read is at in[off - 1]
value uint64
bitsRead uint8
}
// init initializes and resets the bit reader.
func (b *bitReader) init(in []byte) error {
if len(in) < 1 {
return errors.New("corrupt stream: too short")
}
b.in = in
b.off = uint(len(in))
// The highest bit of the last byte indicates where to start
v := in[len(in)-1]
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
b.fillFastStart()
} else {
b.fill()
b.fill()
}
b.bitsRead += 8 - uint8(highBit32(uint32(v)))
return nil
}
// peekBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) peekBitsFast(n uint8) uint16 {
const regMask = 64 - 1
v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
return v
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
}
func (b *bitReader) advance(n uint8) {
b.bitsRead += n
}
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.bitsRead = 0
b.off -= 8
}
// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if b.off > 4 {
v := b.in[b.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
return
}
for b.off > 0 {
b.value = (b.value << 8) | uint64(b.in[b.off-1])
b.bitsRead -= 8
b.off--
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
return nil
}
// bitReader reads a bitstream in reverse. // bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used // The last set bit indicates the start of the stream and is used
// for aligning the input. // for aligning the input.
@@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
return b.off == 0 && b.bitsRead >= 64 return b.off == 0 && b.bitsRead >= 64
} }
func (b *bitReaderBytes) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred. // close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error { func (b *bitReaderBytes) close() error {
// Release reference. // Release reference.
b.in = nil b.in = nil
if b.remaining() > 0 {
return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
}
if b.bitsRead > 64 { if b.bitsRead > 64 {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
@@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63)) return uint16(b.value >> ((64 - n) & 63))
} }
// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}
func (b *bitReaderShifted) advance(n uint8) { func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n b.bitsRead += n
b.value <<= n & 63 b.value <<= n & 63
@@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool {
return b.off == 0 && b.bitsRead >= 64 return b.off == 0 && b.bitsRead >= 64
} }
func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred. // close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error { func (b *bitReaderShifted) close() error {
// Release reference. // Release reference.
b.in = nil b.in = nil
if b.remaining() > 0 {
return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
}
if b.bitsRead > 64 { if b.bitsRead > 64 {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
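
The new remaining()/close() pair rejects streams that still have unread bits; a small arithmetic sketch of the accounting (values chosen for illustration):

```go
package main

import "fmt"

// remaining mirrors the accounting added above: off counts unread input
// bytes and bitsRead counts consumed bits of the 64-bit cache.
func remaining(off uint, bitsRead uint8) uint {
	return off*8 + uint(64-bitsRead)
}

func main() {
	// 2 unread bytes and 60 of 64 cached bits consumed leaves
	// 2*8 + (64-60) = 20 bits, so close() would now report corrupt input.
	fmt.Println(remaining(2, 60))
}
```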

View File

@@ -2,6 +2,7 @@ package huff0
import ( import (
"fmt" "fmt"
"math"
"runtime" "runtime"
"sync" "sync"
) )
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(s.Out)-idx > math.MaxUint16 {
// We cannot store the size in the jump table
return nil, ErrIncompressible
}
// Write compressed length as little endian before block. // Write compressed length as little endian before block.
if i < 3 { if i < 3 {
// Last length is not written. // Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
return nil, errs[i] return nil, errs[i]
} }
o := s.tmpOut[i] o := s.tmpOut[i]
if len(o) > math.MaxUint16 {
// We cannot store the size in the jump table
return nil, ErrIncompressible
}
// Write compressed length as little endian before block. // Write compressed length as little endian before block.
if i < 3 { if i < 3 {
// Last length is not written. // Last length is not written.
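
An illustrative sketch (not the library's code) of why the new math.MaxUint16 guard exists: the 4X layout prefixes the data with the first three stream sizes as little-endian uint16 values, so no single stream may exceed 65535 bytes:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// writeJumpTable lays out three little-endian uint16 lengths followed by
// the four concatenated streams; the fourth length is implied by the total.
func writeJumpTable(dst []byte, streams [4][]byte) ([]byte, error) {
	for i := 0; i < 3; i++ {
		n := len(streams[i])
		if n > math.MaxUint16 {
			return nil, errors.New("stream too large for jump table")
		}
		dst = append(dst, byte(n), byte(n>>8)) // little-endian length
	}
	for _, s := range streams {
		dst = append(dst, s...)
	}
	return dst, nil
}

func main() {
	out, err := writeJumpTable(nil, [4][]byte{{1}, {2, 2}, {3}, {4}})
	fmt.Println(out, err)
}
```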

View File

@@ -4,6 +4,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"sync"
"github.com/klauspost/compress/fse" "github.com/klauspost/compress/fse"
) )
@@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder {
return &Decoder{ return &Decoder{
dt: s.dt, dt: s.dt,
actualTableLog: s.actualTableLog, actualTableLog: s.actualTableLog,
bufs: &s.decPool,
} }
} }
@@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder {
type Decoder struct { type Decoder struct {
dt dTable dt dTable
actualTableLog uint8 actualTableLog uint8
bufs *sync.Pool
}
func (d *Decoder) buffer() *[4][256]byte {
buf, ok := d.bufs.Get().(*[4][256]byte)
if ok {
return buf
}
return &[4][256]byte{}
} }
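
A generic illustration (not the library's code) of the pooling pattern introduced here: fixed-size scratch buffers come from a sync.Pool and are handed back on every exit path so repeated decodes avoid fresh allocations:

```go
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return &[4][256]byte{} },
}

func decodeWithScratch() {
	buf := bufPool.Get().(*[4][256]byte)
	defer bufPool.Put(buf) // return the buffer on every exit path
	buf[0][0] = 42         // stand-in for real decode work
	fmt.Println("scratch byte:", buf[0][0])
}

func main() { decodeWithScratch() }
```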
// Decompress1X will decompress a 1X encoded stream. // Decompress1X will decompress a 1X encoded stream.
@@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:tlSize] dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty. // Use temp table to avoid bound checks/append penalty.
var buf [256]byte bufs := d.buffer()
buf := &bufs[0]
var off uint8 var off uint8
for br.off >= 8 { for br.off >= 8 {
@@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
br.close() br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
dst = append(dst, buf[:]...) dst = append(dst, buf[:]...)
@@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
} }
if len(dst)+int(off) > maxDecodedSize { if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
} }
} }
if len(dst) >= maxDecodedSize { if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
bitsLeft -= nBits bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8)) dst = append(dst, uint8(v.entry>>8))
} }
d.bufs.Put(bufs)
return dst, br.close() return dst, br.close()
} }
@@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256] dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty. // Use temp table to avoid bound checks/append penalty.
var buf [256]byte bufs := d.buffer()
buf := &bufs[0]
var off uint8 var off uint8
switch d.actualTableLog { switch d.actualTableLog {
@@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
br.close() br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
dst = append(dst, buf[:]...) dst = append(dst, buf[:]...)
@@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
br.close() br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
dst = append(dst, buf[:]...) dst = append(dst, buf[:]...)
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
} }
} }
default: default:
d.bufs.Put(bufs)
return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
} }
if len(dst)+int(off) > maxDecodedSize { if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
} }
if len(dst) >= maxDecodedSize { if len(dst) >= maxDecodedSize {
br.close() br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
v := dt[br.peekByteFast()>>shift] v := dt[br.peekByteFast()>>shift]
@@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits) bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8)) dst = append(dst, uint8(v.entry>>8))
} }
d.bufs.Put(bufs)
return dst, br.close() return dst, br.close()
} }
@@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256] dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty. // Use temp table to avoid bound checks/append penalty.
var buf [256]byte bufs := d.buffer()
buf := &bufs[0]
var off uint8 var off uint8
const shift = 56 const shift = 56
@@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
off += 4 off += 4
if off == 0 { if off == 0 {
if len(dst)+256 > maxDecodedSize { if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
} }
if len(dst)+int(off) > maxDecodedSize { if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
} }
} }
if len(dst) >= maxDecodedSize { if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close() br.close()
return nil, ErrMaxDecodedSizeExceeded return nil, ErrMaxDecodedSizeExceeded
} }
@@ -688,195 +721,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits) bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8)) dst = append(dst, uint8(v.entry>>8))
} }
d.bufs.Put(bufs)
return dst, br.close() return dst, br.close()
} }
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
var buf [256]byte
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256 / 4
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
}
{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
}
off += 2
if off == bufoff {
if bufoff > dstEvery {
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[:bufoff])
copy(out[dstEvery:], buf[bufoff:bufoff*2])
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
off = 0
out = out[bufoff:]
decoded += 256
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[:off])
copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
for i := range br {
offset := dstEvery * i
br := &br[i]
bitsLeft := br.off*8 + uint(64-br.bitsRead)
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
// end inline...
if offset >= len(out) {
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
// Decompress4X will decompress a 4X encoded stream. // Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly. // The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of // The *capacity* of the dst slice must match the destination size of
@@ -916,12 +764,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize] single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty. // Use temp table to avoid bound checks/append penalty.
var buf [256]byte buf := d.buffer()
var off uint8 var off uint8
var decoded int var decoded int
// Decode 4 values from each decoder/loop. // Decode 4 values from each decoder/loop.
const bufoff = 256 / 4 const bufoff = 256
for { for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break break
@@ -942,8 +790,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8) buf[stream][off] = uint8(v >> 8)
buf[off+bufoff*stream2] = uint8(v2 >> 8) buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +799,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8) buf[stream][off+1] = uint8(v >> 8)
buf[off+bufoff*stream2+1] = uint8(v2 >> 8) buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +808,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8) buf[stream][off+2] = uint8(v >> 8)
buf[off+bufoff*stream2+2] = uint8(v2 >> 8) buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +817,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8) buf[stream][off+3] = uint8(v >> 8)
buf[off+bufoff*stream+3] = uint8(v >> 8) buf[stream2][off+3] = uint8(v2 >> 8)
} }
{ {
@@ -987,8 +835,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8) buf[stream][off] = uint8(v >> 8)
buf[off+bufoff*stream2] = uint8(v2 >> 8) buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +844,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8) buf[stream][off+1] = uint8(v >> 8)
buf[off+bufoff*stream2+1] = uint8(v2 >> 8) buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
@@ -1005,8 +853,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8) buf[stream][off+2] = uint8(v >> 8)
buf[off+bufoff*stream2+2] = uint8(v2 >> 8) buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
@@ -1014,25 +862,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63 br1.value <<= v & 63
br2.bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8) buf[stream][off+3] = uint8(v >> 8)
buf[off+bufoff*stream+3] = uint8(v >> 8) buf[stream2][off+3] = uint8(v2 >> 8)
} }
off += 4 off += 4
if off == bufoff { if off == 0 {
if bufoff > dstEvery { if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1") return nil, errors.New("corruption detected: stream overrun 1")
} }
copy(out, buf[:bufoff]) copy(out, buf[0][:])
copy(out[dstEvery:], buf[bufoff:bufoff*2]) copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) copy(out[dstEvery*3:], buf[3][:])
off = 0
out = out[bufoff:] out = out[bufoff:]
decoded += 256 decoded += bufoff * 4
// There must at least be 3 buffers left. // There must at least be 3 buffers left.
if len(out) < dstEvery*3 { if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2") return nil, errors.New("corruption detected: stream overrun 2")
} }
} }
@@ -1040,23 +889,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
if off > 0 { if off > 0 {
ioff := int(off) ioff := int(off)
if len(out) < dstEvery*3+ioff { if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3") return nil, errors.New("corruption detected: stream overrun 3")
} }
copy(out, buf[:off]) copy(out, buf[0][:off])
copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4 decoded += int(off) * 4
out = out[off:] out = out[off:]
} }
// Decode remaining. // Decode remaining.
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br { for i := range br {
offset := dstEvery * i offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i] br := &br[i]
bitsLeft := int(br.off*8) + int(64-br.bitsRead) bitsLeft := br.remaining()
for bitsLeft > 0 { for bitsLeft > 0 {
if br.finished() { if br.finished() {
d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF return nil, io.ErrUnexpectedEOF
} }
if br.bitsRead >= 56 { if br.bitsRead >= 56 {
@@ -1076,7 +933,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
} }
} }
// end inline... // end inline...
if offset >= len(out) { if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4") return nil, errors.New("corruption detected: stream overrun 4")
} }
@@ -1084,16 +942,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
v := single[uint8(br.value>>shift)].entry v := single[uint8(br.value>>shift)].entry
nBits := uint8(v) nBits := uint8(v)
br.advance(nBits) br.advance(nBits)
bitsLeft -= int(nBits) bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8) out[offset] = uint8(v >> 8)
offset++ offset++
} }
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i decoded += offset - dstEvery*i
err = br.close() err = br.close()
if err != nil { if err != nil {
d.bufs.Put(buf)
return nil, err return nil, err
} }
} }
d.bufs.Put(buf)
if dstSize != decoded { if dstSize != decoded {
return nil, errors.New("corruption detected: short output block") return nil, errors.New("corruption detected: short output block")
} }
@@ -1135,12 +999,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize] single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty. // Use temp table to avoid bound checks/append penalty.
var buf [256]byte buf := d.buffer()
var off uint8 var off uint8
var decoded int var decoded int
// Decode 4 values from each decoder/loop. // Decode 4 values from each decoder/loop.
const bufoff = 256 / 4 const bufoff = 256
for { for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break break
@@ -1150,104 +1014,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes. // Interleave 2 decodes.
const stream = 0 const stream = 0
const stream2 = 1 const stream2 = 1
br[stream].fillFast() br1 := &br[stream]
br[stream2].fillFast() br2 := &br[stream2]
br1.fillFast()
br2.fillFast()
v := single[uint8(br[stream].value>>shift)].entry v := single[uint8(br1.value>>shift)].entry
v2 := single[uint8(br[stream2].value>>shift)].entry v2 := single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8) buf[stream][off] = uint8(v >> 8)
buf[off+bufoff*stream2] = uint8(v2 >> 8) buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br[stream].value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br[stream2].value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8) buf[stream][off+1] = uint8(v >> 8)
buf[off+bufoff*stream2+1] = uint8(v2 >> 8) buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br[stream].value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br[stream2].value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8) buf[stream][off+2] = uint8(v >> 8)
buf[off+bufoff*stream2+2] = uint8(v2 >> 8) buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br[stream].value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br[stream2].value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+3] = uint8(v >> 8) buf[stream][off+3] = uint8(v >> 8)
buf[off+bufoff*stream2+3] = uint8(v2 >> 8) buf[stream2][off+3] = uint8(v2 >> 8)
} }
{ {
const stream = 2 const stream = 2
const stream2 = 3 const stream2 = 3
br[stream].fillFast() br1 := &br[stream]
br[stream2].fillFast() br2 := &br[stream2]
br1.fillFast()
br2.fillFast()
v := single[uint8(br[stream].value>>shift)].entry v := single[uint8(br1.value>>shift)].entry
v2 := single[uint8(br[stream2].value>>shift)].entry v2 := single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8) buf[stream][off] = uint8(v >> 8)
buf[off+bufoff*stream2] = uint8(v2 >> 8) buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br[stream].value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br[stream2].value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8) buf[stream][off+1] = uint8(v >> 8)
buf[off+bufoff*stream2+1] = uint8(v2 >> 8) buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br[stream].value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br[stream2].value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8) buf[stream][off+2] = uint8(v >> 8)
buf[off+bufoff*stream2+2] = uint8(v2 >> 8) buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br[stream].value>>shift)].entry v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br[stream2].value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry
br[stream].bitsRead += uint8(v) br1.bitsRead += uint8(v)
br[stream].value <<= v & 63 br1.value <<= v & 63
br[stream2].bitsRead += uint8(v2) br2.bitsRead += uint8(v2)
br[stream2].value <<= v2 & 63 br2.value <<= v2 & 63
buf[off+bufoff*stream+3] = uint8(v >> 8) buf[stream][off+3] = uint8(v >> 8)
buf[off+bufoff*stream2+3] = uint8(v2 >> 8) buf[stream2][off+3] = uint8(v2 >> 8)
} }
off += 4 off += 4
if off == bufoff { if off == 0 {
if bufoff > dstEvery { if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1") return nil, errors.New("corruption detected: stream overrun 1")
} }
copy(out, buf[:bufoff]) copy(out, buf[0][:])
copy(out[dstEvery:], buf[bufoff:bufoff*2]) copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) copy(out[dstEvery*3:], buf[3][:])
off = 0
out = out[bufoff:] out = out[bufoff:]
decoded += 256 decoded += bufoff * 4
// There must at least be 3 buffers left. // There must at least be 3 buffers left.
if len(out) < dstEvery*3 { if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2") return nil, errors.New("corruption detected: stream overrun 2")
} }
} }
@@ -1257,21 +1126,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
if len(out) < dstEvery*3+ioff { if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3") return nil, errors.New("corruption detected: stream overrun 3")
} }
copy(out, buf[:off]) copy(out, buf[0][:off])
copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4 decoded += int(off) * 4
out = out[off:] out = out[off:]
} }
// Decode remaining. // Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br { for i := range br {
offset := dstEvery * i offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i] br := &br[i]
bitsLeft := int(br.off*8) + int(64-br.bitsRead) bitsLeft := br.remaining()
for bitsLeft > 0 { for bitsLeft > 0 {
if br.finished() { if br.finished() {
d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF return nil, io.ErrUnexpectedEOF
} }
if br.bitsRead >= 56 { if br.bitsRead >= 56 {
@@ -1291,7 +1166,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
} }
} }
// end inline... // end inline...
if offset >= len(out) { if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4") return nil, errors.New("corruption detected: stream overrun 4")
} }
@@ -1299,16 +1175,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
v := single[br.peekByteFast()].entry v := single[br.peekByteFast()].entry
nBits := uint8(v) nBits := uint8(v)
br.advance(nBits) br.advance(nBits)
bitsLeft -= int(nBits) bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8) out[offset] = uint8(v >> 8)
offset++ offset++
} }
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i decoded += offset - dstEvery*i
err = br.close() err = br.close()
if err != nil { if err != nil {
d.bufs.Put(buf)
return nil, err return nil, err
} }
} }
d.bufs.Put(buf)
if dstSize != decoded { if dstSize != decoded {
return nil, errors.New("corruption detected: short output block") return nil, errors.New("corruption detected: short output block")
} }
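Note on the hunks above (commentary, not part of the diff): buf is now a *[4][256]byte taken from the decoder's pool, bufoff is 256, and off is a uint8, so the old "off == bufoff" flush check becomes "off == 0" and relies on 8-bit wraparound. A minimal sketch of that wraparound in plain Go:

    package main

    import "fmt"

    func main() {
        var off uint8
        for i := 0; i < 64; i++ {
            off += 4 // 64 * 4 = 256, which wraps to 0 in a uint8
        }
        fmt.Println(off) // prints 0: the flush condition fires exactly once per 256 bytes per stream
    }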


@@ -0,0 +1,488 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@@ -0,0 +1,197 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@@ -0,0 +1,181 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
package huff0
import (
"errors"
"fmt"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
// go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
// go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
use8BitTables := d.actualTableLog <= 8
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
const debug = false
// see: bitReaderShifted.peekBitsFast()
peekBits := uint8((64 - d.actualTableLog) & 63)
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
} else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
}
if off != 0 {
break
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
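For orientation (commentary, not part of the diff): the Decompress4X added above is normally reached through the huff0 table reader. A minimal sketch, under the assumption that huff0.ReadTable and Scratch.Decoder keep their usual signatures (neither is shown in this diff):

    package huff0example

    import "github.com/klauspost/compress/huff0"

    // Sketch only: decode a 4X huff0 block. ReadTable parses the Huffman
    // table that precedes the four interleaved streams; Decompress4X needs
    // dst capacity equal to the exact uncompressed size.
    func decode4X(in []byte, dstSize int) ([]byte, error) {
        var s huff0.Scratch
        sc, remain, err := huff0.ReadTable(in, &s)
        if err != nil {
            return nil, err
        }
        dst := make([]byte, 0, dstSize)
        return sc.Decoder().Decompress4X(dst, remain)
    }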


@@ -0,0 +1,506 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@@ -0,0 +1,195 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@@ -0,0 +1,193 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// This file contains a generic implementation of Decoder.Decompress4X.
package huff0
import (
"errors"
"fmt"
)
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
off += 2
if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}


@@ -8,6 +8,7 @@ import (
"fmt" "fmt"
"math" "math"
"math/bits" "math/bits"
"sync"
"github.com/klauspost/compress/fse" "github.com/klauspost/compress/fse"
) )
@@ -116,6 +117,7 @@ type Scratch struct {
nodes []nodeElt nodes []nodeElt
tmpOut [4][]byte tmpOut [4][]byte
fse *fse.Scratch fse *fse.Scratch
decPool sync.Pool // *[4][256]byte buffers.
huffWeight [maxSymbolValue + 1]byte huffWeight [maxSymbolValue + 1]byte
} }
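The d.buffer() / d.bufs.Put calls in the decompress hunks above draw from this new pool. A rough sketch of the pattern with illustrative names (the actual accessor lives elsewhere in the package and is not shown in this diff):

    package huff0example

    import "sync"

    // decBufs hands out reusable [4][256]byte scratch buffers so the hot
    // decode loops avoid a fresh 1 KiB allocation on every call.
    type decBufs struct {
        pool sync.Pool
    }

    func (p *decBufs) buffer() *[4][256]byte {
        if b, ok := p.pool.Get().(*[4][256]byte); ok {
            return b
        }
        return &[4][256]byte{}
    }

    func (p *decBufs) Put(b *[4][256]byte) {
        p.pool.Put(b)
    }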


@@ -1,7 +1,7 @@
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
//go:build !appengine && !noasm && gc //go:build !appengine && !noasm && gc && !noasm
// +build !appengine,!noasm,gc // +build !appengine,!noasm,gc,!noasm
package s2 package s2

File diff suppressed because it is too large


@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
in the future. So if you want to limit concurrency for future updates, specify the concurrency in the future. So if you want to limit concurrency for future updates, specify the concurrency
you would like. you would like.
If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
which will compress input as each block is completed, blocking on writes until each has completed.
You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
compression settings can be specified. compression settings can be specified.
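To illustrate the new synchronous mode described above (sketch only, not from the diff; assumes the standard zstd.NewWriter API):

    package zstdexample

    import (
        "io"

        "github.com/klauspost/compress/zstd"
    )

    // With concurrency 1 the encoder compresses each block on the calling
    // goroutine and blocks on writes, instead of spawning worker goroutines.
    func compressSync(dst io.Writer, src io.Reader) error {
        enc, err := zstd.NewWriter(dst, zstd.WithEncoderConcurrency(1))
        if err != nil {
            return err
        }
        if _, err := io.Copy(enc, src); err != nil {
            enc.Close()
            return err
        }
        return enc.Close()
    }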
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
`EncodeAll` will encode all input in src and append it to dst. `EncodeAll` will encode all input in src and append it to dst.
This function can be called concurrently, but each call will only run on a single goroutine. This function can be called concurrently.
Each call will only run on the same goroutine as the caller.
Encoded blocks can be concatenated and the result will be the combined input stream. Encoded blocks can be concatenated and the result will be the combined input stream.
Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
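A small-block round trip with EncodeAll/DecodeAll as described above (sketch only, not from the diff; uses the same package and zstd import as the previous sketch, and relies on nil being accepted by NewWriter/NewReader when only the All variants are used):

    func roundTrip(payload []byte) ([]byte, error) {
        enc, err := zstd.NewWriter(nil) // only EncodeAll will be used
        if err != nil {
            return nil, err
        }
        defer enc.Close()
        compressed := enc.EncodeAll(payload, nil)

        dec, err := zstd.NewReader(nil) // only DecodeAll will be used
        if err != nil {
            return nil, err
        }
        defer dec.Close()
        return dec.DecodeAll(compressed, nil)
    }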
@@ -149,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package: This package:
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
silesia.tar zskp 1 211947520 73101992 643 313.87 silesia.tar zskp 1 211947520 73821326 634 318.47
silesia.tar zskp 2 211947520 67504318 969 208.38 silesia.tar zskp 2 211947520 67655404 1508 133.96
silesia.tar zskp 3 211947520 64595893 2007 100.68 silesia.tar zskp 3 211947520 64746933 3000 67.37
silesia.tar zskp 4 211947520 60995370 8825 22.90 silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd: cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56 silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92 silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package: gzip, stdlib/this package:
silesia.tar gzstd 1 211947520 80007735 1654 122.21 silesia.tar gzstd 1 211947520 80007735 1498 134.87
silesia.tar gzkp 1 211947520 80136201 1152 175.45 silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible. GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
gob-stream zskp 1 1911399616 235022249 3088 590.30 gob-stream zskp 1 1911399616 233948096 3230 564.34
gob-stream zskp 2 1911399616 205669791 3786 481.34 gob-stream zskp 2 1911399616 203997694 4997 364.73
gob-stream zskp 3 1911399616 175034659 9636 189.17 gob-stream zskp 3 1911399616 173526523 13435 135.68
gob-stream zskp 4 1911399616 165609838 50369 36.19 gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70 gob-stream zstd 9 1911399616 177620386 16175 112.70
gob-stream gzstd 1 1911399616 357382641 10251 177.82 gob-stream gzstd 1 1911399616 357382013 9046 201.49
gob-stream gzkp 1 1911399616 359753026 5438 335.20 gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
enwik9 zskp 1 1000000000 343848582 3609 264.18 enwik9 zskp 1 1000000000 343833605 3687 258.64
enwik9 zskp 2 1000000000 317276632 5746 165.97 enwik9 zskp 2 1000000000 317001237 7672 124.29
enwik9 zskp 3 1000000000 292243069 12162 78.41 enwik9 zskp 3 1000000000 291915823 15923 59.89
enwik9 zskp 4 1000000000 262183768 82837 11.51 enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40 enwik9 zstd 9 1000000000 278348700 28549 33.40
enwik9 gzstd 1 1000000000 382578136 9604 99.30 enwik9 gzstd 1 1000000000 382578136 8608 110.78
enwik9 gzkp 1 1000000000 383825945 6544 145.73 enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file. Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications: VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data: CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
``` ```
## Decompressor ## Decompressor
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
} }
``` ```
It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
See "Allocation-less operation" below. when running with default settings.
Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
as it is being requested only.
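As a minimal sketch of that synchronous path (the `decompressSync` name is illustrative, not an API of the package), copying a stream without background goroutines could look like:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// decompressSync decompresses in to out on the calling goroutine only.
// With WithDecoderConcurrency(1) no asynchronous stages are started, so
// data is only decompressed as it is requested by Read.
func decompressSync(in io.Reader, out io.Writer) error {
	d, err := zstd.NewReader(in, zstd.WithDecoderConcurrency(1))
	if err != nil {
		return err
	}
	// Close still releases the pooled block decoder and other resources.
	defer d.Close()
	_, err = io.Copy(out, d)
	return err
}

func main() {
	if err := decompressSync(os.Stdin, os.Stdout); err != nil {
		panic(err)
	}
}
```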
For decoding buffers, it could look something like this: For decoding buffers, it could look something like this:
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
// Create a reader that caches decompressors. // Create a reader that caches decompressors.
// For this operation type we supply a nil Reader. // For this operation type we supply a nil Reader.
var decoder, _ = zstd.NewReader(nil) var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
// Decompress a buffer. We don't supply a destination buffer, // Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder. // so it will be allocated by the decoder.
@@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) {
``` ```
Both of these cases should provide the functionality needed. Both of these cases should provide the functionality needed.
The decoder can be used for *concurrent* decompression of multiple buffers. The decoder can be used for *concurrent* decompression of multiple buffers.
By default 4 decompressors will be created.
It will only allow a certain number of concurrent operations to run. It will only allow a certain number of concurrent operations to run.
To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
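For illustration only (the payloads and structure below are invented; only the `zstd` calls themselves are real), sharing one decoder between goroutines might look like:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/klauspost/compress/zstd"
)

// One shared decoder for the whole program. DecodeAll may be called from
// many goroutines; WithDecoderConcurrency(0) allows up to GOMAXPROCS
// decodes to run at the same time, extra callers simply wait for a slot.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
var encoder, _ = zstd.NewWriter(nil)

func main() {
	// Build a few compressed buffers to decode concurrently (made-up data).
	inputs := [][]byte{
		encoder.EncodeAll([]byte("first payload"), nil),
		encoder.EncodeAll([]byte("second payload"), nil),
		encoder.EncodeAll([]byte("third payload"), nil),
	}

	out := make([][]byte, len(inputs))
	var wg sync.WaitGroup
	for i, in := range inputs {
		wg.Add(1)
		go func(i int, in []byte) {
			defer wg.Done()
			var err error
			out[i], err = decoder.DecodeAll(in, nil)
			if err != nil {
				panic(err) // illustrative; real code would collect errors
			}
		}(i, in)
	}
	wg.Wait()
	fmt.Printf("%s | %s | %s\n", out[0], out[1], out[2])
}
```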
### Dictionaries ### Dictionaries
@@ -357,19 +369,21 @@ In this case no unneeded allocations should be made.
The buffer decoder does everything on the same goroutine and does nothing concurrently. The buffer decoder does everything on the same goroutine and does nothing concurrently.
It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
The stream decoder operates on The stream decoder will create goroutines that:
* One goroutine reads input and splits the input to several block decoders. 1) Reads input and splits the input into blocks.
* A number of decoders will decode blocks. 2) Decompression of literals.
* A goroutine coordinates these blocks and sends history from one to the next. 3) Decompression of sequences.
4) Reconstruction of output stream.
So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
In practice this means that concurrency is often limited to utilizing about 2 cores effectively. In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
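As an assumed sketch of how that interacts with the concurrency option on a stream (the value 2 here is an arbitrary choice), limiting how far the decoder runs ahead of the reader could look like:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Roughly speaking, the concurrency level bounds how many blocks may be
	// prepared ahead of what has actually been read. Lower values reduce
	// memory use and read-ahead; higher values help throughput.
	d, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(2))
	if err != nil {
		panic(err)
	}
	defer d.Close()

	// The decoder prepares data in the background while we consume it here.
	if _, err := io.Copy(os.Stdout, d); err != nil {
		panic(err)
	}
}
```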
### Benchmarks ### Benchmarks
These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
View File
@@ -7,6 +7,7 @@ package zstd
import ( import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt"
"io" "io"
"math/bits" "math/bits"
) )
@@ -132,6 +133,9 @@ func (b *bitReader) remain() uint {
func (b *bitReader) close() error { func (b *bitReader) close() error {
// Release reference. // Release reference.
b.in = nil b.in = nil
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
if b.bitsRead > 64 { if b.bitsRead > 64 {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
View File
@@ -76,16 +76,25 @@ type blockDec struct {
// Window size of the block. // Window size of the block.
WindowSize uint64 WindowSize uint64
history chan *history err error
input chan struct{}
result chan decodeOutput // Check against this crc
err error checkCRC []byte
decWG sync.WaitGroup
// Frame to use for singlethreaded decoding. // Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame. // Should not be used by the decoder itself since parent may be another frame.
localFrame *frameDec localFrame *frameDec
sequence []seqVals
async struct {
newHist *history
literals []byte
seqData []byte
seqSize int // Size of uncompressed sequences
fcs uint64
}
// Block is RLE, this is the size. // Block is RLE, this is the size.
RLESize uint32 RLESize uint32
tmp [4]byte tmp [4]byte
@@ -108,13 +117,8 @@ func (b *blockDec) String() string {
func newBlockDec(lowMem bool) *blockDec { func newBlockDec(lowMem bool) *blockDec {
b := blockDec{ b := blockDec{
lowMem: lowMem, lowMem: lowMem,
result: make(chan decodeOutput, 1),
input: make(chan struct{}, 1),
history: make(chan *history, 1),
} }
b.decWG.Add(1)
go b.startDecoder()
return &b return &b
} }
@@ -137,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
case blockTypeReserved: case blockTypeReserved:
return ErrReservedBlockType return ErrReservedBlockType
case blockTypeRLE: case blockTypeRLE:
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
if debugDecoder {
printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
}
return ErrWindowSizeExceeded
}
b.RLESize = uint32(cSize) b.RLESize = uint32(cSize)
if b.lowMem { if b.lowMem {
maxSize = cSize maxSize = cSize
@@ -157,7 +167,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
} }
return ErrCompressedSizeTooBig return ErrCompressedSizeTooBig
} }
// Empty compressed blocks must at least be 2 bytes
// for Literals_Block_Type and one for Sequences_Section_Header.
if cSize < 2 {
return ErrBlockTooSmall
}
case blockTypeRaw: case blockTypeRaw:
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
if debugDecoder {
printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
}
return ErrWindowSizeExceeded
}
b.RLESize = 0 b.RLESize = 0
// We do not need a destination for raw blocks. // We do not need a destination for raw blocks.
maxSize = -1 maxSize = -1
@@ -192,85 +214,14 @@ func (b *blockDec) sendErr(err error) {
b.Last = true b.Last = true
b.Type = blockTypeReserved b.Type = blockTypeReserved
b.err = err b.err = err
b.input <- struct{}{}
} }
// Close will release resources. // Close will release resources.
// Closed blockDec cannot be reset. // Closed blockDec cannot be reset.
func (b *blockDec) Close() { func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
b.decWG.Wait()
} }
// decodeAsync will prepare decoding the block when it receives input. // decodeBuf
// This will separate output and history.
func (b *blockDec) startDecoder() {
defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
case blockTypeRLE:
if cap(b.dst) < int(b.RLESize) {
if b.lowMem {
b.dst = make([]byte, b.RLESize)
} else {
b.dst = make([]byte, maxBlockSize)
}
}
o := decodeOutput{
d: b,
b: b.dst[:b.RLESize],
err: nil,
}
v := b.data[0]
for i := range o.b {
o.b[i] = v
}
hist := <-b.history
hist.append(o.b)
b.result <- o
case blockTypeRaw:
o := decodeOutput{
d: b,
b: b.data,
err: nil,
}
hist := <-b.history
hist.append(o.b)
b.result <- o
case blockTypeCompressed:
b.dst = b.dst[:0]
err := b.decodeCompressed(nil)
o := decodeOutput{
d: b,
b: b.dst,
err: err,
}
if debugDecoder {
println("Decompressed to", len(b.dst), "bytes, error:", err)
}
b.result <- o
case blockTypeReserved:
// Used for returning errors.
<-b.history
b.result <- decodeOutput{
d: b,
b: nil,
err: b.err,
}
default:
panic("Invalid block type")
}
if debugDecoder {
println("blockDec: Finished block")
}
}
}
// decodeAsync will prepare decoding the block when it receives the history.
// If history is provided, it will not fetch it from the channel.
func (b *blockDec) decodeBuf(hist *history) error { func (b *blockDec) decodeBuf(hist *history) error {
switch b.Type { switch b.Type {
case blockTypeRLE: case blockTypeRLE:
@@ -293,14 +244,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
return nil return nil
case blockTypeCompressed: case blockTypeCompressed:
saved := b.dst saved := b.dst
b.dst = hist.b // Append directly to history
hist.b = nil if hist.ignoreBuffer == 0 {
b.dst = hist.b
hist.b = nil
} else {
b.dst = b.dst[:0]
}
err := b.decodeCompressed(hist) err := b.decodeCompressed(hist)
if debugDecoder { if debugDecoder {
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
} }
hist.b = b.dst if hist.ignoreBuffer == 0 {
b.dst = saved hist.b = b.dst
b.dst = saved
} else {
hist.appendKeep(b.dst)
}
return err return err
case blockTypeReserved: case blockTypeReserved:
// Used for returning errors. // Used for returning errors.
@@ -310,30 +270,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
} }
} }
// decodeCompressed will start decompressing a block. func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
// If no history is supplied the decoder will decodeAsync as much as possible
// before fetching from blockDec.history
func (b *blockDec) decodeCompressed(hist *history) error {
in := b.data
delayedHistory := hist == nil
if delayedHistory {
// We must always grab history.
defer func() {
if hist == nil {
<-b.history
}
}()
}
// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
if len(in) < 2 { if len(in) < 2 {
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
litType := literalsBlockType(in[0] & 3) litType := literalsBlockType(in[0] & 3)
var litRegenSize int var litRegenSize int
var litCompSize int var litCompSize int
sizeFormat := (in[0] >> 2) & 3 sizeFormat := (in[0] >> 2) & 3
var fourStreams bool var fourStreams bool
var literals []byte
switch litType { switch litType {
case literalsBlockRaw, literalsBlockRLE: case literalsBlockRaw, literalsBlockRLE:
switch sizeFormat { switch sizeFormat {
@@ -349,7 +297,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
if len(in) < 3 { if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
in = in[3:] in = in[3:]
@@ -360,7 +308,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
if len(in) < 3 { if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
litRegenSize = int(n & 1023) litRegenSize = int(n & 1023)
@@ -371,7 +319,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true fourStreams = true
if len(in) < 4 { if len(in) < 4 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
litRegenSize = int(n & 16383) litRegenSize = int(n & 16383)
@@ -381,7 +329,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true fourStreams = true
if len(in) < 5 { if len(in) < 5 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
litRegenSize = int(n & 262143) litRegenSize = int(n & 262143)
@@ -392,13 +340,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder { if debugDecoder {
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
} }
var literals []byte if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
var huff *huff0.Scratch return in, ErrWindowSizeExceeded
}
switch litType { switch litType {
case literalsBlockRaw: case literalsBlockRaw:
if len(in) < litRegenSize { if len(in) < litRegenSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
literals = in[:litRegenSize] literals = in[:litRegenSize]
in = in[litRegenSize:] in = in[litRegenSize:]
@@ -406,7 +356,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockRLE: case literalsBlockRLE:
if len(in) < 1 { if len(in) < 1 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
@@ -417,7 +367,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.literalBuf = make([]byte, litRegenSize) b.literalBuf = make([]byte, litRegenSize)
} else { } else {
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
} }
} }
} }
@@ -433,7 +382,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockTreeless: case literalsBlockTreeless:
if len(in) < litCompSize { if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
return ErrBlockTooSmall return in, ErrBlockTooSmall
} }
// Store compressed literals, so we defer decoding until we get history. // Store compressed literals, so we defer decoding until we get history.
literals = in[:litCompSize] literals = in[:litCompSize]
@@ -441,15 +390,10 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder { if debugDecoder {
printf("Found %d compressed literals\n", litCompSize) printf("Found %d compressed literals\n", litCompSize)
} }
case literalsBlockCompressed: huff := hist.huffTree
if len(in) < litCompSize { if huff == nil {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) return in, errors.New("literal block was treeless, but no history was defined")
return ErrBlockTooSmall
} }
literals = in[:litCompSize]
in = in[litCompSize:]
huff = huffDecoderPool.Get().(*huff0.Scratch)
var err error
// Ensure we have space to store it. // Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize { if cap(b.literalBuf) < litRegenSize {
if b.lowMem { if b.lowMem {
@@ -458,14 +402,53 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
} }
} }
if huff == nil { var err error
huff = &huff0.Scratch{} // Use our out buffer.
huff.MaxDecodedSize = maxCompressedBlockSize
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
} }
// Make sure we don't leak our literals buffer
if err != nil {
println("decompressing literals:", err)
return in, err
}
if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
case literalsBlockCompressed:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
return in, ErrBlockTooSmall
}
literals = in[:litCompSize]
in = in[litCompSize:]
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
} else {
b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
}
}
huff := hist.huffTree
if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
huff = huffDecoderPool.Get().(*huff0.Scratch)
if huff == nil {
huff = &huff0.Scratch{}
}
}
var err error
huff, literals, err = huff0.ReadTable(literals, huff) huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil { if err != nil {
println("reading huffman table:", err) println("reading huffman table:", err)
return err return in, err
} }
hist.huffTree = huff
huff.MaxDecodedSize = maxCompressedBlockSize
// Use our out buffer. // Use our out buffer.
if fourStreams { if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -474,27 +457,56 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
if err != nil { if err != nil {
println("decoding compressed literals:", err) println("decoding compressed literals:", err)
return err return in, err
} }
// Make sure we don't leak our literals buffer // Make sure we don't leak our literals buffer
if len(literals) != litRegenSize { if len(literals) != litRegenSize {
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
} }
if debugDecoder { if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
} }
} }
hist.decoders.literals = literals
return in, nil
}
// decodeCompressed will start decompressing a block.
func (b *blockDec) decodeCompressed(hist *history) error {
in := b.data
in, err := b.decodeLiterals(in, hist)
if err != nil {
return err
}
err = b.prepareSequences(in, hist)
if err != nil {
return err
}
if hist.decoders.nSeqs == 0 {
b.dst = append(b.dst, hist.decoders.literals...)
return nil
}
err = hist.decoders.decodeSync(hist)
if err != nil {
return err
}
b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset
return nil
}
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if debugDecoder {
printf("prepareSequences: %d byte(s) input\n", len(in))
}
// Decode Sequences // Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 { if len(in) < 1 {
return ErrBlockTooSmall return ErrBlockTooSmall
} }
var nSeqs int
seqHeader := in[0] seqHeader := in[0]
nSeqs := 0
switch { switch {
case seqHeader == 0:
in = in[1:]
case seqHeader < 128: case seqHeader < 128:
nSeqs = int(seqHeader) nSeqs = int(seqHeader)
in = in[1:] in = in[1:]
@@ -511,8 +523,16 @@ func (b *blockDec) decodeCompressed(hist *history) error {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:] in = in[3:]
} }
if nSeqs == 0 && len(in) != 0 {
// When no sequences, there should not be any more data...
if debugDecoder {
printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
}
return ErrUnexpectedBlockSize
}
var seqs = &sequenceDecs{} var seqs = &hist.decoders
seqs.nSeqs = nSeqs
if nSeqs > 0 { if nSeqs > 0 {
if len(in) < 1 { if len(in) < 1 {
return ErrBlockTooSmall return ErrBlockTooSmall
@@ -541,6 +561,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
switch mode { switch mode {
case compModePredefined: case compModePredefined:
if seq.fse != nil && !seq.fse.preDefined {
fseDecoderPool.Put(seq.fse)
}
seq.fse = &fsePredef[i] seq.fse = &fsePredef[i]
case compModeRLE: case compModeRLE:
if br.remain() < 1 { if br.remain() < 1 {
@@ -548,34 +571,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
v := br.Uint8() v := br.Uint8()
br.advance(1) br.advance(1)
dec := fseDecoderPool.Get().(*fseDecoder) if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
symb, err := decSymbolValue(v, symbolTableX[i]) symb, err := decSymbolValue(v, symbolTableX[i])
if err != nil { if err != nil {
printf("RLE Transform table (%v) error: %v", tableIndex(i), err) printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
return err return err
} }
dec.setRLE(symb) seq.fse.setRLE(symb)
seq.fse = dec
if debugDecoder { if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v) printf("RLE set to %+v, code: %v", symb, v)
} }
case compModeFSE: case compModeFSE:
println("Reading table for", tableIndex(i)) println("Reading table for", tableIndex(i))
dec := fseDecoderPool.Get().(*fseDecoder) if seq.fse == nil || seq.fse.preDefined {
err := dec.readNCount(&br, uint16(maxTableSymbol[i])) seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
if err != nil { if err != nil {
println("Read table error:", err) println("Read table error:", err)
return err return err
} }
err = dec.transform(symbolTableX[i]) err = seq.fse.transform(symbolTableX[i])
if err != nil { if err != nil {
println("Transform table error:", err) println("Transform table error:", err)
return err return err
} }
if debugDecoder { if debugDecoder {
println("Read table ok", "symbolLen:", dec.symbolLen) println("Read table ok", "symbolLen:", seq.fse.symbolLen)
} }
seq.fse = dec
case compModeRepeat: case compModeRepeat:
seq.repeat = true seq.repeat = true
} }
@@ -585,140 +610,89 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
in = br.unread() in = br.unread()
} }
// Wait for history.
// All time spent after this is critical since it is strictly sequential.
if hist == nil {
hist = <-b.history
if hist.error {
return ErrDecoderClosed
}
}
// Decode treeless literal block.
if litType == literalsBlockTreeless {
// TODO: We could send the history early WITHOUT the stream history.
// This would allow decoding treeless literals before the byte history is available.
// Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
// So not much obvious gain here.
if hist.huffTree == nil {
return errors.New("literal block was treeless, but no history was defined")
}
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
} else {
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
}
}
var err error
// Use our out buffer.
huff = hist.huffTree
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
}
// Make sure we don't leak our literals buffer
if err != nil {
println("decompressing literals:", err)
return err
}
if len(literals) != litRegenSize {
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
} else {
if hist.huffTree != nil && huff != nil {
if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
huffDecoderPool.Put(hist.huffTree)
}
hist.huffTree = nil
}
}
if huff != nil {
hist.huffTree = huff
}
if debugDecoder { if debugDecoder {
println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
} }
if nSeqs == 0 { if nSeqs == 0 {
// Decompressed content is defined entirely as Literals Section content. if len(b.sequence) > 0 {
b.dst = append(b.dst, literals...) b.sequence = b.sequence[:0]
if delayedHistory {
hist.append(literals)
} }
return nil return nil
} }
br := seqs.br
seqs, err := seqs.mergeHistory(&hist.decoders) if br == nil {
if err != nil { br = &bitReader{}
return err
} }
if debugDecoder {
println("History merged ok")
}
br := &bitReader{}
if err := br.init(in); err != nil { if err := br.init(in); err != nil {
return err return err
} }
// TODO: Investigate if sending history without decoders are faster. if err := seqs.initialize(br, hist, b.dst); err != nil {
// This would allow the sequences to be decoded async and only have to construct stream history. println("initializing sequences:", err)
// If only recent offsets were not transferred, this would be an obvious win. return err
// Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. }
return nil
}
func (b *blockDec) decodeSequences(hist *history) error {
if cap(b.sequence) < hist.decoders.nSeqs {
if b.lowMem {
b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
} else {
b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
}
}
b.sequence = b.sequence[:hist.decoders.nSeqs]
if hist.decoders.nSeqs == 0 {
hist.decoders.seqSize = len(hist.decoders.literals)
return nil
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
return err
}
func (b *blockDec) executeSequences(hist *history) error {
hbytes := hist.b hbytes := hist.b
if len(hbytes) > hist.windowSize { if len(hbytes) > hist.windowSize {
hbytes = hbytes[len(hbytes)-hist.windowSize:] hbytes = hbytes[len(hbytes)-hist.windowSize:]
// We do not need history any more. // We do not need history anymore.
if hist.dict != nil { if hist.dict != nil {
hist.dict.content = nil hist.dict.content = nil
} }
} }
hist.decoders.windowSize = hist.windowSize
if err := seqs.initialize(br, hist, literals, b.dst); err != nil { hist.decoders.out = b.dst[:0]
println("initializing sequences:", err) err := hist.decoders.execute(b.sequence, hbytes)
return err
}
err = seqs.decode(nSeqs, br, hbytes)
if err != nil { if err != nil {
return err return err
} }
if !br.finished() { return b.updateHistory(hist)
return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) }
}
err = br.close() func (b *blockDec) updateHistory(hist *history) error {
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
if len(b.data) > maxCompressedBlockSize { if len(b.data) > maxCompressedBlockSize {
return fmt.Errorf("compressed block size too large (%d)", len(b.data)) return fmt.Errorf("compressed block size too large (%d)", len(b.data))
} }
// Set output and release references. // Set output and release references.
b.dst = seqs.out b.dst = hist.decoders.out
seqs.out, seqs.literals, seqs.hist = nil, nil, nil hist.recentOffsets = hist.decoders.prevOffset
if !delayedHistory {
// If we don't have delayed history, no need to update.
hist.recentOffsets = seqs.prevOffset
return nil
}
if b.Last { if b.Last {
// if last block we don't care about history. // if last block we don't care about history.
println("Last block, no history returned") println("Last block, no history returned")
hist.b = hist.b[:0] hist.b = hist.b[:0]
return nil return nil
} else {
hist.append(b.dst)
if debugDecoder {
println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
}
} }
hist.append(b.dst) hist.decoders.out, hist.decoders.literals = nil, nil
hist.recentOffsets = seqs.prevOffset
if debugDecoder {
println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
}
return nil return nil
} }
View File
@@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
func (r *readerWrapper) readByte() (byte, error) { func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1]) n2, err := r.r.Read(r.tmp[:1])
if err != nil { if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, err return 0, err
} }
if n2 != 1 { if n2 != 1 {
View File
@@ -5,9 +5,13 @@
package zstd package zstd
import ( import (
"errors" "bytes"
"context"
"encoding/binary"
"io" "io"
"sync" "sync"
"github.com/klauspost/compress/zstd/internal/xxhash"
) )
// Decoder provides decoding of zstandard streams. // Decoder provides decoding of zstandard streams.
@@ -22,12 +26,19 @@ type Decoder struct {
// Unreferenced decoders, ready for use. // Unreferenced decoders, ready for use.
decoders chan *blockDec decoders chan *blockDec
// Streams ready to be decoded.
stream chan decodeStream
// Current read position used for Reader functionality. // Current read position used for Reader functionality.
current decoderState current decoderState
// sync stream decoding
syncStream struct {
decodedFrame uint64
br readerWrapper
enabled bool
inFrame bool
}
frame *frameDec
// Custom dictionaries. // Custom dictionaries.
// Always uses copies. // Always uses copies.
dicts map[uint32]dict dicts map[uint32]dict
@@ -46,7 +57,10 @@ type decoderState struct {
output chan decodeOutput output chan decodeOutput
// cancel remaining output. // cancel remaining output.
cancel chan struct{} cancel context.CancelFunc
// crc of current frame
crc *xxhash.Digest
flushed bool flushed bool
} }
@@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
return nil, err return nil, err
} }
} }
d.current.output = make(chan decodeOutput, d.o.concurrent) d.current.crc = xxhash.New()
d.current.flushed = true d.current.flushed = true
if r == nil { if r == nil {
@@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
break break
} }
if !d.nextBlock(n == 0) { if !d.nextBlock(n == 0) {
return n, nil return n, d.current.err
} }
} }
} }
@@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.drainOutput() d.drainOutput()
d.syncStream.br.r = nil
if r == nil { if r == nil {
d.current.err = ErrDecoderNilInput d.current.err = ErrDecoderNilInput
if len(d.current.b) > 0 { if len(d.current.b) > 0 {
@@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error {
} }
return nil return nil
} }
if d.stream == nil {
d.stream = make(chan decodeStream, 1)
d.streamWg.Add(1)
go d.startStreamDecoder(d.stream)
}
// Remove current block. // Remove current block.
d.stashDecoder()
d.current.decodeOutput = decodeOutput{} d.current.decodeOutput = decodeOutput{}
d.current.err = nil d.current.err = nil
d.current.cancel = make(chan struct{})
d.current.flushed = false d.current.flushed = false
d.current.d = nil d.current.d = nil
d.stream <- decodeStream{ // Ensure no-one else is still running...
r: r, d.streamWg.Wait()
output: d.current.output, if d.frame == nil {
cancel: d.current.cancel, d.frame = newFrameDec(d.o)
} }
if d.o.concurrent == 1 {
return d.startSyncDecoder(r)
}
d.current.output = make(chan decodeOutput, d.o.concurrent)
ctx, cancel := context.WithCancel(context.Background())
d.current.cancel = cancel
d.streamWg.Add(1)
go d.startStreamDecoder(ctx, r, d.current.output)
return nil return nil
} }
// drainOutput will drain the output until errEndOfStream is sent. // drainOutput will drain the output until errEndOfStream is sent.
func (d *Decoder) drainOutput() { func (d *Decoder) drainOutput() {
if d.current.cancel != nil { if d.current.cancel != nil {
println("cancelling current") if debugDecoder {
close(d.current.cancel) println("cancelling current")
}
d.current.cancel()
d.current.cancel = nil d.current.cancel = nil
} }
if d.current.d != nil { if d.current.d != nil {
@@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() {
} }
d.decoders <- v.d d.decoders <- v.d
} }
if v.err == errEndOfStream {
println("current flushed")
d.current.flushed = true
return
}
} }
d.current.output = nil
d.current.flushed = true
} }
// WriteTo writes data to w until there's no more data to write or when an error occurs. // WriteTo writes data to w until there's no more data to write or when an error occurs.
@@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
// DecodeAll can be used concurrently. // DecodeAll can be used concurrently.
// The Decoder concurrency limits will be respected. // The Decoder concurrency limits will be respected.
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if d.current.err == ErrDecoderClosed { if d.decoders == nil {
return dst, ErrDecoderClosed return dst, ErrDecoderClosed
} }
@@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
} }
frame.rawInput = nil frame.rawInput = nil
frame.bBuf = nil frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
}
d.decoders <- block d.decoders <- block
}() }()
frame.bBuf = input frame.bBuf = input
@@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
for { for {
frame.history.reset() frame.history.reset()
err := frame.reset(&frame.bBuf) err := frame.reset(&frame.bBuf)
if err == io.EOF { if err != nil {
if debugDecoder { if err == io.EOF {
println("frame reset return EOF") if debugDecoder {
println("frame reset return EOF")
}
return dst, nil
} }
return dst, nil return dst, err
} }
if frame.DictionaryID != nil { if frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID] dict, ok := d.dicts[*frame.DictionaryID]
if !ok { if !ok {
return nil, ErrUnknownDictionary return nil, ErrUnknownDictionary
} }
if debugDecoder {
println("setting dict", frame.DictionaryID)
}
frame.history.setDict(&dict) frame.history.setDict(&dict)
} }
if err != nil {
return dst, err if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
}
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded return dst, ErrDecoderSizeExceeded
} }
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { if frame.FrameContentSize < 1<<30 {
// Never preallocate moe than 1 GB up front. // Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) { if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
copy(dst2, dst) copy(dst2, dst)
@@ -368,6 +393,161 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// If non-blocking mode is used the returned boolean will be false // If non-blocking mode is used the returned boolean will be false
// if no data was available without blocking. // if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) { func (d *Decoder) nextBlock(blocking bool) (ok bool) {
if d.current.err != nil {
// Keep error state.
return false
}
d.current.b = d.current.b[:0]
// SYNC:
if d.syncStream.enabled {
if !blocking {
return false
}
ok = d.nextBlockSync()
if !ok {
d.stashDecoder()
}
return ok
}
//ASYNC:
d.stashDecoder()
if blocking {
d.current.decodeOutput, ok = <-d.current.output
} else {
select {
case d.current.decodeOutput, ok = <-d.current.output:
default:
return false
}
}
if !ok {
// This should not happen, so signal error state...
d.current.err = io.ErrUnexpectedEOF
return false
}
next := d.current.decodeOutput
if next.d != nil && next.d.async.newHist != nil {
d.current.crc.Reset()
}
if debugDecoder {
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
d.current.err = io.ErrShortWrite
}
}
}
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
}
d.current.err = ErrCRCMismatch
} else {
if debugDecoder {
println("CRC ok", tmp[:])
}
}
}
return true
}
func (d *Decoder) nextBlockSync() (ok bool) {
if d.current.d == nil {
d.current.d = <-d.decoders
}
for len(d.current.b) == 0 {
if !d.syncStream.inFrame {
d.frame.history.reset()
d.current.err = d.frame.reset(&d.syncStream.br)
if d.current.err != nil {
return false
}
if d.frame.DictionaryID != nil {
dict, ok := d.dicts[*d.frame.DictionaryID]
if !ok {
d.current.err = ErrUnknownDictionary
return false
} else {
d.frame.history.setDict(&dict)
}
}
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
d.current.err = ErrDecoderSizeExceeded
return false
}
d.syncStream.decodedFrame = 0
d.syncStream.inFrame = true
}
d.current.err = d.frame.next(d.current.d)
if d.current.err != nil {
return false
}
d.frame.history.ensureBlock()
if debugDecoder {
println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
}
histBefore := len(d.frame.history.b)
d.current.err = d.current.d.decodeBuf(&d.frame.history)
if d.current.err != nil {
println("error after:", d.current.err)
return false
}
d.current.b = d.frame.history.b[histBefore:]
if debugDecoder {
println("history after:", len(d.frame.history.b))
}
// Check frame size (before CRC)
d.syncStream.decodedFrame += uint64(len(d.current.b))
if d.syncStream.decodedFrame > d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
d.current.err = ErrFrameSizeExceeded
return false
}
// Check FCS
if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
d.current.err = ErrFrameSizeMismatch
return false
}
// Update/Check CRC
if d.frame.HasCheckSum {
d.frame.crc.Write(d.current.b)
if d.current.d.Last {
d.current.err = d.frame.checkCRC()
if d.current.err != nil {
println("CRC error:", d.current.err)
return false
}
}
}
d.syncStream.inFrame = !d.current.d.Last
}
return true
}
func (d *Decoder) stashDecoder() {
if d.current.d != nil { if d.current.d != nil {
if debugDecoder { if debugDecoder {
printf("re-adding current decoder %p", d.current.d) printf("re-adding current decoder %p", d.current.d)
@@ -375,24 +555,6 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
d.decoders <- d.current.d d.decoders <- d.current.d
d.current.d = nil d.current.d = nil
} }
if d.current.err != nil {
// Keep error state.
return blocking
}
if blocking {
d.current.decodeOutput = <-d.current.output
} else {
select {
case d.current.decodeOutput = <-d.current.output:
default:
return false
}
}
if debugDecoder {
println("got", len(d.current.b), "bytes, error:", d.current.err)
}
return true
} }
// Close will release all resources. // Close will release all resources.
@@ -402,10 +564,10 @@ func (d *Decoder) Close() {
return return
} }
d.drainOutput() d.drainOutput()
if d.stream != nil { if d.current.cancel != nil {
close(d.stream) d.current.cancel()
d.streamWg.Wait() d.streamWg.Wait()
d.stream = nil d.current.cancel = nil
} }
if d.decoders != nil { if d.decoders != nil {
close(d.decoders) close(d.decoders)
@@ -456,100 +618,307 @@ type decodeOutput struct {
err error err error
} }
type decodeStream struct { func (d *Decoder) startSyncDecoder(r io.Reader) error {
r io.Reader d.frame.history.reset()
d.syncStream.br = readerWrapper{r: r}
// Blocks ready to be written to output. d.syncStream.inFrame = false
output chan decodeOutput d.syncStream.enabled = true
d.syncStream.decodedFrame = 0
// cancel reading from the input return nil
cancel chan struct{}
} }
// errEndOfStream indicates that everything from the stream was read.
var errEndOfStream = errors.New("end-of-stream")
// Create Decoder: // Create Decoder:
// Spawn n block decoders. These accept tasks to decode a block. // ASYNC:
// Create goroutine that handles stream processing, this will send history to decoders as they are available. // Spawn 4 go routines.
// Decoders update the history as they decode. // 0: Read frames and decode blocks.
// When a block is returned: // 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
// a) history is sent to the next decoder, // 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
// b) content written to CRC. // 3: Wait for stream history, execute sequences, send stream history.
// c) return data to WRITER. func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
// d) wait for next block to return data.
// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
defer d.streamWg.Done() defer d.streamWg.Done()
frame := newFrameDec(d.o) br := readerWrapper{r: r}
for stream := range inStream {
if debugDecoder { var seqPrepare = make(chan *blockDec, d.o.concurrent)
println("got new stream") var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent)
// Async 1: Prepare blocks...
go func() {
var hist history
var hasErr bool
for block := range seqPrepare {
if hasErr {
if block != nil {
seqDecode <- block
}
continue
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 1: new history")
}
hist.reset()
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
continue
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
} }
br := readerWrapper{r: stream.r} close(seqDecode)
decodeStream: }()
for {
frame.history.reset() // Async 2: Decode sequences...
err := frame.reset(&br) go func() {
if debugDecoder && err != nil { var hist history
println("Frame decoder returned", err) var hasErr bool
for block := range seqDecode {
if hasErr {
if block != nil {
seqExecute <- block
}
continue
} }
if err == nil && frame.DictionaryID != nil { if block.async.newHist != nil {
dict, ok := d.dicts[*frame.DictionaryID] if debugDecoder {
if !ok { println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
err = ErrUnknownDictionary }
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqExecute <- block
continue
}
hist.decoders.literals = block.async.literals
block.err = block.prepareSequences(block.async.seqData, &hist)
if debugDecoder && block.err != nil {
println("prepareSequences returned:", block.err)
}
hasErr = block.err != nil
if block.err == nil {
block.err = block.decodeSequences(&hist)
if debugDecoder && block.err != nil {
println("decodeSequences returned:", block.err)
}
hasErr = block.err != nil
// block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
block.async.seqSize = hist.decoders.seqSize
}
seqExecute <- block
}
close(seqExecute)
}()
var wg sync.WaitGroup
wg.Add(1)
// Async 3: Execute sequences...
frameHistCache := d.frame.history.b
go func() {
var hist history
var decodedFrame uint64
var fcs uint64
var hasErr bool
for block := range seqExecute {
out := decodeOutput{err: block.err, d: block}
if block.err != nil || hasErr {
hasErr = true
output <- out
continue
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 3: new history")
}
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
if cap(hist.b) < hist.allocFrameBuffer {
if cap(frameHistCache) >= hist.allocFrameBuffer {
hist.b = frameHistCache
} else {
hist.b = make([]byte, 0, hist.allocFrameBuffer)
println("Alloc history sized", hist.allocFrameBuffer)
}
}
hist.b = hist.b[:0]
fcs = block.async.fcs
decodedFrame = 0
}
do := decodeOutput{err: block.err, d: block}
switch block.Type {
case blockTypeRLE:
if debugDecoder {
println("add rle block length:", block.RLESize)
}
if cap(block.dst) < int(block.RLESize) {
if block.lowMem {
block.dst = make([]byte, block.RLESize)
} else {
block.dst = make([]byte, maxBlockSize)
}
}
block.dst = block.dst[:block.RLESize]
v := block.data[0]
for i := range block.dst {
block.dst[i] = v
}
hist.append(block.dst)
do.b = block.dst
case blockTypeRaw:
if debugDecoder {
println("add raw block length:", len(block.data))
}
hist.append(block.data)
do.b = block.data
case blockTypeCompressed:
if debugDecoder {
println("execute with history length:", len(hist.b), "window:", hist.windowSize)
}
hist.decoders.seqSize = block.async.seqSize
hist.decoders.literals = block.async.literals
do.err = block.executeSequences(&hist)
hasErr = do.err != nil
if debugDecoder && hasErr {
println("executeSequences returned:", do.err)
}
do.b = block.dst
}
if !hasErr {
decodedFrame += uint64(len(do.b))
if decodedFrame > fcs {
println("fcs exceeded", block.Last, fcs, decodedFrame)
do.err = ErrFrameSizeExceeded
hasErr = true
} else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
do.err = ErrFrameSizeMismatch
hasErr = true
} else { } else {
frame.history.setDict(&dict) if debugDecoder {
println("fcs ok", block.Last, fcs, decodedFrame)
}
} }
} }
if err != nil { output <- do
stream.output <- decodeOutput{ }
err: err, close(output)
frameHistCache = hist.b
wg.Done()
if debugDecoder {
println("decoder goroutines finished")
}
}()
decodeStream:
for {
frame := d.frame
if debugDecoder {
println("New frame...")
}
var historySent bool
frame.history.reset()
err := frame.reset(&br)
if debugDecoder && err != nil {
println("Frame decoder returned", err)
}
if err == nil && frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID]
if !ok {
err = ErrUnknownDictionary
} else {
frame.history.setDict(&dict)
}
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
err = ErrDecoderSizeExceeded
}
if err != nil {
select {
case <-ctx.Done():
case dec := <-d.decoders:
dec.sendErr(err)
seqPrepare <- dec
}
break decodeStream
}
// Go through all blocks of the frame.
for {
var dec *blockDec
select {
case <-ctx.Done():
break decodeStream
case dec = <-d.decoders:
// Once we have a decoder, we MUST return it.
}
err := frame.next(dec)
if !historySent {
h := frame.history
if debugDecoder {
println("Alloc History:", h.allocFrameBuffer)
} }
dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize
historySent = true
} else {
dec.async.newHist = nil
}
if debugDecoder && err != nil {
println("next block returned error:", err)
}
dec.err = err
dec.checkCRC = nil
if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
dec.err = err
}
var tmp [4]byte
copy(tmp[:], crc)
dec.checkCRC = tmp[:]
if debugDecoder {
println("found crc to check:", dec.checkCRC)
}
}
err = dec.err
last := dec.Last
seqPrepare <- dec
if err != nil {
break decodeStream
}
if last {
break break
} }
if debugDecoder {
println("starting frame decoder")
}
// This goroutine will forward history between frames.
frame.frameDone.Add(1)
frame.initAsync()
go frame.startDecoder(stream.output)
decodeFrame:
// Go through all blocks of the frame.
for {
dec := <-d.decoders
select {
case <-stream.cancel:
if !frame.sendErr(dec, io.EOF) {
// To not let the decoder dangle, send it back.
stream.output <- decodeOutput{d: dec}
}
break decodeStream
default:
}
err := frame.next(dec)
switch err {
case io.EOF:
// End of current frame, no error
println("EOF on next block")
break decodeFrame
case nil:
continue
default:
println("block decoder returned", err)
break decodeStream
}
}
// All blocks have started decoding, check if there are more frames.
println("waiting for done")
frame.frameDone.Wait()
println("done waiting...")
} }
frame.frameDone.Wait()
println("Sending EOS")
stream.output <- decodeOutput{err: errEndOfStream}
} }
close(seqPrepare)
wg.Wait()
d.frame.history.b = frameHistCache
} }
View File
@@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() {
concurrent: runtime.GOMAXPROCS(0), concurrent: runtime.GOMAXPROCS(0),
maxWindowSize: MaxWindowSize, maxWindowSize: MaxWindowSize,
} }
if o.concurrent > 4 {
o.concurrent = 4
}
o.maxDecodedSize = 1 << 63 o.maxDecodedSize = 1 << 63
} }
@@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption {
return func(o *decoderOptions) error { o.lowMem = b; return nil } return func(o *decoderOptions) error { o.lowMem = b; return nil }
} }
// WithDecoderConcurrency will set the concurrency, // WithDecoderConcurrency sets the number of created decoders.
// meaning the maximum number of decoders to run concurrently. // When decoding block with DecodeAll, this will limit the number
// The value supplied must be at least 1. // of possible concurrently running decodes.
// By default this will be set to GOMAXPROCS. // When decoding streams, this will limit the number of
// inflight blocks.
// When decoding streams and setting maximum to 1,
// no async decoding will be done.
// When a value of 0 is provided GOMAXPROCS will be used.
// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
func WithDecoderConcurrency(n int) DOption { func WithDecoderConcurrency(n int) DOption {
return func(o *decoderOptions) error { return func(o *decoderOptions) error {
if n <= 0 { if n < 0 {
return errors.New("concurrency must be at least 1") return errors.New("concurrency must be at least 1")
} }
o.concurrent = n if n == 0 {
o.concurrent = runtime.GOMAXPROCS(0)
} else {
o.concurrent = n
}
return nil return nil
} }
} }
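The hunk above changes the contract of WithDecoderConcurrency: 0 now means GOMAXPROCS, 1 forces fully synchronous stream decoding, and the default is capped at min(4, GOMAXPROCS). A minimal usage sketch follows, assuming only the upstream module path visible in the vendor directory (github.com/klauspost/compress/zstd); it is illustrative and not part of this change.

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// WithDecoderConcurrency(1) disables async block decoding for streams,
	// trading throughput for minimal goroutine and memory overhead.
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}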


@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE // TEMPLATE
const hashLog = tableBits const hashLog = tableBits
// seems global, but would be nice to tweak. // seems global, but would be nice to tweak.
const kSearchStrength = 7 const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from. // nextEmit is where in src the next emitLiteral should start from.
nextEmit := s nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE // TEMPLATE
const hashLog = tableBits const hashLog = tableBits
// seems global, but would be nice to tweak. // seems global, but would be nice to tweak.
const kSearchStrength = 8 const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from. // nextEmit is where in src the next emitLiteral should start from.
nextEmit := s nextEmit := s


@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
if cap(s.filling) == 0 { if cap(s.filling) == 0 {
s.filling = make([]byte, 0, e.o.blockSize) s.filling = make([]byte, 0, e.o.blockSize)
} }
if cap(s.current) == 0 { if e.o.concurrent > 1 {
s.current = make([]byte, 0, e.o.blockSize) if cap(s.current) == 0 {
} s.current = make([]byte, 0, e.o.blockSize)
if cap(s.previous) == 0 { }
s.previous = make([]byte, 0, e.o.blockSize) if cap(s.previous) == 0 {
s.previous = make([]byte, 0, e.o.blockSize)
}
s.current = s.current[:0]
s.previous = s.previous[:0]
if s.writing == nil {
s.writing = &blockEnc{lowMem: e.o.lowMem}
s.writing.init()
}
s.writing.initNewEncode()
} }
if s.encoder == nil { if s.encoder == nil {
s.encoder = e.o.encoder() s.encoder = e.o.encoder()
} }
if s.writing == nil {
s.writing = &blockEnc{lowMem: e.o.lowMem}
s.writing.init()
}
s.writing.initNewEncode()
s.filling = s.filling[:0] s.filling = s.filling[:0]
s.current = s.current[:0]
s.previous = s.previous[:0]
s.encoder.Reset(e.o.dict, false) s.encoder.Reset(e.o.dict, false)
s.headerWritten = false s.headerWritten = false
s.eofWritten = false s.eofWritten = false
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err return s.err
} }
// SYNC:
if e.o.concurrent == 1 {
src := s.filling
s.nInput += int64(len(s.filling))
if debugEncoder {
println("Adding sync block,", len(src), "bytes, final:", final)
}
enc := s.encoder
blk := enc.Block()
blk.reset(nil)
enc.Encode(blk, src)
blk.last = final
if final {
s.eofWritten = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.err = err
return err
}
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
s.filling = s.filling[:0]
return s.err
}
// Move blocks forward. // Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current)) s.nInput += int64(len(s.current))


@@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
// WithEncoderConcurrency will set the concurrency, // WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently. // meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1. // The value supplied must be at least 1.
// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS. // By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption { func WithEncoderConcurrency(n int) EOption {
return func(o *encoderOptions) error { return func(o *encoderOptions) error {
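WithEncoderConcurrency gains the matching note above: a value of 1 routes stream writes through the new synchronous path added to Encoder.nextBlock earlier in this diff. A small sketch under the same assumption (upstream module path only, not taken from this change):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// With concurrency 1 each block is compressed inline in Write/Close,
	// so no per-block goroutines are spawned.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		panic(err)
	}
	// Close flushes the final block and writes the frame footer.
	if err := enc.Close(); err != nil {
		panic(err)
	}
}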


@@ -8,23 +8,17 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"errors" "errors"
"hash"
"io" "io"
"sync"
"github.com/klauspost/compress/zstd/internal/xxhash" "github.com/klauspost/compress/zstd/internal/xxhash"
) )
type frameDec struct { type frameDec struct {
o decoderOptions o decoderOptions
crc hash.Hash64 crc *xxhash.Digest
offset int64
WindowSize uint64 WindowSize uint64
// In order queue of blocks being decoded.
decoding chan *blockDec
// Frame history passed between blocks // Frame history passed between blocks
history history history history
@@ -34,15 +28,10 @@ type frameDec struct {
bBuf byteBuf bBuf byteBuf
FrameContentSize uint64 FrameContentSize uint64
frameDone sync.WaitGroup
DictionaryID *uint32 DictionaryID *uint32
HasCheckSum bool HasCheckSum bool
SingleSegment bool SingleSegment bool
// asyncRunning indicates whether the async routine processes input on 'decoding'.
asyncRunningMu sync.Mutex
asyncRunning bool
} }
const ( const (
@@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default: default:
fcsSize = 1 << v fcsSize = 1 << v
} }
d.FrameContentSize = 0 d.FrameContentSize = fcsUnknown
if fcsSize > 0 { if fcsSize > 0 {
b, err := br.readSmall(fcsSize) b, err := br.readSmall(fcsSize)
if err != nil { if err != nil {
@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
} }
if debugDecoder { if debugDecoder {
println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) println("Read FCS:", d.FrameContentSize)
} }
} }
// Move this to shared. // Move this to shared.
d.HasCheckSum = fhd&(1<<2) != 0 d.HasCheckSum = fhd&(1<<2) != 0
if d.HasCheckSum { if d.HasCheckSum {
@@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error {
} }
d.history.windowSize = int(d.WindowSize) d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize { if d.o.lowMem && d.history.windowSize < maxBlockSize {
d.history.maxSize = d.history.windowSize * 2 d.history.allocFrameBuffer = d.history.windowSize * 2
// TODO: Maybe use FrameContent size
} else { } else {
d.history.maxSize = d.history.windowSize + maxBlockSize d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
} }
if debugDecoder {
println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
}
// history contains input - maybe we do something // history contains input - maybe we do something
d.rawInput = br d.rawInput = br
return nil return nil
@@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream. // next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error { func (d *frameDec) next(block *blockDec) error {
if debugDecoder { if debugDecoder {
printf("decoding new block %p:%p", block, block.data) println("decoding new block")
} }
err := block.reset(d.rawInput, d.WindowSize) err := block.reset(d.rawInput, d.WindowSize)
if err != nil { if err != nil {
println("block error:", err) println("block error:", err)
// Signal the frame decoder we have a problem. // Signal the frame decoder we have a problem.
d.sendErr(block, err) block.sendErr(err)
return err return err
} }
block.input <- struct{}{}
if debugDecoder {
println("next block:", block)
}
d.asyncRunningMu.Lock()
defer d.asyncRunningMu.Unlock()
if !d.asyncRunning {
return nil
}
if block.Last {
// We indicate the frame is done by sending io.EOF
d.decoding <- block
return io.EOF
}
d.decoding <- block
return nil return nil
} }
// sendEOF will queue an error block on the frame.
// This will cause the frame decoder to return when it encounters the block.
// Returns true if the decoder was added.
func (d *frameDec) sendErr(block *blockDec, err error) bool {
d.asyncRunningMu.Lock()
defer d.asyncRunningMu.Unlock()
if !d.asyncRunning {
return false
}
println("sending error", err.Error())
block.sendErr(err)
d.decoding <- block
return true
}
// checkCRC will check the checksum if the frame has one. // checkCRC will check the checksum if the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil. // Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error { func (d *frameDec) checkCRC() error {
@@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error {
return err return err
} }
if !bytes.Equal(tmp[:], want) { if !bytes.Equal(tmp[:], want) && !ignoreCRC {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want) println("CRC Check Failed:", tmp[:], "!=", want)
} }
@@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error {
return nil return nil
} }
func (d *frameDec) initAsync() {
if !d.o.lowMem && !d.SingleSegment {
// set max extra size history to 2MB.
d.history.maxSize = d.history.windowSize + maxBlockSize
}
// re-alloc if more than one extra block size.
if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
d.history.b = make([]byte, 0, d.history.maxSize)
}
if cap(d.history.b) < d.history.maxSize {
d.history.b = make([]byte, 0, d.history.maxSize)
}
if cap(d.decoding) < d.o.concurrent {
d.decoding = make(chan *blockDec, d.o.concurrent)
}
if debugDecoder {
h := d.history
printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
}
d.asyncRunningMu.Lock()
d.asyncRunning = true
d.asyncRunningMu.Unlock()
}
// startDecoder will start decoding blocks and write them to the writer.
// The decoder will stop as soon as an error occurs or at end of frame.
// When the frame has finished decoding the *bufio.Reader
// containing the remaining input will be sent on frameDec.frameDone.
func (d *frameDec) startDecoder(output chan decodeOutput) {
written := int64(0)
defer func() {
d.asyncRunningMu.Lock()
d.asyncRunning = false
d.asyncRunningMu.Unlock()
// Drain the currently decoding.
d.history.error = true
flushdone:
for {
select {
case b := <-d.decoding:
b.history <- &d.history
output <- <-b.result
default:
break flushdone
}
}
println("frame decoder done, signalling done")
d.frameDone.Done()
}()
// Get decoder for first block.
block := <-d.decoding
block.history <- &d.history
for {
var next *blockDec
// Get result
r := <-block.result
if r.err != nil {
println("Result contained error", r.err)
output <- r
return
}
if debugDecoder {
println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
d.offset += int64(len(r.b))
}
if !block.Last {
// Send history to next block
select {
case next = <-d.decoding:
if debugDecoder {
println("Sending ", len(d.history.b), "bytes as history")
}
next.history <- &d.history
default:
// Wait until we have sent the block, so
// other decoders can potentially get the decoder.
next = nil
}
}
// Add checksum, async to decoding.
if d.HasCheckSum {
n, err := d.crc.Write(r.b)
if err != nil {
r.err = err
if n != len(r.b) {
r.err = io.ErrShortWrite
}
output <- r
return
}
}
written += int64(len(r.b))
if d.SingleSegment && uint64(written) > d.FrameContentSize {
println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
r.err = ErrFrameSizeExceeded
output <- r
return
}
if block.Last {
r.err = d.checkCRC()
output <- r
return
}
output <- r
if next == nil {
// There was no decoder available, we wait for one now that we have sent to the writer.
if debugDecoder {
println("Sending ", len(d.history.b), " bytes as history")
}
next = <-d.decoding
next.history <- &d.history
}
block = next
}
}
// runDecoder will create a sync decoder that will decode a block of data. // runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b saved := d.history.b
// We use the history for output to avoid copying it. // We use the history for output to avoid copying it.
d.history.b = dst d.history.b = dst
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data. // Store input length, so we only check new data.
crcStart := len(dst) crcStart := len(dst)
var err error var err error
@@ -489,22 +336,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
println("next block:", dec) println("next block:", dec)
} }
err = dec.decodeBuf(&d.history) err = dec.decodeBuf(&d.history)
if err != nil || dec.Last { if err != nil {
break break
} }
if uint64(len(d.history.b)) > d.o.maxDecodedSize { if uint64(len(d.history.b)) > d.o.maxDecodedSize {
err = ErrDecoderSizeExceeded err = ErrDecoderSizeExceeded
break break
} }
if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded err = ErrFrameSizeExceeded
break break
} }
if dec.Last {
break
}
if debugDecoder {
println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
}
} }
dst = d.history.b dst = d.history.b
if err == nil { if err == nil {
if d.HasCheckSum { if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int var n int
n, err = d.crc.Write(dst[crcStart:]) n, err = d.crc.Write(dst[crcStart:])
if err == nil { if err == nil {

vendor/github.com/klauspost/compress/zstd/fuzz.go

@@ -0,0 +1,11 @@
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true

vendor/github.com/klauspost/compress/zstd/fuzz_none.go

@@ -0,0 +1,11 @@
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false


@@ -10,20 +10,31 @@ import (
// history contains the information transferred between blocks. // history contains the information transferred between blocks.
type history struct { type history struct {
b []byte // Literal decompression
huffTree *huff0.Scratch huffTree *huff0.Scratch
recentOffsets [3]int
// Sequence decompression
decoders sequenceDecs decoders sequenceDecs
windowSize int recentOffsets [3]int
maxSize int
error bool // History buffer...
dict *dict b []byte
// ignoreBuffer is meant to ignore a number of bytes
// when checking for matches in history
ignoreBuffer int
windowSize int
allocFrameBuffer int // needed?
error bool
dict *dict
} }
// reset will reset the history to initial state of a frame. // reset will reset the history to initial state of a frame.
// The history must already have been initialized to the desired size. // The history must already have been initialized to the desired size.
func (h *history) reset() { func (h *history) reset() {
h.b = h.b[:0] h.b = h.b[:0]
h.ignoreBuffer = 0
h.error = false h.error = false
h.recentOffsets = [3]int{1, 4, 8} h.recentOffsets = [3]int{1, 4, 8}
if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
@@ -35,7 +46,7 @@ func (h *history) reset() {
if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f) fseDecoderPool.Put(f)
} }
h.decoders = sequenceDecs{} h.decoders = sequenceDecs{br: h.decoders.br}
if h.huffTree != nil { if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree { if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree) huffDecoderPool.Put(h.huffTree)
@@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) {
h.decoders.litLengths = dict.llDec h.decoders.litLengths = dict.llDec
h.decoders.offsets = dict.ofDec h.decoders.offsets = dict.ofDec
h.decoders.matchLengths = dict.mlDec h.decoders.matchLengths = dict.mlDec
h.decoders.dict = dict.content
h.recentOffsets = dict.offsets h.recentOffsets = dict.offsets
h.huffTree = dict.litEnc h.huffTree = dict.litEnc
} }
@@ -83,6 +95,24 @@ func (h *history) append(b []byte) {
copy(h.b[h.windowSize-len(b):], b) copy(h.b[h.windowSize-len(b):], b)
} }
// ensureBlock will ensure there is space for at least one block...
func (h *history) ensureBlock() {
if cap(h.b) < h.allocFrameBuffer {
h.b = make([]byte, 0, h.allocFrameBuffer)
return
}
avail := cap(h.b) - len(h.b)
if avail >= h.windowSize || avail > maxCompressedBlockSize {
return
}
// Move data down so we only have window size left.
// We know we have less than window size in b at this point.
discard := len(h.b) - h.windowSize
copy(h.b, h.b[discard:])
h.b = h.b[:h.windowSize]
}
// append bytes to history without ever discarding anything. // append bytes to history without ever discarding anything.
func (h *history) appendKeep(b []byte) { func (h *history) appendKeep(b []byte) {
h.b = append(h.b, b...) h.b = append(h.b, b...)


@@ -20,6 +20,10 @@ type seq struct {
llCode, mlCode, ofCode uint8 llCode, mlCode, ofCode uint8
} }
type seqVals struct {
ll, ml, mo int
}
func (s seq) String() string { func (s seq) String() string {
if s.offset <= 3 { if s.offset <= 3 {
if s.offset == 0 { if s.offset == 0 {
@@ -61,16 +65,18 @@ type sequenceDecs struct {
offsets sequenceDec offsets sequenceDec
matchLengths sequenceDec matchLengths sequenceDec
prevOffset [3]int prevOffset [3]int
hist []byte
dict []byte dict []byte
literals []byte literals []byte
out []byte out []byte
nSeqs int
br *bitReader
seqSize int
windowSize int windowSize int
maxBits uint8 maxBits uint8
} }
// initialize all 3 decoders from the stream input. // initialize all 3 decoders from the stream input.
func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
if err := s.litLengths.init(br); err != nil { if err := s.litLengths.init(br); err != nil {
return errors.New("litLengths:" + err.Error()) return errors.New("litLengths:" + err.Error())
} }
@@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
if err := s.matchLengths.init(br); err != nil { if err := s.matchLengths.init(br); err != nil {
return errors.New("matchLengths:" + err.Error()) return errors.New("matchLengths:" + err.Error())
} }
s.literals = literals s.br = br
s.hist = hist.b
s.prevOffset = hist.recentOffsets s.prevOffset = hist.recentOffsets
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
s.windowSize = hist.windowSize s.windowSize = hist.windowSize
@@ -94,11 +99,261 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
} }
// decode sequences from the stream with the provided history. // decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
for _, seq := range seqs {
// Add literals
copy(out[t:], s.literals[:seq.ll])
t += seq.ll
s.literals = s.literals[seq.ll:]
// Copy from dictionary...
if seq.mo > t+len(hist) || seq.mo > s.windowSize {
if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
}
// we may be in dictionary.
dictO := len(s.dict) - (seq.mo - (t + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
}
end := dictO + seq.ml
if end > len(s.dict) {
n := len(s.dict) - dictO
copy(out[t:], s.dict[dictO:])
t += n
seq.ml -= n
} else {
copy(out[t:], s.dict[dictO:end])
t += end - dictO
continue
}
}
// Copy from history.
if v := seq.mo - t; v > 0 {
// v is the start position in history from end.
start := len(hist) - v
if seq.ml > v {
// Some goes into current block.
// Copy remainder of history
copy(out[t:], hist[start:])
t += v
seq.ml -= v
} else {
copy(out[t:], hist[start:start+seq.ml])
t += seq.ml
continue
}
}
// We must be in current buffer now
if seq.ml > 0 {
start := t - seq.mo
if seq.ml <= t-start {
// No overlap
copy(out[t:], out[start:start+seq.ml])
t += seq.ml
continue
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
src := out[start : start+seq.ml]
dst := out[t:]
dst = dst[:len(src)]
t += len(src)
// Destination is the space we just added.
for i := range src {
dst[i] = src[i]
}
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(history *history) error {
br := s.br
seqs := s.nSeqs
startSize := len(s.out) startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks. // Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := seqs - 1; i >= 0; i-- { for i := seqs - 1; i >= 0; i-- {
if br.overread() { if br.overread() {
@@ -151,7 +406,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if temp == 0 { if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1 // 0 is not valid; input is corrupted; force offset to 1
println("temp was 0") println("WARNING: temp was 0")
temp = 1 temp = 1
} }
@@ -176,51 +431,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if ll > len(s.literals) { if ll > len(s.literals) {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
} }
size := ll + ml + len(s.out) size := ll + ml + len(out)
if size-startSize > maxBlockSize { if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", size) return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
} }
if size > cap(s.out) { if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions // Not enough size, which can happen under high volume block streaming conditions
// but could be if destination slice is too small for sync operations. // but could be if destination slice is too small for sync operations.
// over-allocating here can create a large amount of GC pressure so we try to keep // over-allocating here can create a large amount of GC pressure so we try to keep
// it as contained as possible // it as contained as possible
used := len(s.out) - startSize used := len(out) - startSize
addBytes := 256 + ll + ml + used>>2 addBytes := 256 + ll + ml + used>>2
// Clamp to max block size. // Clamp to max block size.
if used+addBytes > maxBlockSize { if used+addBytes > maxBlockSize {
addBytes = maxBlockSize - used addBytes = maxBlockSize - used
} }
s.out = append(s.out, make([]byte, addBytes)...) out = append(out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes] out = out[:len(out)-addBytes]
} }
if ml > maxMatchLen { if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml) return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
} }
// Add literals // Add literals
s.out = append(s.out, s.literals[:ll]...) out = append(out, s.literals[:ll]...)
s.literals = s.literals[ll:] s.literals = s.literals[ll:]
out := s.out
if mo == 0 && ml > 0 { if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
} }
if mo > len(s.out)+len(hist) || mo > s.windowSize { if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 { if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
} }
// we may be in dictionary. // we may be in dictionary.
dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) { if dictO < 0 || dictO >= len(s.dict) {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
} }
end := dictO + ml end := dictO + ml
if end > len(s.dict) { if end > len(s.dict) {
out = append(out, s.dict[dictO:]...) out = append(out, s.dict[dictO:]...)
mo -= len(s.dict) - dictO
ml -= len(s.dict) - dictO ml -= len(s.dict) - dictO
} else { } else {
out = append(out, s.dict[dictO:end]...) out = append(out, s.dict[dictO:end]...)
@@ -231,26 +484,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
// Copy from history. // Copy from history.
// TODO: Blocks without history could be made to ignore this completely. // TODO: Blocks without history could be made to ignore this completely.
if v := mo - len(s.out); v > 0 { if v := mo - len(out); v > 0 {
// v is the start position in history from end. // v is the start position in history from end.
start := len(s.hist) - v start := len(hist) - v
if ml > v { if ml > v {
// Some goes into current block. // Some goes into current block.
// Copy remainder of history // Copy remainder of history
out = append(out, s.hist[start:]...) out = append(out, hist[start:]...)
mo -= v
ml -= v ml -= v
} else { } else {
out = append(out, s.hist[start:start+ml]...) out = append(out, hist[start:start+ml]...)
ml = 0 ml = 0
} }
} }
// We must be in current buffer now // We must be in current buffer now
if ml > 0 { if ml > 0 {
start := len(s.out) - mo start := len(out) - mo
if ml <= len(s.out)-start { if ml <= len(out)-start {
// No overlap // No overlap
out = append(out, s.out[start:start+ml]...) out = append(out, out[start:start+ml]...)
} else { } else {
// Overlapping copy // Overlapping copy
// Extend destination slice and copy one byte at the time. // Extend destination slice and copy one byte at the time.
@@ -264,7 +516,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
} }
} }
} }
s.out = out
if i == 0 { if i == 0 {
// This is the last sequence, so we shouldn't update state. // This is the last sequence, so we shouldn't update state.
break break
@@ -291,9 +542,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
} }
} }
// Check if space for literals
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
}
// Add final literals // Add final literals
s.out = append(s.out, s.literals...) s.out = append(out, s.literals...)
return nil return br.close()
} }
// update states, at least 27 bits must be available. // update states, at least 27 bits must be available.
@@ -457,36 +713,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
s.prevOffset[0] = temp s.prevOffset[0] = temp
return temp return temp
} }
// mergeHistory will merge history.
func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
for i := uint(0); i < 3; i++ {
var sNew, sHist *sequenceDec
switch i {
default:
// same as "case 0":
sNew = &s.litLengths
sHist = &hist.litLengths
case 1:
sNew = &s.offsets
sHist = &hist.offsets
case 2:
sNew = &s.matchLengths
sHist = &hist.matchLengths
}
if sNew.repeat {
if sHist.fse == nil {
return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
}
continue
}
if sNew.fse == nil {
return nil, fmt.Errorf("sequence stream %d, no fse found", i)
}
if sHist.fse != nil && !sHist.fse.preDefined {
fseDecoderPool.Put(sHist.fse)
}
sHist.fse = sNew.fse
}
return hist, nil
}


@@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool var zipReaderPool sync.Pool
// newZipReader cannot be used since we would leak goroutines... // newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser { func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder) dec, ok := zipReaderPool.Get().(*Decoder)
if ok { if ok {
@@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock() r.mu.Lock()
defer r.mu.Unlock() defer r.mu.Unlock()
if r.dec == nil { if r.dec == nil {
return 0, errors.New("Read after Close") return 0, errors.New("read after close or EOF")
} }
dec, err := r.dec.Read(p) dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.dec = nil
}
return dec, err return dec, err
} }
@@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries. // ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example. // See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser { func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return func(r io.Reader) io.ReadCloser { return newZipReader
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
if err != nil {
panic(err)
}
return d.IOReadCloser()
}
} }
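ZipDecompressor now hands back the pooled newZipReader instead of constructing a throw-away low-memory decoder per call. As a hedged sketch of how these helpers plug into the standard library: only ZipCompressor, ZipDecompressor and the ZipMethodWinZip constant are assumed from this package (ZipMethodWinZip is not shown in the hunk above); everything else is archive/zip.

package main

import (
	"archive/zip"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Create("example.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	// Register the zstd compressor under the WinZip zstd method ID.
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, zstd inside zip\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	// Readers register the counterpart via (*zip.Reader).RegisterDecompressor
	// with zstd.ZipDecompressor().
}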

View File

@@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this. // Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
var ( var (
// ErrReservedBlockType is returned when a reserved block type is found. // ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input. // Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
// Typically returned on invalid input. // Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small") ErrBlockTooSmall = errors.New("block too small")
// ErrUnexpectedBlockSize is returned when a block has unexpected size.
// Typically returned on invalid input.
ErrUnexpectedBlockSize = errors.New("unexpected block size")
// ErrMagicMismatch is returned when a "magic" number isn't what is expected. // ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input. // Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch") ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
@@ -75,6 +82,10 @@ var (
// This is only returned if SingleSegment is specified on the frame. // This is only returned if SingleSegment is specified on the frame.
ErrFrameSizeExceeded = errors.New("frame size exceeded") ErrFrameSizeExceeded = errors.New("frame size exceeded")
// ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
// This is only returned if SingleSegment is specified on the frame.
ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
// ErrCRCMismatch is returned if CRC mismatches. // ErrCRCMismatch is returned if CRC mismatches.
ErrCRCMismatch = errors.New("CRC check failed") ErrCRCMismatch = errors.New("CRC check failed")

vendor/github.com/lrstanley/girc/.editorconfig

@@ -0,0 +1,50 @@
# THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform.
#
# editorconfig.org
root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
max_line_length = 100
[*.tf]
indent_size = 2
[*.go]
indent_style = tab
indent_size = 4
[*.md]
trim_trailing_whitespace = false
[*.{md,py,sh,yml,yaml,js,ts,vue,css}]
max_line_length = 105
[*.{yml,yaml,toml}]
indent_size = 2
[*.json]
indent_size = 2
insert_final_newline = ignore
[*.html]
max_line_length = 140
indent_size = 2
[*.{js,ts,vue,css}]
indent_size = 2
[Makefile]
indent_style = tab
[**.min.js]
indent_style = ignore
insert_final_newline = ignore
[*.bat]
indent_style = tab

vendor/github.com/lrstanley/girc/.golangci.yml

@@ -0,0 +1,38 @@
# THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform.
run:
tests: False
timeout: 3m
issues:
max-per-linter: 0
max-same-issues: 0
severity:
default-severity: error
rules:
- linters:
- errcheck
- gocritic
severity: warning
linters:
enable:
- asciicheck
- exportloopref
- gci
- gocritic
- gofmt
- misspell
linters-settings:
gocritic:
disabled-checks:
- hugeParam
- ifElseChain
enabled-tags:
- diagnostic
- opinionated
- performance
- style
govet:
check-shadowing: true


@@ -1,25 +0,0 @@
language: go
go:
- 1.11.x
- tip
before_install:
- go get -v golang.org/x/lint/golint
script:
- $HOME/gopath/bin/golint -min_confidence 0.9 -set_exit_status
- GORACE="exitcode=1 halt_on_error=1" go test -v -coverprofile=coverage.txt -race -timeout 3m -count 3 -cpu 1,4
- go vet -v .
after_success:
- bash <(curl -s https://codecov.io/bash)
branches:
only:
- master
notifications:
irc:
channels:
- irc.byteirc.org#/dev/null
template:
- "%{repository} #%{build_number} %{branch}/%{commit}: %{author} -- %{message}
%{build_url}"
on_success: change
on_failure: change
skip_join: false


@@ -1,32 +0,0 @@
# Contributing
## Issue submission
* When submitting an issue or bug report, please ensure to provide as much
information as possible, please ensure that you are running on the latest
stable version (tagged), or when using master, provide the specific commit
being used.
* Provide the minimum needed viable source to replicate the problem.
## Pull requests
To review what is currently being worked on, or looked into, feel free to head
over to the [issues list](../../issues).
Below are a few guidelines if you would like to contribute. Keep the code
clean, standardized, and much of the quality should match Golang's standard
library and common idioms.
* Always test using the latest Go version.
* Always use `gofmt` before committing anything.
* Always have proper documentation before committing.
* Keep the same whitespacing, documentation, and newline format as the
rest of the project.
* Only use 3rd party libraries if necessary. If only a small portion of
the library is needed, simply rewrite it within the library to prevent
useless imports.
* Also see [golang/go/wiki/CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments)
If you would like to assist, and the pull request is quite large and/or it has
the potential of being a breaking change, please open an issue first so it can
be discussed.


@@ -1,12 +1,59 @@
<p align="center"><a href="https://pkg.go.dev/github.com/lrstanley/girc"><img width="270" src="http://i.imgur.com/DEnyrdB.png"></a></p> <p align="center"><a href="https://pkg.go.dev/github.com/lrstanley/girc"><img width="270" src="http://i.imgur.com/DEnyrdB.png"></a></p>
<p align="center">girc, a flexible IRC library for Go</p> <!-- template:begin:header -->
<!-- do not edit anything in this "template" block, its auto-generated -->
<p align="center">girc -- :bomb: girc is a flexible IRC library for Go :ok_hand:</p>
<p align="center"> <p align="center">
<a href="https://github.com/lrstanley/girc/actions"><img src="https://github.com/lrstanley/girc/workflows/test/badge.svg" alt="Test Status"></a> <a href="https://github.com/lrstanley/girc/tags">
<a href="https://codecov.io/gh/lrstanley/girc"><img src="https://codecov.io/gh/lrstanley/girc/branch/master/graph/badge.svg" alt="Coverage Status"></a> <img title="Latest Semver Tag" src="https://img.shields.io/github/v/tag/lrstanley/girc?style=flat-square">
<a href="https://pkg.go.dev/github.com/lrstanley/girc"><img src="https://pkg.go.dev/badge/github.com/lrstanley/girc" alt="GoDoc"></a> </a>
<a href="https://goreportcard.com/report/github.com/lrstanley/girc"><img src="https://goreportcard.com/badge/github.com/lrstanley/girc" alt="Go Report Card"></a> <a href="https://github.com/lrstanley/girc/commits/master">
<a href="https://liam.sh/chat"><img src="https://img.shields.io/badge/community-chat%20with%20us-green.svg" alt="Community Chat"></a> <img title="Last commit" src="https://img.shields.io/github/last-commit/lrstanley/girc?style=flat-square">
</a>
<a href="https://github.com/lrstanley/girc/actions?query=workflow%3Atest+event%3Apush">
<img title="GitHub Workflow Status (test @ master)" src="https://img.shields.io/github/workflow/status/lrstanley/girc/test/master?label=test&style=flat-square&event=push">
</a>
<a href="https://codecov.io/gh/lrstanley/girc">
<img title="Code Coverage" src="https://img.shields.io/codecov/c/github/lrstanley/girc/master?style=flat-square">
</a>
<a href="https://pkg.go.dev/github.com/lrstanley/girc">
<img title="Go Documentation" src="https://pkg.go.dev/badge/github.com/lrstanley/girc?style=flat-square">
</a>
<a href="https://goreportcard.com/report/github.com/lrstanley/girc">
<img title="Go Report Card" src="https://goreportcard.com/badge/github.com/lrstanley/girc?style=flat-square">
</a>
</p> </p>
<p align="center">
<a href="https://github.com/lrstanley/girc/issues?q=is:open+is:issue+label:bug">
<img title="Bug reports" src="https://img.shields.io/github/issues/lrstanley/girc/bug?label=issues&style=flat-square">
</a>
<a href="https://github.com/lrstanley/girc/issues?q=is:open+is:issue+label:enhancement">
<img title="Feature requests" src="https://img.shields.io/github/issues/lrstanley/girc/enhancement?label=feature%20requests&style=flat-square">
</a>
<a href="https://github.com/lrstanley/girc/pulls">
<img title="Open Pull Requests" src="https://img.shields.io/github/issues-pr/lrstanley/girc?label=prs&style=flat-square">
</a>
<a href="https://github.com/lrstanley/girc/discussions/new?category=q-a">
<img title="Ask a Question" src="https://img.shields.io/badge/support-ask_a_question!-blue?style=flat-square">
</a>
<a href="https://liam.sh/chat"><img src="https://img.shields.io/badge/discord-bytecord-blue.svg?style=flat-square" title="Discord Chat"></a>
</p>
<!-- template:end:header -->
<!-- template:begin:toc -->
<!-- do not edit anything in this "template" block, its auto-generated -->
## :link: Table of Contents
- [Features](#features)
- [Installing](#installing)
- [Examples](#examples)
- [References](#references)
- [Support &amp; Assistance](#raising_hand_man-support--assistance)
- [Contributing](#handshake-contributing)
- [License](#balance_scale-license)
<!-- template:end:toc -->
## Features ## Features
@@ -48,35 +95,6 @@ usecases/examples/projects which utilize girc:
Working on a project and want to add it to the list? Submit a pull request! Working on a project and want to add it to the list? Submit a pull request!
## Contributing
Please review the [CONTRIBUTING](CONTRIBUTING.md) doc for submitting issues/a guide
on submitting pull requests and helping out.
## License
Copyright (c) 2016 Liam Stanley <me@liamstanley.io>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
girc artwork licensed under [CC 3.0](http://creativecommons.org/licenses/by/3.0/) based on Renee French under Creative Commons 3.0 Attributions.
## References ## References
* [IRCv3: Specification Docs](http://ircv3.net/irc/) * [IRCv3: Specification Docs](http://ircv3.net/irc/)
@@ -93,3 +111,61 @@ girc artwork licensed under [CC 3.0](http://creativecommons.org/licenses/by/3.0/
* [RFC7194: Default Port for Internet Relay Chat (IRC) via TLS/SSL](https://tools.ietf.org/html/rfc7194) * [RFC7194: Default Port for Internet Relay Chat (IRC) via TLS/SSL](https://tools.ietf.org/html/rfc7194)
* [RFC4422: Simple Authentication and Security Layer](https://tools.ietf.org/html/rfc4422) ([SASL EXTERNAL](https://tools.ietf.org/html/rfc4422#appendix-A)) * [RFC4422: Simple Authentication and Security Layer](https://tools.ietf.org/html/rfc4422) ([SASL EXTERNAL](https://tools.ietf.org/html/rfc4422#appendix-A))
* [RFC4616: The PLAIN SASL Mechanism](https://tools.ietf.org/html/rfc4616) * [RFC4616: The PLAIN SASL Mechanism](https://tools.ietf.org/html/rfc4616)
<!-- template:begin:support -->
<!-- do not edit anything in this "template" block, its auto-generated -->
## :raising_hand_man: Support & Assistance
* :heart: Please review the [Code of Conduct](.github/CODE_OF_CONDUCT.md) for
guidelines on ensuring everyone has the best experience interacting with
the community.
* :raising_hand_man: Take a look at the [support](.github/SUPPORT.md) document on
guidelines for tips on how to ask the right questions.
* :lady_beetle: For all features/bugs/issues/questions/etc, [head over here](https://github.com/lrstanley/girc/issues/new/choose).
<!-- template:end:support -->
<!-- template:begin:contributing -->
<!-- do not edit anything in this "template" block, its auto-generated -->
## :handshake: Contributing
* :heart: Please review the [Code of Conduct](.github/CODE_OF_CONDUCT.md) for guidelines
on ensuring everyone has the best experience interacting with the
community.
* :clipboard: Please review the [contributing](.github/CONTRIBUTING.md) doc for submitting
issues/a guide on submitting pull requests and helping out.
* :old_key: For anything security related, please review this repositories [security policy](https://github.com/lrstanley/girc/security/policy).
<!-- template:end:contributing -->
<!-- template:begin:license -->
<!-- do not edit anything in this "template" block, its auto-generated -->
## :balance_scale: License
```
MIT License
Copyright (c) 2016 Liam Stanley <me@liamstanley.io>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
_Also located [here](LICENSE)_
<!-- template:end:license -->
girc artwork licensed under [CC 3.0](http://creativecommons.org/licenses/by/3.0/)
based on Renee French under Creative Commons 3.0 Attributions.


@@ -427,7 +427,7 @@ func handleMOTD(c *Client, e Event) {
} }
// Otherwise, assume we're getting sent the MOTD line-by-line. // Otherwise, assume we're getting sent the MOTD line-by-line.
if len(c.state.motd) != 0 { if c.state.motd != "" {
c.state.motd += "\n" c.state.motd += "\n"
} }
c.state.motd += e.Last() c.state.motd += e.Last()


@@ -64,7 +64,7 @@ func possibleCapList(c *Client) map[string][]string {
if !c.Config.DisableSTS && !c.Config.SSL { if !c.Config.DisableSTS && !c.Config.SSL {
// If fallback supported, and we failed recently, don't try negotiating STS. // If fallback supported, and we failed recently, don't try negotiating STS.
// ONLY do this fallback if we're expired (primarily useful during the first // ONLY do this fallback if we're expired (primarily useful during the first
// sts negotation). // sts negotiation).
if time.Since(c.state.sts.lastFailed) < 5*time.Minute && !c.Config.DisableSTSFallback { if time.Since(c.state.sts.lastFailed) < 5*time.Minute && !c.Config.DisableSTSFallback {
c.debug.Println("skipping strict transport policy negotiation; failed within the last 5 minutes") c.debug.Println("skipping strict transport policy negotiation; failed within the last 5 minutes")
} else { } else {


@@ -10,7 +10,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"net" "net"
"os" "os"
@@ -268,7 +267,7 @@ func New(config Config) *Client {
if envDebug { if envDebug {
c.debug = log.New(os.Stderr, "debug:", log.Ltime|log.Lshortfile) c.debug = log.New(os.Stderr, "debug:", log.Ltime|log.Lshortfile)
} else { } else {
c.debug = log.New(ioutil.Discard, "", 0) c.debug = log.New(io.Discard, "", 0)
} }
} else { } else {
if envDebug { if envDebug {
@@ -411,7 +410,7 @@ func (c *Client) execLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
// Handles incoming ERROR responses. These are only ever sent // Handles incoming ERROR responses. These are only ever sent
// by the server (with the exception that this library may use // by the server (with the exception that this library may use
// them as a lower level way of signalling to disconnect due // them as a lower level way of signalling to disconnect due
// to some other client-choosen error), and should always be // to some other client-chosen error), and should always be
// followed up by the server disconnecting the client. If for // followed up by the server disconnecting the client. If for
// some reason the server doesn't disconnect the client, or // some reason the server doesn't disconnect the client, or
// if this library is the source of the error, this should // if this library is the source of the error, this should
@@ -446,7 +445,7 @@ func (c *Client) DisableTracking() {
// Server returns the string representation of host+port pair for the connection. // Server returns the string representation of host+port pair for the connection.
func (c *Client) Server() string { func (c *Client) Server() string {
c.state.Lock() c.state.Lock()
defer c.state.Lock() defer c.state.Unlock()
return c.server() return c.server()
} }


@@ -5,9 +5,9 @@
package girc package girc
import ( import (
"strconv"
"errors" "errors"
"fmt" "fmt"
"strconv"
) )
// Commands holds a large list of useful methods to interact with the server, // Commands holds a large list of useful methods to interact with the server,
@@ -37,7 +37,7 @@ func (cmd *Commands) Join(channels ...string) {
continue continue
} }
if len(buffer) == 0 { if buffer == "" {
buffer = channels[i] buffer = channels[i]
} else { } else {
buffer += "," + channels[i] buffer += "," + channels[i]
@@ -341,7 +341,7 @@ func (cmd *Commands) List(channels ...string) {
continue continue
} }
if len(buffer) == 0 { if buffer == "" {
buffer = channels[i] buffer = channels[i]
} else { } else {
buffer += "," + channels[i] buffer += "," + channels[i]


@@ -149,7 +149,7 @@ const (
RPL_ENDOFWHOWAS = "369" RPL_ENDOFWHOWAS = "369"
RPL_LISTSTART = "321" RPL_LISTSTART = "321"
RPL_LIST = "322" RPL_LIST = "322"
RPL_LISTEND = "323" RPL_LISTEND = "323" //nolint:misspell // it's correct.
RPL_UNIQOPIS = "325" RPL_UNIQOPIS = "325"
RPL_CHANNELMODEIS = "324" RPL_CHANNELMODEIS = "324"
RPL_NOTOPIC = "331" RPL_NOTOPIC = "331"


@@ -104,7 +104,7 @@ func EncodeCTCP(ctcp *CTCPEvent) (out string) {
// EncodeCTCPRaw is much like EncodeCTCP, however accepts a raw command and // EncodeCTCPRaw is much like EncodeCTCP, however accepts a raw command and
// string as input. // string as input.
func EncodeCTCPRaw(cmd, text string) (out string) { func EncodeCTCPRaw(cmd, text string) (out string) {
if len(cmd) <= 0 { if cmd == "" {
return "" return ""
} }


@@ -52,7 +52,7 @@ func ParseEvent(raw string) (e *Event) {
i = 0 i = 0
} }
if raw[0] == messagePrefix { if raw != "" && raw[0] == messagePrefix {
// Prefix ends with a space. // Prefix ends with a space.
i = strings.IndexByte(raw, eventSpace) i = strings.IndexByte(raw, eventSpace)
@@ -283,7 +283,6 @@ func (e *Event) Bytes() []byte {
// Space separated list of arguments. // Space separated list of arguments.
if len(e.Params) > 0 { if len(e.Params) > 0 {
// buffer.WriteByte(eventSpace)
for i := 0; i < len(e.Params); i++ { for i := 0; i < len(e.Params); i++ {
if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || strings.HasPrefix(e.Params[i], ":") || e.Params[i] == "") { if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || strings.HasPrefix(e.Params[i], ":") || e.Params[i] == "") {
buffer.WriteString(string(eventSpace) + string(messagePrefix) + e.Params[i]) buffer.WriteString(string(eventSpace) + string(messagePrefix) + e.Params[i])
@@ -298,7 +297,9 @@ func (e *Event) Bytes() []byte {
buffer.Truncate(maxLength) buffer.Truncate(maxLength)
} }
out := buffer.Bytes() // If we truncated in the middle of a utf8 character, we need to remove
// the other (now invalid) bytes.
out := bytes.ToValidUTF8(buffer.Bytes(), nil)
// Strip newlines and carriage returns. // Strip newlines and carriage returns.
for i := 0; i < len(out); i++ { for i := 0; i < len(out); i++ {
@@ -621,7 +622,7 @@ func (s *Source) IsHostmask() bool {
// IsServer returns true if this source looks like a server name. // IsServer returns true if this source looks like a server name.
func (s *Source) IsServer() bool { func (s *Source) IsServer() bool {
return len(s.Ident) <= 0 && len(s.Host) <= 0 return s.Ident == "" && s.Host == ""
} }
// writeTo is an utility function to write the source to the bytes.Buffer // writeTo is an utility function to write the source to the bytes.Buffer

View File

@@ -127,10 +127,10 @@ func Fmt(text string) string {
 // See Fmt() for more information.
 func TrimFmt(text string) string {
 	for color := range fmtColors {
-		text = strings.Replace(text, string(fmtOpenChar)+color+string(fmtCloseChar), "", -1)
+		text = strings.ReplaceAll(text, string(fmtOpenChar)+color+string(fmtCloseChar), "")
 	}
 	for code := range fmtCodes {
-		text = strings.Replace(text, string(fmtOpenChar)+code+string(fmtCloseChar), "", -1)
+		text = strings.ReplaceAll(text, string(fmtOpenChar)+code+string(fmtCloseChar), "")
 	}

 	return text
@@ -138,7 +138,7 @@ func TrimFmt(text string) string {
 // This is really the only fastest way of doing this (marginally better than
 // actually trying to parse it manually.)
-var reStripColor = regexp.MustCompile(`\x03([019]?[0-9](,[019]?[0-9])?)?`)
+var reStripColor = regexp.MustCompile(`\x03([019]?\d(,[019]?\d)?)?`)

 // StripRaw tries to strip all ASCII format codes that are used for IRC.
 // Primarily, foreground/background colors, and other control bytes like
@@ -148,7 +148,7 @@ func StripRaw(text string) string {
 	text = reStripColor.ReplaceAllString(text, "")

 	for _, code := range fmtCodes {
-		text = strings.Replace(text, code, "", -1)
+		text = strings.ReplaceAll(text, code, "")
 	}

 	return text
@@ -219,7 +219,7 @@ func IsValidChannel(channel string) bool {
 //   digit   = 0x30-0x39
 //   special = 0x5B-0x60 / 0x7B-0x7D
 func IsValidNick(nick string) bool {
-	if len(nick) <= 0 {
+	if nick == "" {
 		return false
 	}
@@ -256,7 +256,7 @@ func IsValidNick(nick string) bool {
 //   user = 1*( %x01-09 / %x0B-0C / %x0E-1F / %x21-3F / %x41-FF )
 //          ; any octet except NUL, CR, LF, " " and "@"
 func IsValidUser(name string) bool {
-	if len(name) <= 0 {
+	if name == "" {
 		return false
 	}
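Two mechanical modernisations appear in format.go: `strings.Replace(s, old, new, -1)` becomes `strings.ReplaceAll(s, old, new)`, and `[0-9]` becomes `\d` in the colour-stripping regexp; neither changes behaviour. A rough sketch of the stripping idea, using a made-up input rather than girc's real format-code tables:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Mirrors the updated pattern in the diff: the mIRC colour prefix \x03
// followed by an optional foreground(,background) pair.
var reStripColor = regexp.MustCompile(`\x03([019]?\d(,[019]?\d)?)?`)

func main() {
	raw := "\x034,12hello \x02world\x02"

	text := reStripColor.ReplaceAllString(raw, "")
	// ReplaceAll(s, old, "") is exactly Replace(s, old, "", -1).
	text = strings.ReplaceAll(text, "\x02", "")

	fmt.Printf("%q\n", text) // "hello world"
}
```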

View File

@@ -287,7 +287,7 @@ func (c *Caller) Remove(cuid string) (success bool) {
 // on your own.
 func (c *Caller) remove(cuid string) (success bool) {
 	cmd, uid := c.cuidToID(cuid)
-	if len(cmd) == 0 || len(uid) == 0 {
+	if cmd == "" || uid == "" {
 		return false
 	}

View File

@@ -34,7 +34,7 @@ func (c *CMode) Short() string {
 // String returns a string representation of a mode, including optional
 // arguments. E.g. "+b user*!ident@host.*.com"
 func (c *CMode) String() string {
-	if len(c.args) == 0 {
+	if c.args == "" {
 		return c.Short()
 	}
@@ -106,7 +106,7 @@ func (c *CModes) HasMode(mode string) bool {
 func (c *CModes) Get(mode string) (args string, ok bool) {
 	for i := 0; i < len(c.modes); i++ {
 		if string(c.modes[i].name) == mode {
-			if len(c.modes[i].args) == 0 {
+			if c.modes[i].args == "" {
 				return "", false
 			}
@@ -157,7 +157,7 @@ func (c *CModes) hasArg(set bool, mode byte) (hasArgs, isSetting bool) {
 // For example, the latter would mean applying an incoming MODE with the modes
 // stored for a channel.
 func (c *CModes) Apply(modes []CMode) {
-	var new []CMode
+	var newModes []CMode

 	for j := 0; j < len(c.modes); j++ {
 		isin := false
@@ -166,14 +166,14 @@ func (c *CModes) Apply(modes []CMode) {
 				continue
 			}
 			if c.modes[j].name == modes[i].name && modes[i].add {
-				new = append(new, modes[i])
+				newModes = append(newModes, modes[i])
 				isin = true
 				break
 			}
 		}

 		if !isin {
-			new = append(new, c.modes[j])
+			newModes = append(newModes, c.modes[j])
 		}
 	}
@@ -183,19 +183,19 @@ func (c *CModes) Apply(modes []CMode) {
 		}

 		isin := false
-		for j := 0; j < len(new); j++ {
-			if modes[i].name == new[j].name {
+		for j := 0; j < len(newModes); j++ {
+			if modes[i].name == newModes[j].name {
 				isin = true
 				break
 			}
 		}

 		if !isin {
-			new = append(new, modes[i])
+			newModes = append(newModes, modes[i])
 		}
 	}

-	c.modes = new
+	c.modes = newModes
 }

 // Parse parses a set of flags and args, returning the necessary list of
@@ -221,7 +221,7 @@ func (c *CModes) Parse(flags string, args []string) (out []CMode) {
 		}

 		hasArgs, isSetting := c.hasArg(add, flags[i])
-		if hasArgs && len(args) >= argCount+1 {
+		if hasArgs && len(args) > argCount {
 			mode.args = args[argCount]
 			argCount++
 		}
@@ -351,7 +351,7 @@ func handleMODE(c *Client, e Event) {
 	// Loop through and update users modes as necessary.
 	for i := 0; i < len(modes); i++ {
-		if modes[i].setting || len(modes[i].args) == 0 {
+		if modes[i].setting || modes[i].args == "" {
 			continue
 		}
@@ -493,8 +493,8 @@ func (m *Perms) reset() {
 // set translates raw prefix characters into proper permissions. Only
 // use this function when you have a session lock.
-func (m *Perms) set(prefix string, append bool) {
-	if !append {
+func (m *Perms) set(prefix string, add bool) {
+	if !add {
 		m.reset()
 	}
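The modes.go hunks are mostly renames: the local slice `new` becomes `newModes` and the parameter `append` becomes `add`, because both previously shadowed Go built-ins (legal, but it hides the predeclared identifier for the rest of the scope and trips linters); the `len(args) >= argCount+1` bound is also rewritten as the equivalent `len(args) > argCount`. A contrived sketch of the shadowing problem, unrelated to girc's types:

```go
package main

import "fmt"

func main() {
	// Shadowing the builtin: append() is still usable below,
	// but the builtin new() is no longer reachable in this scope.
	new := []string{"+o"}
	new = append(new, "+v")
	// p := new(int) // would not compile here: new is a []string now
	fmt.Println(new)

	// The renamed form from the diff reads unambiguously.
	newModes := []string{"+o", "+v"}
	fmt.Println(newModes)
}
```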

View File

@@ -128,18 +128,6 @@ func (l *lexer) acceptRun(valid string) {
 	l.backup()
 }

-// acceptRunUntil consumes a run of runes up to a terminator.
-func (l *lexer) acceptRunUntil(term rune) {
-	for term != l.next() {
-	}
-	l.backup()
-}
-
-// hasText returns true if the current parsed text is not empty.
-func (l *lexer) isNotEmpty() bool {
-	return l.pos > l.start
-}
-
 // lineNumber reports which line we're on, based on the position of
 // the previous item returned by nextItem. Doing it this way
 // means we don't have to worry about peek double counting.

View File

@@ -59,14 +59,6 @@ func (p *parser) errorf(format string, args ...interface{}) {
 	panic(fmt.Errorf(format, args...))
 }

-func (p *parser) expect(expected itemType) (token item) {
-	token = p.lex.nextItem()
-	if token.typ != expected {
-		p.unexpected(token)
-	}
-	return token
-}
-
 func (p *parser) expectOneOf(expected ...itemType) (token item) {
 	token = p.lex.nextItem()
 	for _, v := range expected {
@@ -91,5 +83,4 @@ func (p *parser) recover(errp *error) {
 		}
 		*errp = e.(error)
 	}
-	return
 }

View File

@@ -786,7 +786,6 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s
 		}

 		s = s[:start] + new_val + s[end+1:]
 	}
-	return s, nil
 }

 // encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.

View File

@@ -591,7 +591,7 @@ func (cli *Client) RedactEvent(roomID, eventID string, req *ReqRedact) (resp *Re
 // MarkRead marks eventID in roomID as read, signifying the event, and all before it have been read. See https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-rooms-roomid-receipt-receipttype-eventid
 func (cli *Client) MarkRead(roomID, eventID string) error {
 	urlPath := cli.BuildURL("rooms", roomID, "receipt", "m.read", eventID)
-	return cli.MakeRequest("POST", urlPath, nil, nil)
+	return cli.MakeRequest("POST", urlPath, struct{}{}, nil)
 }

 // CreateRoom creates a new Matrix room. See https://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-client-r0-createroom
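The gomatrix hunk swaps the read-receipt request body from `nil` to `struct{}{}`, so the POST carries an empty JSON object (`{}`) rather than no body at all, which some homeservers reject. A rough illustration of the difference, under the assumption that the body is only JSON-encoded when one is supplied (this is not gomatrix's actual `MakeRequest`):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// encodeBody mimics a client that only serialises a body when one is given.
func encodeBody(v interface{}) string {
	if v == nil {
		return "" // no payload at all
	}
	var buf bytes.Buffer
	_ = json.NewEncoder(&buf).Encode(v)
	return buf.String()
}

func main() {
	fmt.Printf("nil body:        %q\n", encodeBody(nil))        // ""
	fmt.Printf("struct{}{} body: %q\n", encodeBody(struct{}{})) // "{}\n"
}
```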

Some files were not shown because too many files have changed in this diff.