Compare commits
	
		
			6 Commits
		
	
	
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 
						 | 
					20f841c513 | ||
| 
						 | 
					d07a3e09c9 | ||
| 
						 | 
					4649876956 | ||
| 
						 | 
					5604d140e3 | ||
| 
						 | 
					8751fb4bb1 | ||
| 
						 | 
					3819062574 | 
@@ -172,7 +172,7 @@ See <https://github.com/42wim/matterbridge/wiki>
 | 
			
		||||
 | 
			
		||||
### Binaries
 | 
			
		||||
 | 
			
		||||
- Latest stable release [v1.25.1](https://github.com/42wim/matterbridge/releases/latest)
 | 
			
		||||
- Latest stable release [v1.25.2](https://github.com/42wim/matterbridge/releases/latest)
 | 
			
		||||
- Development releases (follows master) can be downloaded [here](https://github.com/42wim/matterbridge/actions) selecting the latest green build and then artifacts.
 | 
			
		||||
 | 
			
		||||
To install or upgrade just download the latest [binary](https://github.com/42wim/matterbridge/releases/latest). On \*nix platforms you may need to make the binary executable - you can do this by running `chmod a+x` on the binary (example: `chmod a+x matterbridge-1.24.1-linux-64bit`). After downloading (and making the binary executable, if necessary), follow the instructions on the [howto](https://github.com/42wim/matterbridge/wiki/How-to-create-your-config) for a step by step walkthrough for creating your configuration.
 | 
			
		||||
 
 | 
			
		||||
@@ -83,12 +83,12 @@ func (b *Bdiscord) Connect() error {
 | 
			
		||||
	b.Log.Info("Connection succeeded")
 | 
			
		||||
	b.c.AddHandler(b.messageCreate)
 | 
			
		||||
	b.c.AddHandler(b.messageTyping)
 | 
			
		||||
	b.c.AddHandler(b.memberUpdate)
 | 
			
		||||
	b.c.AddHandler(b.messageUpdate)
 | 
			
		||||
	b.c.AddHandler(b.messageDelete)
 | 
			
		||||
	b.c.AddHandler(b.messageDeleteBulk)
 | 
			
		||||
	b.c.AddHandler(b.memberAdd)
 | 
			
		||||
	b.c.AddHandler(b.memberRemove)
 | 
			
		||||
	b.c.AddHandler(b.memberUpdate)
 | 
			
		||||
	if b.GetInt("debuglevel") == 1 {
 | 
			
		||||
		b.c.AddHandler(b.messageEvent)
 | 
			
		||||
	}
 | 
			
		||||
 
 | 
			
		||||
@@ -7,6 +7,10 @@ import (
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) messageDelete(s *discordgo.Session, m *discordgo.MessageDelete) { //nolint:unparam
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring messageDelete because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	rmsg := config.Message{Account: b.Account, ID: m.ID, Event: config.EventMsgDelete, Text: config.EventMsgDelete}
 | 
			
		||||
	rmsg.Channel = b.getChannelName(m.ChannelID)
 | 
			
		||||
 | 
			
		||||
@@ -17,6 +21,10 @@ func (b *Bdiscord) messageDelete(s *discordgo.Session, m *discordgo.MessageDelet
 | 
			
		||||
 | 
			
		||||
// TODO(qaisjp): if other bridges support bulk deletions, it could be fanned out centrally
 | 
			
		||||
func (b *Bdiscord) messageDeleteBulk(s *discordgo.Session, m *discordgo.MessageDeleteBulk) { //nolint:unparam
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring messageDeleteBulk because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	for _, msgID := range m.Messages {
 | 
			
		||||
		rmsg := config.Message{
 | 
			
		||||
			Account: b.Account,
 | 
			
		||||
@@ -37,6 +45,10 @@ func (b *Bdiscord) messageEvent(s *discordgo.Session, m *discordgo.Event) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) messageTyping(s *discordgo.Session, m *discordgo.TypingStart) {
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring messageTyping because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if !b.GetBool("ShowUserTyping") {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
@@ -52,6 +64,10 @@ func (b *Bdiscord) messageTyping(s *discordgo.Session, m *discordgo.TypingStart)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) messageUpdate(s *discordgo.Session, m *discordgo.MessageUpdate) { //nolint:unparam
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring messageUpdate because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if b.GetBool("EditDisable") {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
@@ -67,6 +83,10 @@ func (b *Bdiscord) messageUpdate(s *discordgo.Session, m *discordgo.MessageUpdat
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) { //nolint:unparam
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring messageCreate because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	var err error
 | 
			
		||||
 | 
			
		||||
	// not relay our own messages
 | 
			
		||||
@@ -144,6 +164,10 @@ func (b *Bdiscord) messageCreate(s *discordgo.Session, m *discordgo.MessageCreat
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) memberUpdate(s *discordgo.Session, m *discordgo.GuildMemberUpdate) {
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring memberUpdate because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if m.Member == nil {
 | 
			
		||||
		b.Log.Warnf("Received member update with no member information: %#v", m)
 | 
			
		||||
	}
 | 
			
		||||
@@ -171,6 +195,13 @@ func (b *Bdiscord) memberUpdate(s *discordgo.Session, m *discordgo.GuildMemberUp
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) memberAdd(s *discordgo.Session, m *discordgo.GuildMemberAdd) {
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring memberAdd because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if b.GetBool("nosendjoinpart") {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if m.Member == nil {
 | 
			
		||||
		b.Log.Warnf("Received member update with no member information: %#v", m)
 | 
			
		||||
		return
 | 
			
		||||
@@ -192,6 +223,13 @@ func (b *Bdiscord) memberAdd(s *discordgo.Session, m *discordgo.GuildMemberAdd)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (b *Bdiscord) memberRemove(s *discordgo.Session, m *discordgo.GuildMemberRemove) {
 | 
			
		||||
	if m.GuildID != b.guildID {
 | 
			
		||||
		b.Log.Debugf("Ignoring memberRemove because it originates from a different guild")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if b.GetBool("nosendjoinpart") {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	if m.Member == nil {
 | 
			
		||||
		b.Log.Warnf("Received member update with no member information: %#v", m)
 | 
			
		||||
		return
 | 
			
		||||
 
 | 
			
		||||
@@ -52,7 +52,7 @@ func (b *Bmattermost) Connect() error {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if strings.HasPrefix(b.getVersion(), "6.") {
 | 
			
		||||
	if strings.HasPrefix(b.getVersion(), "6.") || strings.HasPrefix(b.getVersion(), "7.") {
 | 
			
		||||
		if !b.v6 {
 | 
			
		||||
			b.v6 = true
 | 
			
		||||
		}
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										15
									
								
								changelog.md
									
									
									
									
									
								
							
							
						
						
									
										15
									
								
								changelog.md
									
									
									
									
									
								
							@@ -1,3 +1,18 @@
 | 
			
		||||
# v1.25.2
 | 
			
		||||
 | 
			
		||||
## Enhancements
 | 
			
		||||
 | 
			
		||||
- general: Update dependencies (#1851,#1841)
 | 
			
		||||
- mattermost: Support mattermost v7.x (#1852)
 | 
			
		||||
 | 
			
		||||
## Bugfix
 | 
			
		||||
 | 
			
		||||
- discord: Fix Unwanted join notifications from one Discord server to another (#1612)
 | 
			
		||||
- discord: Ignore events from other guilds, add nosendjoinpart support (#1846)
 | 
			
		||||
 | 
			
		||||
This release couldn't exist without the following contributors:
 | 
			
		||||
@wlcx
 | 
			
		||||
 | 
			
		||||
# v1.25.1
 | 
			
		||||
 | 
			
		||||
## Enhancements
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										50
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										50
									
								
								go.mod
									
									
									
									
									
								
							@@ -6,13 +6,13 @@ require (
 | 
			
		||||
	github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
 | 
			
		||||
	github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
 | 
			
		||||
	github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
 | 
			
		||||
	github.com/SevereCloud/vksdk/v2 v2.14.0
 | 
			
		||||
	github.com/SevereCloud/vksdk/v2 v2.14.1
 | 
			
		||||
	github.com/bwmarrin/discordgo v0.25.0
 | 
			
		||||
	github.com/d5/tengo/v2 v2.10.1
 | 
			
		||||
	github.com/d5/tengo/v2 v2.12.0
 | 
			
		||||
	github.com/davecgh/go-spew v1.1.1
 | 
			
		||||
	github.com/fsnotify/fsnotify v1.5.4
 | 
			
		||||
	github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1
 | 
			
		||||
	github.com/gomarkdown/markdown v0.0.0-20220509074759-a57bf950ab8c
 | 
			
		||||
	github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1
 | 
			
		||||
	github.com/google/gops v0.3.23
 | 
			
		||||
	github.com/gorilla/schema v1.2.0
 | 
			
		||||
	github.com/gorilla/websocket v1.5.0
 | 
			
		||||
@@ -30,7 +30,7 @@ require (
 | 
			
		||||
	github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
 | 
			
		||||
	github.com/matterbridge/matterclient v0.0.0-20220430213656-07aca2731bc9
 | 
			
		||||
	github.com/mattermost/mattermost-server/v5 v5.39.3
 | 
			
		||||
	github.com/mattermost/mattermost-server/v6 v6.6.1
 | 
			
		||||
	github.com/mattermost/mattermost-server/v6 v6.7.0
 | 
			
		||||
	github.com/mattn/godown v0.0.1
 | 
			
		||||
	github.com/mdp/qrterminal v1.0.1
 | 
			
		||||
	github.com/nelsonken/gomf v0.0.0-20190423072027-c65cc0469e94
 | 
			
		||||
@@ -40,22 +40,22 @@ require (
 | 
			
		||||
	github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca
 | 
			
		||||
	github.com/shazow/ssh-chat v1.10.1
 | 
			
		||||
	github.com/sirupsen/logrus v1.8.1
 | 
			
		||||
	github.com/slack-go/slack v0.10.3
 | 
			
		||||
	github.com/spf13/viper v1.11.0
 | 
			
		||||
	github.com/stretchr/testify v1.7.1
 | 
			
		||||
	github.com/slack-go/slack v0.11.0
 | 
			
		||||
	github.com/spf13/viper v1.12.0
 | 
			
		||||
	github.com/stretchr/testify v1.7.2
 | 
			
		||||
	github.com/vincent-petithory/dataurl v1.0.0
 | 
			
		||||
	github.com/writeas/go-strip-markdown v2.0.1+incompatible
 | 
			
		||||
	github.com/yaegashi/msgraph.go v0.1.4
 | 
			
		||||
	github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
 | 
			
		||||
	go.mau.fi/whatsmeow v0.0.0-20220504135614-f1f2a9d231fb
 | 
			
		||||
	golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9
 | 
			
		||||
	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
 | 
			
		||||
	go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154
 | 
			
		||||
	golang.org/x/image v0.0.0-20220617043117-41969df76e82
 | 
			
		||||
	golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
 | 
			
		||||
	golang.org/x/text v0.3.7
 | 
			
		||||
	gomod.garykim.dev/nc-talk v0.3.0
 | 
			
		||||
	google.golang.org/protobuf v1.28.0
 | 
			
		||||
	gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376
 | 
			
		||||
	layeh.com/gumble v0.0.0-20200818122324-146f9205029b
 | 
			
		||||
	modernc.org/sqlite v1.17.2
 | 
			
		||||
	modernc.org/sqlite v1.17.3
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
require (
 | 
			
		||||
@@ -80,8 +80,8 @@ require (
 | 
			
		||||
	github.com/json-iterator/go v1.1.12 // indirect
 | 
			
		||||
	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 | 
			
		||||
	github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
 | 
			
		||||
	github.com/klauspost/compress v1.15.1 // indirect
 | 
			
		||||
	github.com/klauspost/cpuid/v2 v2.0.11 // indirect
 | 
			
		||||
	github.com/klauspost/compress v1.15.6 // indirect
 | 
			
		||||
	github.com/klauspost/cpuid/v2 v2.0.12 // indirect
 | 
			
		||||
	github.com/labstack/gommon v0.3.1 // indirect
 | 
			
		||||
	github.com/magiconair/properties v1.8.6 // indirect
 | 
			
		||||
	github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
 | 
			
		||||
@@ -93,10 +93,10 @@ require (
 | 
			
		||||
	github.com/mattn/go-runewidth v0.0.13 // indirect
 | 
			
		||||
	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
 | 
			
		||||
	github.com/minio/md5-simd v1.1.2 // indirect
 | 
			
		||||
	github.com/minio/minio-go/v7 v7.0.23 // indirect
 | 
			
		||||
	github.com/minio/minio-go/v7 v7.0.24 // indirect
 | 
			
		||||
	github.com/minio/sha256-simd v1.0.0 // indirect
 | 
			
		||||
	github.com/mitchellh/go-homedir v1.1.0 // indirect
 | 
			
		||||
	github.com/mitchellh/mapstructure v1.4.3 // indirect
 | 
			
		||||
	github.com/mitchellh/mapstructure v1.5.0 // indirect
 | 
			
		||||
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 | 
			
		||||
	github.com/modern-go/reflect2 v1.0.2 // indirect
 | 
			
		||||
	github.com/monaco-io/request v1.0.5 // indirect
 | 
			
		||||
@@ -104,8 +104,8 @@ require (
 | 
			
		||||
	github.com/mrexodia/wray v0.0.0-20160318003008-78a2c1f284ff // indirect
 | 
			
		||||
	github.com/opentracing/opentracing-go v1.2.0 // indirect
 | 
			
		||||
	github.com/pborman/uuid v1.2.1 // indirect
 | 
			
		||||
	github.com/pelletier/go-toml v1.9.4 // indirect
 | 
			
		||||
	github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
 | 
			
		||||
	github.com/pelletier/go-toml v1.9.5 // indirect
 | 
			
		||||
	github.com/pelletier/go-toml/v2 v2.0.1 // indirect
 | 
			
		||||
	github.com/philhofer/fwd v1.1.1 // indirect
 | 
			
		||||
	github.com/pkg/errors v0.9.1 // indirect
 | 
			
		||||
	github.com/pmezard/go-difflib v1.0.0 // indirect
 | 
			
		||||
@@ -117,10 +117,10 @@ require (
 | 
			
		||||
	github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 // indirect
 | 
			
		||||
	github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 // indirect
 | 
			
		||||
	github.com/spf13/afero v1.8.2 // indirect
 | 
			
		||||
	github.com/spf13/cast v1.4.1 // indirect
 | 
			
		||||
	github.com/spf13/cast v1.5.0 // indirect
 | 
			
		||||
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 | 
			
		||||
	github.com/spf13/pflag v1.0.5 // indirect
 | 
			
		||||
	github.com/subosito/gotenv v1.2.0 // indirect
 | 
			
		||||
	github.com/subosito/gotenv v1.3.0 // indirect
 | 
			
		||||
	github.com/tinylib/msgp v1.1.6 // indirect
 | 
			
		||||
	github.com/valyala/bytebufferpool v1.0.0 // indirect
 | 
			
		||||
	github.com/valyala/fasttemplate v1.2.1 // indirect
 | 
			
		||||
@@ -134,18 +134,18 @@ require (
 | 
			
		||||
	go.uber.org/multierr v1.7.0 // indirect
 | 
			
		||||
	go.uber.org/zap v1.17.0 // indirect
 | 
			
		||||
	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
 | 
			
		||||
	golang.org/x/mod v0.5.1 // indirect
 | 
			
		||||
	golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
 | 
			
		||||
	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
 | 
			
		||||
	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
 | 
			
		||||
	golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect
 | 
			
		||||
	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
 | 
			
		||||
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 | 
			
		||||
	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
 | 
			
		||||
	golang.org/x/tools v0.1.9 // indirect
 | 
			
		||||
	golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
 | 
			
		||||
	golang.org/x/tools v0.1.10 // indirect
 | 
			
		||||
	golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
 | 
			
		||||
	google.golang.org/appengine v1.6.7 // indirect
 | 
			
		||||
	gopkg.in/ini.v1 v1.66.4 // indirect
 | 
			
		||||
	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 | 
			
		||||
	gopkg.in/yaml.v2 v2.4.0 // indirect
 | 
			
		||||
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 | 
			
		||||
	gopkg.in/yaml.v3 v3.0.1 // indirect
 | 
			
		||||
	lukechampine.com/uint128 v1.1.1 // indirect
 | 
			
		||||
	modernc.org/cc/v3 v3.36.0 // indirect
 | 
			
		||||
	modernc.org/ccgo/v3 v3.16.6 // indirect
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										208
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										208
									
								
								go.sum
									
									
									
									
									
								
							@@ -34,7 +34,6 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD
 | 
			
		||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
 | 
			
		||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
 | 
			
		||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
 | 
			
		||||
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
 | 
			
		||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
 | 
			
		||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
 | 
			
		||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 | 
			
		||||
@@ -46,6 +45,8 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
 | 
			
		||||
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
 | 
			
		||||
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
 | 
			
		||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
 | 
			
		||||
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
 | 
			
		||||
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
 | 
			
		||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 | 
			
		||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 | 
			
		||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 | 
			
		||||
@@ -159,8 +160,8 @@ github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c/go.mod h1:DNS
 | 
			
		||||
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
 | 
			
		||||
github.com/RoaringBitmap/roaring v0.8.0/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
 | 
			
		||||
github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
 | 
			
		||||
github.com/SevereCloud/vksdk/v2 v2.14.0 h1:1lciJC4FWhSQIjjFb3NGyJI7x9sPKk/P6aAvR0ibh1o=
 | 
			
		||||
github.com/SevereCloud/vksdk/v2 v2.14.0/go.mod h1:J/iPooVfldjVADo47G5aNxkvlRWAsZnMHpri8sZmck4=
 | 
			
		||||
github.com/SevereCloud/vksdk/v2 v2.14.1 h1:pToB5uvNn6CUpPAs4jINlv5Z9qArTs+muATDOWNFJo8=
 | 
			
		||||
github.com/SevereCloud/vksdk/v2 v2.14.1/go.mod h1:OW11r2PqGTGc/oxuodjgeqr2uxutasJGTmhjLMHMImg=
 | 
			
		||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
 | 
			
		||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 | 
			
		||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 | 
			
		||||
@@ -216,7 +217,7 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
 | 
			
		||||
github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 | 
			
		||||
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 | 
			
		||||
github.com/aws/aws-sdk-go v1.38.67/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 | 
			
		||||
github.com/aws/aws-sdk-go v1.43.6/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 | 
			
		||||
github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 | 
			
		||||
github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
 | 
			
		||||
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
 | 
			
		||||
github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
 | 
			
		||||
@@ -255,7 +256,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
 | 
			
		||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
 | 
			
		||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 | 
			
		||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 | 
			
		||||
github.com/bits-and-blooms/bitset v1.2.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 | 
			
		||||
github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 | 
			
		||||
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
 | 
			
		||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 | 
			
		||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 | 
			
		||||
@@ -263,15 +264,19 @@ github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
 | 
			
		||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 | 
			
		||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 | 
			
		||||
github.com/blevesearch/bleve v1.0.14/go.mod h1:e/LJTr+E7EaoVdkQZTfoz7dt4KoDNvDbLb8MSKuNTLQ=
 | 
			
		||||
github.com/blevesearch/bleve/v2 v2.3.1/go.mod h1:kAJuWn2L1TNSUyxtPJD4AGma2/PgMSm7GBlx61F9OBs=
 | 
			
		||||
github.com/blevesearch/bleve/v2 v2.3.2/go.mod h1:96+xE5pZUOsr3Y4vHzV1cBC837xZCpwLlX0hrrxnvIg=
 | 
			
		||||
github.com/blevesearch/bleve_index_api v1.0.1/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
 | 
			
		||||
github.com/blevesearch/blevex v1.0.0/go.mod h1:2rNVqoG2BZI8t1/P1awgTKnGlx5MP9ZbtEciQaNhswc=
 | 
			
		||||
github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc=
 | 
			
		||||
github.com/blevesearch/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:9eJDeqxJ3E7WnLebQUlPD7ZjSce7AnDb9vjGmMCbD0A=
 | 
			
		||||
github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
 | 
			
		||||
github.com/blevesearch/goleveldb v1.0.1/go.mod h1:WrU8ltZbIp0wAoig/MHbrPCXSOLpe79nz5lv5nqfYrQ=
 | 
			
		||||
github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
 | 
			
		||||
github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
 | 
			
		||||
github.com/blevesearch/mmap-go v1.0.3/go.mod h1:pYvKl/grLQrBxuaRYgoTssa4rVujYYeenDp++2E+yvs=
 | 
			
		||||
github.com/blevesearch/scorch_segment_api/v2 v2.1.0/go.mod h1:uch7xyyO/Alxkuxa+CGs79vw0QY8BENSBjg6Mw5L5DE=
 | 
			
		||||
github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
 | 
			
		||||
github.com/blevesearch/snowball v0.6.1/go.mod h1:ZF0IBg5vgpeoUhnMza2v0A/z8m1cWPlwhke08LpNusg=
 | 
			
		||||
github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
 | 
			
		||||
github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q=
 | 
			
		||||
github.com/blevesearch/vellum v1.0.7/go.mod h1:doBZpmRhwTsASB4QdUZANlJvqVAUdUyX0ZK7QJCTeBE=
 | 
			
		||||
@@ -299,7 +304,6 @@ github.com/bwmarrin/discordgo v0.25.0/go.mod h1:NJZpH+1AfhIcyQsPeuBKsUtYrRnjkyu0
 | 
			
		||||
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
 | 
			
		||||
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 | 
			
		||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 | 
			
		||||
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 | 
			
		||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 | 
			
		||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 | 
			
		||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 | 
			
		||||
@@ -326,7 +330,6 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
 | 
			
		||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 | 
			
		||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 | 
			
		||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 | 
			
		||||
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 | 
			
		||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 | 
			
		||||
github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
 | 
			
		||||
github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
 | 
			
		||||
@@ -457,8 +460,8 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
 | 
			
		||||
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 | 
			
		||||
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 | 
			
		||||
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
 | 
			
		||||
github.com/d5/tengo/v2 v2.10.1 h1:Z7vmTAQfdoExNEB9kxgqxvoBBW9bf+8uYMiDyriX5HM=
 | 
			
		||||
github.com/d5/tengo/v2 v2.10.1/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
 | 
			
		||||
github.com/d5/tengo/v2 v2.12.0 h1:EJLSMheqt1Kv/WjV5D0BvqJ/Qq/J6H3ZBpSZgw6Hn7Y=
 | 
			
		||||
github.com/d5/tengo/v2 v2.12.0/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
 | 
			
		||||
github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
 | 
			
		||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 | 
			
		||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 | 
			
		||||
@@ -520,9 +523,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
 | 
			
		||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 | 
			
		||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 | 
			
		||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 | 
			
		||||
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
 | 
			
		||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 | 
			
		||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 | 
			
		||||
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
 | 
			
		||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
 | 
			
		||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 | 
			
		||||
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
 | 
			
		||||
@@ -547,6 +549,8 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu
 | 
			
		||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
 | 
			
		||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
 | 
			
		||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 | 
			
		||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 | 
			
		||||
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
 | 
			
		||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 | 
			
		||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 | 
			
		||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 | 
			
		||||
@@ -560,12 +564,14 @@ github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYis
 | 
			
		||||
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 | 
			
		||||
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
 | 
			
		||||
github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo=
 | 
			
		||||
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
 | 
			
		||||
github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0=
 | 
			
		||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 | 
			
		||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 | 
			
		||||
github.com/gigawattio/window v0.0.0-20180317192513-0f5467e35573/go.mod h1:eBvb3i++NHDH4Ugo9qCvMw8t0mTSctaEa5blJbWcNxs=
 | 
			
		||||
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
 | 
			
		||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
 | 
			
		||||
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
 | 
			
		||||
github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
 | 
			
		||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 | 
			
		||||
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
 | 
			
		||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 | 
			
		||||
@@ -585,10 +591,12 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
 | 
			
		||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 | 
			
		||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 | 
			
		||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 | 
			
		||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 | 
			
		||||
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
 | 
			
		||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 | 
			
		||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 | 
			
		||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 | 
			
		||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 | 
			
		||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 | 
			
		||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 | 
			
		||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 | 
			
		||||
@@ -601,9 +609,13 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9
 | 
			
		||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 | 
			
		||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 | 
			
		||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 | 
			
		||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 | 
			
		||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
 | 
			
		||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
 | 
			
		||||
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
 | 
			
		||||
github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo=
 | 
			
		||||
github.com/go-redis/redis/v8 v8.10.0/go.mod h1:vXLTvigok0VtUX0znvbcEW1SOt4OA9CU1ZfnOtKOaiM=
 | 
			
		||||
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
 | 
			
		||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
 | 
			
		||||
github.com/go-resty/resty/v2 v2.0.0/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
 | 
			
		||||
github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU=
 | 
			
		||||
github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q=
 | 
			
		||||
@@ -711,8 +723,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 | 
			
		||||
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 | 
			
		||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 | 
			
		||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 | 
			
		||||
github.com/gomarkdown/markdown v0.0.0-20220509074759-a57bf950ab8c h1:yGxnjZegu9T/94575b5UGf2uDDYN3elzreWYpkhw2f4=
 | 
			
		||||
github.com/gomarkdown/markdown v0.0.0-20220509074759-a57bf950ab8c/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
 | 
			
		||||
github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1 h1:wAupuFkZ/yq219/mSbqDtMfUZQY0gTYEtoz3/LKtppU=
 | 
			
		||||
github.com/gomarkdown/markdown v0.0.0-20220607163217-45f7c050e2d1/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
 | 
			
		||||
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 | 
			
		||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 | 
			
		||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 | 
			
		||||
@@ -732,8 +744,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 | 
			
		||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 | 
			
		||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 | 
			
		||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 | 
			
		||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
 | 
			
		||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 | 
			
		||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 | 
			
		||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 | 
			
		||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 | 
			
		||||
github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs=
 | 
			
		||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 | 
			
		||||
@@ -757,6 +770,7 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 | 
			
		||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
github.com/google/pprof v0.0.0-20210715191844-86eeefc3e471/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 | 
			
		||||
@@ -776,6 +790,7 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf
 | 
			
		||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
 | 
			
		||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
 | 
			
		||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
 | 
			
		||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
 | 
			
		||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
 | 
			
		||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 | 
			
		||||
github.com/gopackage/ddp v0.0.3 h1:fd0DxScoiS+ogq22ktey6DjDSDybtJPAn69geMpUtFc=
 | 
			
		||||
@@ -803,6 +818,7 @@ github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
 | 
			
		||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 | 
			
		||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 | 
			
		||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 | 
			
		||||
github.com/graph-gophers/dataloader/v6 v6.0.0/go.mod h1:J15OZSnOoZgMkijpbZcwCmglIDYqlUiTEE1xLPbyqZM=
 | 
			
		||||
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
 | 
			
		||||
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
 | 
			
		||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 | 
			
		||||
@@ -823,7 +839,6 @@ github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1p
 | 
			
		||||
github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa h1:0EefSRfsNrdEwmoGVz4+cMG8++5M2XhvJ1tTRmmrJu8=
 | 
			
		||||
github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa/go.mod h1:+KEOMb29OC2kRa5BajwNM2NEjHTbQA/Z3gKYARLHREI=
 | 
			
		||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 | 
			
		||||
github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
 | 
			
		||||
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
 | 
			
		||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 | 
			
		||||
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
 | 
			
		||||
@@ -837,8 +852,6 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S
 | 
			
		||||
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 | 
			
		||||
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 | 
			
		||||
github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 | 
			
		||||
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 | 
			
		||||
github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 | 
			
		||||
github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 | 
			
		||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 | 
			
		||||
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 | 
			
		||||
@@ -870,15 +883,12 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 | 
			
		||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 | 
			
		||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 | 
			
		||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 | 
			
		||||
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
 | 
			
		||||
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
 | 
			
		||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 | 
			
		||||
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 | 
			
		||||
github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 | 
			
		||||
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 | 
			
		||||
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 | 
			
		||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 | 
			
		||||
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
 | 
			
		||||
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
 | 
			
		||||
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
 | 
			
		||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 | 
			
		||||
@@ -886,7 +896,6 @@ github.com/hashicorp/yamux v0.0.0-20210316155119-a95892c5f864/go.mod h1:CtWFDAQg
 | 
			
		||||
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
 | 
			
		||||
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
 | 
			
		||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 | 
			
		||||
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
 | 
			
		||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 | 
			
		||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 | 
			
		||||
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo=
 | 
			
		||||
@@ -970,6 +979,7 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx
 | 
			
		||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 | 
			
		||||
github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 | 
			
		||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 | 
			
		||||
github.com/jonboulle/clockwork v0.2.3/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 | 
			
		||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 | 
			
		||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
 | 
			
		||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 | 
			
		||||
@@ -1022,9 +1032,9 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
 | 
			
		||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 | 
			
		||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 | 
			
		||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 | 
			
		||||
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 | 
			
		||||
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
 | 
			
		||||
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 | 
			
		||||
github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
 | 
			
		||||
github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 | 
			
		||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 | 
			
		||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 | 
			
		||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 | 
			
		||||
@@ -1033,8 +1043,8 @@ github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.11 h1:i2lw1Pm7Yi/4O6XCSyJWqEHI2MDw2FzUK6o/D21xn2A=
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.11/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE=
 | 
			
		||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
 | 
			
		||||
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 | 
			
		||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 | 
			
		||||
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
 | 
			
		||||
@@ -1068,6 +1078,8 @@ github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3
 | 
			
		||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
 | 
			
		||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
 | 
			
		||||
github.com/ledongthuc/pdf v0.0.0-20210621053716-e28cb8259002/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
 | 
			
		||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
 | 
			
		||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 | 
			
		||||
github.com/levigross/exp-html v0.0.0-20120902181939-8df60c69a8f5/go.mod h1:QMe2wuKJ0o7zIVE8AqiT8rd8epmm6WDIZ2wyuBqYPzM=
 | 
			
		||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 | 
			
		||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 | 
			
		||||
@@ -1080,7 +1092,6 @@ github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 | 
			
		||||
github.com/lrstanley/girc v0.0.0-20220507183218-96757fe3d2a2 h1:iqJKGIChW2+aPIpnofEZAKgCNwG2tqytB2a1rJS6B6w=
 | 
			
		||||
github.com/lrstanley/girc v0.0.0-20220507183218-96757fe3d2a2/go.mod h1:lgrnhcF8bg/Bd5HA5DOb4Z+uGqUqGnp4skr+J2GwVgI=
 | 
			
		||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 | 
			
		||||
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
 | 
			
		||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 | 
			
		||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 | 
			
		||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 | 
			
		||||
@@ -1125,8 +1136,8 @@ github.com/mattermost/logr/v2 v2.0.15/go.mod h1:mpPp935r5dIkFDo2y9Q87cQWhFR/4xXp
 | 
			
		||||
github.com/mattermost/mattermost-server/v5 v5.39.3 h1:A5z/NlR4Xcwxx5UnlaNgUGP5hgj4KOV/CwpFg3OtlvQ=
 | 
			
		||||
github.com/mattermost/mattermost-server/v5 v5.39.3/go.mod h1:MDmVSmsSsqwNkuZ7rQ0osuXVCzrR1IUqGR7I0QU91sY=
 | 
			
		||||
github.com/mattermost/mattermost-server/v6 v6.0.0/go.mod h1:+S8CsNEPv1FOl1usaPBQ6Gu9+Sm1Cc9YdU/Qh1YMGVI=
 | 
			
		||||
github.com/mattermost/mattermost-server/v6 v6.6.1 h1:jza7N9OMqFe+z7s9LZeSj1M4E/2DOV/llIUpi9VWg2U=
 | 
			
		||||
github.com/mattermost/mattermost-server/v6 v6.6.1/go.mod h1:oR6UCRo+SEvnfN2FEOdzHs1UljrskyCKU8tWeKlxgMo=
 | 
			
		||||
github.com/mattermost/mattermost-server/v6 v6.7.0 h1:DqNZFuzXU4rtAzmmrpk6wXYI06GzfN+TsGqWf9mwlXc=
 | 
			
		||||
github.com/mattermost/mattermost-server/v6 v6.7.0/go.mod h1:b/iDf7Jn2Pd2jWGzaznoVNT811JZpemdmNGP7M/a7Ao=
 | 
			
		||||
github.com/mattermost/morph v0.0.0-20220401091636-39f834798da8/go.mod h1:jxM3g1bx+k2Thz7jofcHguBS8TZn5Pc+o5MGmORObhw=
 | 
			
		||||
github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs=
 | 
			
		||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 | 
			
		||||
@@ -1188,14 +1199,14 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
 | 
			
		||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 | 
			
		||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
 | 
			
		||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
 | 
			
		||||
github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 | 
			
		||||
github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 | 
			
		||||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 | 
			
		||||
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
 | 
			
		||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 | 
			
		||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
 | 
			
		||||
github.com/minio/minio-go/v7 v7.0.11/go.mod h1:WoyW+ySKAKjY98B9+7ZbI8z8S3jaxaisdcvj9TGlazA=
 | 
			
		||||
github.com/minio/minio-go/v7 v7.0.23 h1:NleyGQvAn9VQMU+YHVrgV4CX+EPtxPt/78lHOOTncy4=
 | 
			
		||||
github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do=
 | 
			
		||||
github.com/minio/minio-go/v7 v7.0.24 h1:HPlHiET6L5gIgrHRaw1xFo1OaN4bEP/082asWh3WJtI=
 | 
			
		||||
github.com/minio/minio-go/v7 v7.0.24/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg=
 | 
			
		||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
 | 
			
		||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
 | 
			
		||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 | 
			
		||||
@@ -1219,8 +1230,9 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
 | 
			
		||||
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 | 
			
		||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 | 
			
		||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 | 
			
		||||
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
 | 
			
		||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 | 
			
		||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 | 
			
		||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 | 
			
		||||
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 | 
			
		||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 | 
			
		||||
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 | 
			
		||||
@@ -1270,7 +1282,7 @@ github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:
 | 
			
		||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
 | 
			
		||||
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
 | 
			
		||||
github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
 | 
			
		||||
github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
 | 
			
		||||
github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
 | 
			
		||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 | 
			
		||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 | 
			
		||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 | 
			
		||||
@@ -1295,8 +1307,10 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
 | 
			
		||||
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
 | 
			
		||||
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 | 
			
		||||
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
 | 
			
		||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
 | 
			
		||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 | 
			
		||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 | 
			
		||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 | 
			
		||||
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 | 
			
		||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 | 
			
		||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 | 
			
		||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 | 
			
		||||
@@ -1308,8 +1322,9 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
 | 
			
		||||
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 | 
			
		||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
 | 
			
		||||
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
 | 
			
		||||
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
 | 
			
		||||
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 | 
			
		||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 | 
			
		||||
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
 | 
			
		||||
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
 | 
			
		||||
github.com/oov/psd v0.0.0-20210618170533-9fb823ddb631/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8=
 | 
			
		||||
github.com/oov/psd v0.0.0-20220121172623-5db5eafcecbb/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8=
 | 
			
		||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 | 
			
		||||
@@ -1349,6 +1364,7 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9
 | 
			
		||||
github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
 | 
			
		||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 | 
			
		||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 | 
			
		||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 | 
			
		||||
github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c h1:P6XGcuPTigoHf4TSu+3D/7QOQ1MbL6alNwrGhcW7sKw=
 | 
			
		||||
github.com/paulrosania/go-charset v0.0.0-20190326053356-55c9d7a5834c/go.mod h1:YnNlZP7l4MhyGQ4CBRwv6ohZTPrUJJZtEv4ZgADkbs4=
 | 
			
		||||
github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
 | 
			
		||||
@@ -1357,10 +1373,11 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
 | 
			
		||||
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 | 
			
		||||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
 | 
			
		||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 | 
			
		||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
 | 
			
		||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 | 
			
		||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0=
 | 
			
		||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
 | 
			
		||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 | 
			
		||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 | 
			
		||||
github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU=
 | 
			
		||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
 | 
			
		||||
github.com/peterbourgon/diskv v0.0.0-20171120014656-2973218375c3/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 | 
			
		||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 | 
			
		||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
 | 
			
		||||
@@ -1401,6 +1418,7 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
 | 
			
		||||
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 | 
			
		||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 | 
			
		||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 | 
			
		||||
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 | 
			
		||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 | 
			
		||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 | 
			
		||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 | 
			
		||||
@@ -1419,6 +1437,7 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 | 
			
		||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 | 
			
		||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 | 
			
		||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 | 
			
		||||
github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
 | 
			
		||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 | 
			
		||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 | 
			
		||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 | 
			
		||||
@@ -1471,7 +1490,7 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC
 | 
			
		||||
github.com/rudderlabs/analytics-go v3.3.1+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30=
 | 
			
		||||
github.com/rudderlabs/analytics-go v3.3.2+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30=
 | 
			
		||||
github.com/russellhaering/goxmldsig v1.1.0/go.mod h1:QK8GhXPB3+AfuCrfo0oRISa9NfzeCpWmxeGnqEpDF9o=
 | 
			
		||||
github.com/russellhaering/goxmldsig v1.1.1/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw=
 | 
			
		||||
github.com/russellhaering/goxmldsig v1.2.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw=
 | 
			
		||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 | 
			
		||||
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
 | 
			
		||||
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
 | 
			
		||||
@@ -1482,8 +1501,7 @@ github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0
 | 
			
		||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 | 
			
		||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 | 
			
		||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 | 
			
		||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
 | 
			
		||||
github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
 | 
			
		||||
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
 | 
			
		||||
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=
 | 
			
		||||
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
 | 
			
		||||
github.com/satori/go.uuid v0.0.0-20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 | 
			
		||||
@@ -1544,8 +1562,8 @@ github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 h1:A7o8tOE
 | 
			
		||||
github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882/go.mod h1:5IwJoz9Pw7JsrCN4/skkxUtSWT7myuUPLhCgv6Q5vvQ=
 | 
			
		||||
github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 h1:lpEzuenPuO1XNTeikEmvqYFcU37GVLl8SRNblzyvGBE=
 | 
			
		||||
github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo=
 | 
			
		||||
github.com/slack-go/slack v0.10.3 h1:kKYwlKY73AfSrtAk9UHWCXXfitudkDztNI9GYBviLxw=
 | 
			
		||||
github.com/slack-go/slack v0.10.3/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
 | 
			
		||||
github.com/slack-go/slack v0.11.0 h1:sBBjQz8LY++6eeWhGJNZpRm5jvLRNnWBFZ/cAq58a6k=
 | 
			
		||||
github.com/slack-go/slack v0.11.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
 | 
			
		||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 | 
			
		||||
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
 | 
			
		||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
 | 
			
		||||
@@ -1563,21 +1581,20 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod
 | 
			
		||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 | 
			
		||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 | 
			
		||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 | 
			
		||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
 | 
			
		||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 | 
			
		||||
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
 | 
			
		||||
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
 | 
			
		||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 | 
			
		||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 | 
			
		||||
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
 | 
			
		||||
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 | 
			
		||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 | 
			
		||||
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
 | 
			
		||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 | 
			
		||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 | 
			
		||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 | 
			
		||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
 | 
			
		||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
 | 
			
		||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
 | 
			
		||||
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
 | 
			
		||||
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
 | 
			
		||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 | 
			
		||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 | 
			
		||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 | 
			
		||||
@@ -1591,9 +1608,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 | 
			
		||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 | 
			
		||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
 | 
			
		||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
 | 
			
		||||
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
 | 
			
		||||
github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=
 | 
			
		||||
github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
 | 
			
		||||
github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
 | 
			
		||||
github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
 | 
			
		||||
github.com/splitio/go-client/v6 v6.1.0/go.mod h1:CEGAEFT99Fwb32ZIRcnZoXTMXddtB6IIpTmt3RP8mnM=
 | 
			
		||||
github.com/splitio/go-split-commons/v3 v3.1.0/go.mod h1:29NCy20oAS4ZMy4qkwTd6277eieVDonx4V/aeDU/wUQ=
 | 
			
		||||
github.com/splitio/go-toolkit/v4 v4.2.0/go.mod h1:EdIHN0yzB1GTXDYQc0KdKvnjkO/jfUM2YqHVYfhD3Wo=
 | 
			
		||||
@@ -1614,10 +1630,12 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
 | 
			
		||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 | 
			
		||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 | 
			
		||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 | 
			
		||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
 | 
			
		||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 | 
			
		||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 | 
			
		||||
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
 | 
			
		||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 | 
			
		||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 | 
			
		||||
github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
 | 
			
		||||
github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
 | 
			
		||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 | 
			
		||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 | 
			
		||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 | 
			
		||||
@@ -1735,7 +1753,7 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
 | 
			
		||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 | 
			
		||||
github.com/yuin/goldmark v1.3.8/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 | 
			
		||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 | 
			
		||||
github.com/yuin/goldmark v1.4.7/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
 | 
			
		||||
github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
 | 
			
		||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 | 
			
		||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
 | 
			
		||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
 | 
			
		||||
@@ -1750,18 +1768,16 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 | 
			
		||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
 | 
			
		||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
 | 
			
		||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
 | 
			
		||||
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
 | 
			
		||||
go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
 | 
			
		||||
go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
 | 
			
		||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
 | 
			
		||||
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
 | 
			
		||||
go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
 | 
			
		||||
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
 | 
			
		||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
 | 
			
		||||
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
 | 
			
		||||
go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
 | 
			
		||||
go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
 | 
			
		||||
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
 | 
			
		||||
go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0 h1:3IQF2bgAyibdo77hTejwuJe4jlypj9QaE4xCQuxrThM=
 | 
			
		||||
go.mau.fi/libsignal v0.0.0-20220425070825-c40c839ee6a0/go.mod h1:kBOXTvYyDG/q1Ihgvd4J6WenGPh7wtEGvPKF6vmf5ak=
 | 
			
		||||
go.mau.fi/whatsmeow v0.0.0-20220504135614-f1f2a9d231fb h1:xI4HiJwBMmztBXFzjKWt7Ea8xmOO7LyYCYV0/ROU7kY=
 | 
			
		||||
go.mau.fi/whatsmeow v0.0.0-20220504135614-f1f2a9d231fb/go.mod h1:iUBgOLNaqShLrR17u0kIiRptIGFH+nbT1tRhaWBEX/c=
 | 
			
		||||
go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154 h1:jUe0Re+w8/YHfxYryxjVkG3PEQDujCzGhbqsk6Qadtg=
 | 
			
		||||
go.mau.fi/whatsmeow v0.0.0-20220624184947-57a69a641154/go.mod h1:iUBgOLNaqShLrR17u0kIiRptIGFH+nbT1tRhaWBEX/c=
 | 
			
		||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 | 
			
		||||
go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
 | 
			
		||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 | 
			
		||||
@@ -1843,7 +1859,7 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
 | 
			
		||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 | 
			
		||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 | 
			
		||||
@@ -1873,9 +1889,9 @@ golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+o
 | 
			
		||||
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 | 
			
		||||
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 | 
			
		||||
golang.org/x/image v0.0.0-20210622092929-e6eecd499c2c/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
 | 
			
		||||
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
 | 
			
		||||
golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9 h1:LRtI4W37N+KFebI/qV0OFiLUv4GLOWeEW5hn/KEJvxE=
 | 
			
		||||
golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
 | 
			
		||||
golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
 | 
			
		||||
golang.org/x/image v0.0.0-20220617043117-41969df76e82 h1:KpZB5pUSBvrHltNEdK/tw0xlPeD13M6M6aGP32gKqiw=
 | 
			
		||||
golang.org/x/image v0.0.0-20220617043117-41969df76e82/go.mod h1:doUCurBvlfPMKfmIpRIywoHmhN3VyhnoFDbvIEWF4hY=
 | 
			
		||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 | 
			
		||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 | 
			
		||||
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 | 
			
		||||
@@ -1902,9 +1918,8 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM
 | 
			
		||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
 | 
			
		||||
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
 | 
			
		||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
 | 
			
		||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
 | 
			
		||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 | 
			
		||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 | 
			
		||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 | 
			
		||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 | 
			
		||||
@@ -1987,8 +2002,11 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
 | 
			
		||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
 | 
			
		||||
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y=
 | 
			
		||||
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 | 
			
		||||
@@ -2014,8 +2032,9 @@ golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
 | 
			
		||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
 | 
			
		||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
@@ -2029,6 +2048,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 | 
			
		||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 | 
			
		||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 | 
			
		||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 | 
			
		||||
@@ -2148,7 +2168,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
 | 
			
		||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
@@ -2159,17 +2178,18 @@ golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBc
 | 
			
		||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 | 
			
		||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 | 
			
		||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
 | 
			
		||||
@@ -2280,16 +2300,17 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=
 | 
			
		||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
 | 
			
		||||
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
 | 
			
		||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 | 
			
		||||
gomod.garykim.dev/nc-talk v0.3.0 h1:MZxLc/gX2/+bdOw4xt6pi+qQFUQld1woGfw1hEJ0fbM=
 | 
			
		||||
gomod.garykim.dev/nc-talk v0.3.0/go.mod h1:q/Adot/H7iqi+H4lANopV7/xcMf+sX3AZXUXqiITwok=
 | 
			
		||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 | 
			
		||||
@@ -2336,12 +2357,14 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
 | 
			
		||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
 | 
			
		||||
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
 | 
			
		||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
 | 
			
		||||
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
 | 
			
		||||
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
 | 
			
		||||
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
 | 
			
		||||
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
 | 
			
		||||
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
 | 
			
		||||
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
 | 
			
		||||
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
 | 
			
		||||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
 | 
			
		||||
google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko=
 | 
			
		||||
google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 | 
			
		||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 | 
			
		||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 | 
			
		||||
@@ -2434,8 +2457,6 @@ google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ6
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211013025323-ce878158c4d4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 | 
			
		||||
@@ -2446,7 +2467,14 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
 | 
			
		||||
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
 | 
			
		||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 | 
			
		||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 | 
			
		||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 | 
			
		||||
@@ -2484,9 +2512,10 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
 | 
			
		||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 | 
			
		||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 | 
			
		||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
 | 
			
		||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 | 
			
		||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 | 
			
		||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
 | 
			
		||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 | 
			
		||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 | 
			
		||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 | 
			
		||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 | 
			
		||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 | 
			
		||||
@@ -2524,7 +2553,6 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 | 
			
		||||
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 | 
			
		||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 | 
			
		||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 | 
			
		||||
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 | 
			
		||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
 | 
			
		||||
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 | 
			
		||||
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
 | 
			
		||||
@@ -2556,8 +2584,10 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 | 
			
		||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 | 
			
		||||
gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
 | 
			
		||||
gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
 | 
			
		||||
gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
 | 
			
		||||
@@ -2741,8 +2771,8 @@ modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
 | 
			
		||||
modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
 | 
			
		||||
modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
 | 
			
		||||
modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y=
 | 
			
		||||
modernc.org/sqlite v1.17.2 h1:TjmF36Wi5QcPYqRoAacV1cAyJ7xB/CD0ExpVUEMebnw=
 | 
			
		||||
modernc.org/sqlite v1.17.2/go.mod h1:GOQmuiXd6pTTes1Fi2s9apiCcD/wbKQtBZ0Nw6/etjM=
 | 
			
		||||
modernc.org/sqlite v1.17.3 h1:iE+coC5g17LtByDYDWKpR6m2Z9022YrSh3bumwOnIrI=
 | 
			
		||||
modernc.org/sqlite v1.17.3/go.mod h1:10hPVYar9C0kfXuTWGz8s0XtB8uAGymUy51ZzStYe3k=
 | 
			
		||||
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
 | 
			
		||||
modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs=
 | 
			
		||||
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
 | 
			
		||||
 
 | 
			
		||||
@@ -197,7 +197,7 @@ ShowJoinPart=false
VerboseJoinPart=false

#Do not send joins/parts to other bridges
#Currently works for messages from the following bridges: irc, mattermost, slack
#Currently works for messages from the following bridges: irc, mattermost, slack, discord
#OPTIONAL (default false)
NoSendJoinPart=false

@@ -496,7 +496,7 @@ RemoteNickFormat="[{PROTOCOL}] <{NICK}> "
ShowJoinPart=false

#Do not send joins/parts to other bridges
#Currently works for messages from the following bridges: irc, mattermost, slack
#Currently works for messages from the following bridges: irc, mattermost, slack, discord
#OPTIONAL (default false)
NoSendJoinPart=false

@@ -830,7 +830,7 @@ RemoteNickFormat="[{PROTOCOL}] <{NICK}> "
ShowJoinPart=false

#Do not send joins/parts to other bridges
#Currently works for messages from the following bridges: irc, mattermost, slack
#Currently works for messages from the following bridges: irc, mattermost, slack, discord
#OPTIONAL (default false)
NoSendJoinPart=false

3  vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml  (generated, vendored)
@@ -57,6 +57,8 @@ linters:
    - grouper
    - decorder
    - containedctx
    # - execinquery # FIXME: panic in 1.46.0
    - nosprintfhostport

# - wrapcheck # TODO: v3 Fix
# - testpackage # TODO: Fix testpackage
@@ -87,6 +89,7 @@ linters:
# - varnamelen
# - errchkjson
# - maintidx
# - nonamedreturns

# depricated
# - maligned

2  vendor/github.com/SevereCloud/vksdk/v2/doc.go  (generated, vendored)
@@ -7,6 +7,6 @@ package vksdk

// Module constants.
const (
	Version = "2.14.0"
	Version = "2.14.1"
	API     = "5.131"
)

10  vendor/github.com/SevereCloud/vksdk/v2/object/groups.go  (generated, vendored)
@@ -210,7 +210,7 @@ type GroupsGroup struct {
	MainSection     int                  `json:"main_section,omitempty"`
	OnlineStatus    GroupsOnlineStatus   `json:"online_status,omitempty"` // Status of replies in community messages
	AgeLimits       int                  `json:"age_limits,omitempty"`    // Information whether age limit
	BanInfo         GroupsGroupBanInfo   `json:"ban_info,omitempty"`      // User ban info
	BanInfo         *GroupsGroupBanInfo  `json:"ban_info,omitempty"`      // User ban info
	Addresses       GroupsAddressesInfo  `json:"addresses,omitempty"`     // Info about addresses in Groups
	LiveCovers      GroupsLiveCovers     `json:"live_covers,omitempty"`
	CropPhoto       UsersCropPhoto       `json:"crop_photo,omitempty"`
@@ -963,10 +963,10 @@ type GroupsOnlineStatus struct {

// GroupsOwnerXtrBanInfo struct.
type GroupsOwnerXtrBanInfo struct {
	BanInfo GroupsBanInfo `json:"ban_info"`
	Group   GroupsGroup   `json:"group"`
	Profile UsersUser     `json:"profile"`
	Type    string        `json:"type"`
	BanInfo *GroupsBanInfo `json:"ban_info"`
	Group   GroupsGroup    `json:"group"`
	Profile UsersUser      `json:"profile"`
	Type    string         `json:"type"`
}

// GroupsSubjectItem struct.

2  vendor/github.com/SevereCloud/vksdk/v2/object/video.go  (generated, vendored)
@@ -31,7 +31,7 @@ type VideoVideo struct {
	CanLike BaseBoolInt `json:"can_like"`

	// Information whether current user can download the video.
	CanDownload BaseBoolInt `json:"can_download"`
	CanDownload int `json:"can_download"`

	// Information whether current user can repost this video.
	CanRepost         BaseBoolInt       `json:"can_repost"`

7  vendor/github.com/d5/tengo/v2/compiler.go  (generated, vendored)
@@ -1220,14 +1220,14 @@ func (c *Compiler) optimizeFunc(node parser.Node) {
	iterateInstructions(c.scopes[c.scopeIndex].Instructions,
		func(pos int, opcode parser.Opcode, operands []int) bool {
			switch {
			case dsts[pos]:
				dstIdx++
				deadCode = false
			case opcode == parser.OpReturn:
				if deadCode {
					return true
				}
				deadCode = true
			case dsts[pos]:
				dstIdx++
				deadCode = false
			case deadCode:
				return true
			}
@@ -1242,6 +1242,7 @@ func (c *Compiler) optimizeFunc(node parser.Node) {
	var appendReturn bool
	endPos := len(c.scopes[c.scopeIndex].Instructions)
	newEndPost := len(newInsts)

	iterateInstructions(newInsts,
		func(pos int, opcode parser.Opcode, operands []int) bool {
			switch opcode {

18  vendor/github.com/d5/tengo/v2/parser/parser.go  (generated, vendored)
@@ -375,7 +375,12 @@ func (p *Parser) parseOperand() Expr {
	case token.Ident:
		return p.parseIdent()
	case token.Int:
		v, _ := strconv.ParseInt(p.tokenLit, 10, 64)
		v, err := strconv.ParseInt(p.tokenLit, 0, 64)
		if err == strconv.ErrRange {
			p.error(p.pos, "number out of range")
		} else if err != nil {
			p.error(p.pos, "invalid integer")
		}
		x := &IntLit{
			Value:    v,
			ValuePos: p.pos,
@@ -383,8 +388,14 @@ func (p *Parser) parseOperand() Expr {
		}
		p.next()
		return x

	case token.Float:
		v, _ := strconv.ParseFloat(p.tokenLit, 64)
		v, err := strconv.ParseFloat(p.tokenLit, 64)
		if err == strconv.ErrRange {
			p.error(p.pos, "number out of range")
		} else if err != nil {
			p.error(p.pos, "invalid float")
		}
		x := &FloatLit{
			Value:    v,
			ValuePos: p.pos,
@@ -447,10 +458,11 @@ func (p *Parser) parseOperand() Expr {
		return p.parseErrorExpr()
	case token.Immutable: // immutable expression
		return p.parseImmutableExpr()
	default:
		p.errorExpected(p.pos, "operand")
	}

	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &BadExpr{From: pos, To: p.pos}
}

113  vendor/github.com/d5/tengo/v2/parser/scanner.go  (generated, vendored)
							@@ -93,9 +93,9 @@ func (s *Scanner) Scan() (
 | 
			
		||||
			token.Export, token.True, token.False, token.Undefined:
 | 
			
		||||
			insertSemi = true
 | 
			
		||||
		}
 | 
			
		||||
	case '0' <= ch && ch <= '9':
 | 
			
		||||
	case ('0' <= ch && ch <= '9') || (ch == '.' && '0' <= s.peek() && s.peek() <= '9'):
 | 
			
		||||
		insertSemi = true
 | 
			
		||||
		tok, literal = s.scanNumber(false)
 | 
			
		||||
		tok, literal = s.scanNumber()
 | 
			
		||||
	default:
 | 
			
		||||
		s.next() // always make progress
 | 
			
		||||
 | 
			
		||||
@@ -125,16 +125,11 @@ func (s *Scanner) Scan() (
 | 
			
		||||
		case ':':
 | 
			
		||||
			tok = s.switch2(token.Colon, token.Define)
 | 
			
		||||
		case '.':
 | 
			
		||||
			if '0' <= s.ch && s.ch <= '9' {
 | 
			
		||||
				insertSemi = true
 | 
			
		||||
				tok, literal = s.scanNumber(true)
 | 
			
		||||
			} else {
 | 
			
		||||
				tok = token.Period
 | 
			
		||||
				if s.ch == '.' && s.peek() == '.' {
 | 
			
		||||
					s.next()
 | 
			
		||||
					s.next() // consume last '.'
 | 
			
		||||
					tok = token.Ellipsis
 | 
			
		||||
				}
 | 
			
		||||
			tok = token.Period
 | 
			
		||||
			if s.ch == '.' && s.peek() == '.' {
 | 
			
		||||
				s.next()
 | 
			
		||||
				s.next() // consume last '.'
 | 
			
		||||
				tok = token.Ellipsis
 | 
			
		||||
			}
 | 
			
		||||
		case ',':
 | 
			
		||||
			tok = token.Comma
 | 
			
		||||
@@ -379,86 +374,58 @@ func (s *Scanner) scanIdentifier() string {
	return string(s.src[offs:s.offset])
}

func (s *Scanner) scanMantissa(base int) {
	for digitVal(s.ch) < base {
func (s *Scanner) scanDigits(base int) {
	for s.ch == '_' || digitVal(s.ch) < base {
		s.next()
	}
}

func (s *Scanner) scanNumber(
	seenDecimalPoint bool,
) (tok token.Token, lit string) {
	// digitVal(s.ch) < 10
func (s *Scanner) scanNumber() (token.Token, string) {
	offs := s.offset
	tok = token.Int
	tok := token.Int
	base := 10

	defer func() {
		lit = string(s.src[offs:s.offset])
	}()

	if seenDecimalPoint {
		offs--
		tok = token.Float
		s.scanMantissa(10)
		goto exponent
	}

	if s.ch == '0' {
		// int or float
		offs := s.offset
	// Determine base
	switch {
	case s.ch == '0' && lower(s.peek()) == 'b':
		base = 2
		s.next()
		s.next()
	case s.ch == '0' && lower(s.peek()) == 'o':
		base = 8
		s.next()
		s.next()
	case s.ch == '0' && lower(s.peek()) == 'x':
		base = 16
		s.next()
		s.next()
		if s.ch == 'x' || s.ch == 'X' {
			// hexadecimal int
			s.next()
			s.scanMantissa(16)
			if s.offset-offs <= 2 {
				// only scanned "0x" or "0X"
				s.error(offs, "illegal hexadecimal number")
			}
		} else {
			// octal int or float
			seenDecimalDigit := false
			s.scanMantissa(8)
			if s.ch == '8' || s.ch == '9' {
				// illegal octal int or float
				seenDecimalDigit = true
				s.scanMantissa(10)
			}
			if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {
				goto fraction
			}
			// octal int
			if seenDecimalDigit {
				s.error(offs, "illegal octal number")
			}
		}
		return
	}

	// decimal int or float
	s.scanMantissa(10)
	// Scan whole number
	s.scanDigits(base)

fraction:
	if s.ch == '.' {
	// Scan fractional part
	if s.ch == '.' && (base == 10 || base == 16) {
		tok = token.Float
		s.next()
		s.scanMantissa(10)
		s.scanDigits(base)
	}

exponent:
	if s.ch == 'e' || s.ch == 'E' {
	// Scan exponent
	if s.ch == 'e' || s.ch == 'E' || s.ch == 'p' || s.ch == 'P' {
		tok = token.Float
		s.next()
		if s.ch == '-' || s.ch == '+' {
			s.next()
		}
		if digitVal(s.ch) < 10 {
			s.scanMantissa(10)
		} else {
			s.error(offs, "illegal floating-point exponent")
		offs := s.offset
		s.scanDigits(10)
		if offs == s.offset {
			s.error(offs, "exponent has no digits")
		}
	}
	return

	return tok, string(s.src[offs:s.offset])
}

func (s *Scanner) scanEscape(quote rune) bool {
@@ -687,3 +654,7 @@ func digitVal(ch rune) int {
	}
	return 16 // larger than any legal digit val
}

func lower(c byte) byte {
	return c | ('x' - 'X')
}
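
The scanner hunk above replaces scanMantissa with an underscore-aware scanDigits and uses the new lower helper to detect the 0b/0o/0x prefixes. As a reading aid, here is a minimal self-contained sketch of that idea in plain Go; lowerASCII, digitVal and scanInt are illustrative stand-ins, not the project's scanner code.

package main

import "fmt"

// lowerASCII mirrors the lower() helper above: OR-ing with ('x' - 'X') == 0x20
// maps ASCII 'A'..'Z' onto 'a'..'z' and leaves lowercase letters and digits unchanged.
func lowerASCII(c byte) byte { return c | ('x' - 'X') }

// digitVal is a stand-in for the scanner's digitVal: anything that is not a
// valid digit returns 16, which is larger than any legal digit value.
func digitVal(ch byte) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= lowerASCII(ch) && lowerASCII(ch) <= 'f':
		return int(lowerASCII(ch) - 'a' + 10)
	}
	return 16
}

// scanInt reads an integer literal, accepting the 0b/0o/0x prefixes and '_'
// digit separators, in the same spirit as the scanDigits/base-detection logic
// in the hunk above.
func scanInt(s string) (digits string, base int) {
	base = 10
	i := 0
	if len(s) >= 2 && s[0] == '0' {
		switch lowerASCII(s[1]) {
		case 'b':
			base, i = 2, 2
		case 'o':
			base, i = 8, 2
		case 'x':
			base, i = 16, 2
		}
	}
	for i < len(s) && (s[i] == '_' || digitVal(s[i]) < base) {
		if s[i] != '_' {
			digits += string(s[i])
		}
		i++
	}
	return digits, base
}

func main() {
	fmt.Println(scanInt("0xDEAD_BEEF")) // DEADBEEF 16
	fmt.Println(scanInt("1_000_000"))   // 1000000 10
}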
9 vendor/github.com/gomarkdown/markdown/.gitpod.yml generated vendored Normal file
@@ -0,0 +1,9 @@
# This configuration file was automatically generated by Gitpod.
# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
# and commit this file to your remote git repository to share the goodness with others.

tasks:
  - init: go get && go build ./... && go test ./...
    command: go run

1 vendor/github.com/gomarkdown/markdown/fuzz.go generated vendored
@@ -1,3 +1,4 @@
//go:build gofuzz
// +build gofuzz

package markdown
90 vendor/github.com/gomarkdown/markdown/html/renderer.go generated vendored
@@ -11,6 +11,7 @@ import (
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/gomarkdown/markdown/ast"
 | 
			
		||||
	"github.com/gomarkdown/markdown/internal/valid"
 | 
			
		||||
	"github.com/gomarkdown/markdown/parser"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
@@ -211,70 +212,6 @@ func NewRenderer(opts RendererOptions) *Renderer {
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func isHTMLTag(tag []byte, tagname string) bool {
 | 
			
		||||
	found, _ := findHTMLTagPos(tag, tagname)
 | 
			
		||||
	return found
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Look for a character, but ignore it when it's in any kind of quotes, it
 | 
			
		||||
// might be JavaScript
 | 
			
		||||
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
 | 
			
		||||
	inSingleQuote := false
 | 
			
		||||
	inDoubleQuote := false
 | 
			
		||||
	inGraveQuote := false
 | 
			
		||||
	i := start
 | 
			
		||||
	for i < len(html) {
 | 
			
		||||
		switch {
 | 
			
		||||
		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
 | 
			
		||||
			return i
 | 
			
		||||
		case html[i] == '\'':
 | 
			
		||||
			inSingleQuote = !inSingleQuote
 | 
			
		||||
		case html[i] == '"':
 | 
			
		||||
			inDoubleQuote = !inDoubleQuote
 | 
			
		||||
		case html[i] == '`':
 | 
			
		||||
			inGraveQuote = !inGraveQuote
 | 
			
		||||
		}
 | 
			
		||||
		i++
 | 
			
		||||
	}
 | 
			
		||||
	return start
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
 | 
			
		||||
	i := 0
 | 
			
		||||
	if i < len(tag) && tag[0] != '<' {
 | 
			
		||||
		return false, -1
 | 
			
		||||
	}
 | 
			
		||||
	i++
 | 
			
		||||
	i = skipSpace(tag, i)
 | 
			
		||||
 | 
			
		||||
	if i < len(tag) && tag[i] == '/' {
 | 
			
		||||
		i++
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	i = skipSpace(tag, i)
 | 
			
		||||
	j := 0
 | 
			
		||||
	for ; i < len(tag); i, j = i+1, j+1 {
 | 
			
		||||
		if j >= len(tagname) {
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
 | 
			
		||||
			return false, -1
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if i == len(tag) {
 | 
			
		||||
		return false, -1
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
 | 
			
		||||
	if rightAngle >= i {
 | 
			
		||||
		return true, rightAngle
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return false, -1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func isRelativeLink(link []byte) (yes bool) {
 | 
			
		||||
	// a tag begin with '#'
 | 
			
		||||
	if link[0] == '#' {
 | 
			
		||||
@@ -351,14 +288,6 @@ func needSkipLink(flags Flags, dest []byte) bool {
 | 
			
		||||
	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func isSmartypantable(node ast.Node) bool {
 | 
			
		||||
	switch node.GetParent().(type) {
 | 
			
		||||
	case *ast.Link, *ast.CodeBlock, *ast.Code:
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func appendLanguageAttr(attrs []string, info []byte) []string {
 | 
			
		||||
	if len(info) == 0 {
 | 
			
		||||
		return attrs
 | 
			
		||||
@@ -1297,21 +1226,8 @@ func isListItemTerm(node ast.Node) bool {
 | 
			
		||||
	return ok && data.ListFlags&ast.ListTypeTerm != 0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TODO: move to internal package
 | 
			
		||||
func skipSpace(data []byte, i int) int {
 | 
			
		||||
	n := len(data)
 | 
			
		||||
	for i < n && isSpace(data[i]) {
 | 
			
		||||
		i++
 | 
			
		||||
	}
 | 
			
		||||
	return i
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TODO: move to internal package
 | 
			
		||||
var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
 | 
			
		||||
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
 | 
			
		||||
 | 
			
		||||
func isSafeLink(link []byte) bool {
 | 
			
		||||
	for _, path := range validPaths {
 | 
			
		||||
	for _, path := range valid.Paths {
 | 
			
		||||
		if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
 | 
			
		||||
			if len(link) == len(path) {
 | 
			
		||||
				return true
 | 
			
		||||
@@ -1321,7 +1237,7 @@ func isSafeLink(link []byte) bool {
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, prefix := range validUris {
 | 
			
		||||
	for _, prefix := range valid.URIs {
 | 
			
		||||
		// TODO: handle unicode here
 | 
			
		||||
		// case-insensitive prefix test
 | 
			
		||||
		if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isAlnum(link[len(prefix)]) {
 | 
			
		||||
 
14 vendor/github.com/gomarkdown/markdown/internal/valid/valid.go generated vendored Normal file
@@ -0,0 +1,14 @@
package valid

var URIs = [][]byte{
	[]byte("http://"),
	[]byte("https://"),
	[]byte("ftp://"),
	[]byte("mailto:"),
}

var Paths = [][]byte{
	[]byte("/"),
	[]byte("./"),
	[]byte("../"),
}
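
The new internal/valid package centralises the URI and path prefixes that the isSafeLink checks in the renderer.go hunk above and the parser/inline.go hunk below previously duplicated. A simplified, self-contained sketch of how such a prefix check can use these tables; this is plain Go, not the vendored code, and it omits the real code's additional requirement of an alphanumeric character after the scheme.

package main

import (
	"bytes"
	"fmt"
)

// Illustrative copies of the shared tables; the real ones live in the
// internal/valid package added above and are only importable from inside
// the gomarkdown module.
var uris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto:")}
var paths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

// isSafeLink mirrors the shape of the prefix checks that renderer.go and
// parser/inline.go now perform against valid.Paths and valid.URIs.
func isSafeLink(link []byte) bool {
	for _, path := range paths {
		if bytes.HasPrefix(link, path) {
			return true
		}
	}
	for _, prefix := range uris {
		// case-insensitive scheme comparison
		if len(link) > len(prefix) && bytes.EqualFold(link[:len(prefix)], prefix) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isSafeLink([]byte("https://example.com"))) // true
	fmt.Println(isSafeLink([]byte("javascript:alert(1)"))) // false
}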
10 vendor/github.com/gomarkdown/markdown/parser/block.go generated vendored
@@ -24,8 +24,8 @@ const (
)

var (
	reBackslashOrAmp      = regexp.MustCompile("[\\&]")
	reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
	reBackslashOrAmp      = regexp.MustCompile(`[\&]`)
	reEntityOrEscapedChar = regexp.MustCompile(`(?i)\\` + escapable + "|" + charEntity)

	// blockTags is a set of tags that are recognized as HTML block tags.
	// Any of these can be included in markdown text without special escaping.
@@ -1675,6 +1675,12 @@ func (p *Parser) paragraph(data []byte) int {
			return i
		}

		// if there's a block quote, paragraph is over
		if p.quotePrefix(current) > 0 {
			p.renderParagraph(data[:i])
			return i
		}

		// if there's a fenced code block, paragraph is over
		if p.extensions&FencedCode != 0 {
			if p.fencedCodeBlock(current, false) > 0 {
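
The first block.go hunk only changes how the two patterns are written: raw string literals replace interpreted ones, so the backslashes no longer need doubling. A small standalone check of that equivalence (plain Go, not project code):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// In an interpreted string literal, "\\&" encodes the two characters \&,
	// so both spellings below hand the exact same pattern text, [\&], to
	// regexp.MustCompile; the raw-string form simply avoids the doubled backslash.
	interpreted := regexp.MustCompile("[\\&]")
	raw := regexp.MustCompile(`[\&]`)

	fmt.Println(interpreted.String())                 // [\&]
	fmt.Println(raw.String())                         // [\&]
	fmt.Println(interpreted.String() == raw.String()) // true
	fmt.Println(raw.MatchString("AT&T"))              // true
}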
10 vendor/github.com/gomarkdown/markdown/parser/inline.go generated vendored
@@ -6,6 +6,7 @@ import (
	"strconv"

	"github.com/gomarkdown/markdown/ast"
	"github.com/gomarkdown/markdown/internal/valid"
)

// Parsing of inline elements
@@ -994,12 +995,9 @@ func isEndOfLink(char byte) bool {
	return isSpace(char) || char == '<'
}

var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

func isSafeLink(link []byte) bool {
	nLink := len(link)
	for _, path := range validPaths {
	for _, path := range valid.Paths {
		nPath := len(path)
		linkPrefix := link[:nPath]
		if nLink >= nPath && bytes.Equal(linkPrefix, path) {
@@ -1011,7 +1009,7 @@ func isSafeLink(link []byte) bool {
		}
	}

	for _, prefix := range validUris {
	for _, prefix := range valid.URIs {
		// TODO: handle unicode here
		// case-insensitive prefix test
		nPrefix := len(prefix)
@@ -1119,7 +1117,7 @@ func isMailtoAutoLink(data []byte) int {
			nb++

		case '-', '.', '_':
			break
			// no-op but not defult

		case '>':
			if nb == 1 {
3 vendor/github.com/gomarkdown/markdown/parser/parser.go generated vendored
@@ -8,7 +8,6 @@ import (
	"fmt"
	"strconv"
	"strings"
	"unicode/utf8"

	"github.com/gomarkdown/markdown/ast"
)
@@ -720,6 +719,7 @@ func isAlnum(c byte) bool {
// TODO: this is not used
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// always ends output with a newline
/*
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
@@ -775,6 +775,7 @@ func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
		i++
	}
}
*/

// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
7 vendor/github.com/klauspost/compress/.gitignore generated vendored
@@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe

# Linux perf files
perf.data
perf.data.old

# gdb history
.gdb_history
40 vendor/github.com/klauspost/compress/README.md generated vendored
@@ -17,6 +17,41 @@ This package provides various compression algorithms.
 | 
			
		||||
 | 
			
		||||
# changelog
 | 
			
		||||
 | 
			
		||||
* May 25, 2022 (v1.15.5)
 | 
			
		||||
	* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
 | 
			
		||||
	* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
 | 
			
		||||
	* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
 | 
			
		||||
	* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
 | 
			
		||||
	* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
 | 
			
		||||
	* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
 | 
			
		||||
	* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
 | 
			
		||||
	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
 | 
			
		||||
	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* May 11, 2022 (v1.15.4)
 | 
			
		||||
	* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
 | 
			
		||||
	* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
 | 
			
		||||
	* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
 | 
			
		||||
	* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
 | 
			
		||||
 | 
			
		||||
* May 5, 2022 (v1.15.3)
 | 
			
		||||
	* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
 | 
			
		||||
	* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
 | 
			
		||||
 | 
			
		||||
* Apr 26, 2022 (v1.15.2)
 | 
			
		||||
	* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster.  [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
 | 
			
		||||
	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
 | 
			
		||||
	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
 | 
			
		||||
	* Minimum version is Go 1.16, added CI test on 1.18.
 | 
			
		||||
 | 
			
		||||
* Mar 11, 2022 (v1.15.1)
 | 
			
		||||
	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
 | 
			
		||||
	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
 | 
			
		||||
	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
 | 
			
		||||
	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
 | 
			
		||||
	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
 | 
			
		||||
 | 
			
		||||
* Mar 3, 2022 (v1.15.0)
 | 
			
		||||
	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
 | 
			
		||||
	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
 | 
			
		||||
@@ -60,6 +95,9 @@ While the release has been extensively tested, it is recommended to testing when
 | 
			
		||||
	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 | 
			
		||||
	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
 | 
			
		||||
 | 
			
		||||
<details>
 | 
			
		||||
	<summary>See changes to v1.13.x</summary>
 | 
			
		||||
	
 | 
			
		||||
* Aug 30, 2021 (v1.13.5)
 | 
			
		||||
	* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
 | 
			
		||||
	* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
 | 
			
		||||
@@ -88,6 +126,8 @@ While the release has been extensively tested, it is recommended to testing when
 | 
			
		||||
	* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
 | 
			
		||||
	* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 | 
			
		||||
	* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
 | 
			
		||||
</details>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
<details>
 | 
			
		||||
	<summary>See changes to v1.12.x</summary>
 | 
			
		||||
 
5 vendor/github.com/klauspost/compress/huff0/autogen.go generated vendored
@@ -1,5 +0,0 @@
package huff0

//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s
10 vendor/github.com/klauspost/compress/huff0/bitreader.go generated vendored
@@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
	return uint16(b.value >> ((64 - n) & 63))
}

// peekTopBits(n) is equvialent to peekBitFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
	return uint16(b.value >> n)
}

func (b *bitReaderShifted) advance(n uint8) {
	b.bitsRead += n
	b.value <<= n & 63
@@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
	}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReaderShifted) finished() bool {
	return b.off == 0 && b.bitsRead >= 64
}

func (b *bitReaderShifted) remaining() uint {
	return b.off*8 + uint(64-b.bitsRead)
}
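
peekBitsFast (kept) and peekTopBits (removed above) both read the top bits of the same left-aligned 64-bit container, just with the shift amount computed on different sides of the call. A tiny standalone illustration of that relationship, independent of the huff0 types:

package main

import "fmt"

// A shifted bit reader keeps the unread bits left-aligned in a 64-bit word,
// so peeking the top n bits is just a right shift by 64-n.
func main() {
	var value uint64 = 0xA5<<56 | 1 // top byte is 10100101

	n := uint8(8)
	peekBitsFast := uint16(value >> ((64 - n) & 63)) // top n bits, as in peekBitsFast(n)
	peekTopBits := uint16(value >> (64 - n))         // same bits, as peekTopBits(64 - n) read them

	fmt.Printf("%#x %#x\n", peekBitsFast, peekTopBits) // 0xa5 0xa5
}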
115 vendor/github.com/klauspost/compress/huff0/bitwriter.go generated vendored
@@ -5,8 +5,6 @@
 | 
			
		||||
 | 
			
		||||
package huff0
 | 
			
		||||
 | 
			
		||||
import "fmt"
 | 
			
		||||
 | 
			
		||||
// bitWriter will write bits.
 | 
			
		||||
// First bit will be LSB of the first byte of output.
 | 
			
		||||
type bitWriter struct {
 | 
			
		||||
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
 | 
			
		||||
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
 | 
			
		||||
	0xFFFF, 0xFFFF} /* up to 16 bits */
 | 
			
		||||
 | 
			
		||||
// addBits16NC will add up to 16 bits.
 | 
			
		||||
// It will not check if there is space for them,
 | 
			
		||||
// so the caller must ensure that it has flushed recently.
 | 
			
		||||
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
 | 
			
		||||
	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
 | 
			
		||||
	b.nBits += bits
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 | 
			
		||||
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 | 
			
		||||
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
 | 
			
		||||
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
 | 
			
		||||
	b.nBits += encA.nBits + encB.nBits
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addBits16ZeroNC will add up to 16 bits.
 | 
			
		||||
// It will not check if there is space for them,
 | 
			
		||||
// so the caller must ensure that it has flushed recently.
 | 
			
		||||
// This is fastest if bits can be zero.
 | 
			
		||||
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
 | 
			
		||||
	if bits == 0 {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	value <<= (16 - bits) & 15
 | 
			
		||||
	value >>= (16 - bits) & 15
 | 
			
		||||
	b.bitContainer |= uint64(value) << (b.nBits & 63)
 | 
			
		||||
	b.nBits += bits
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// flush will flush all pending full bytes.
 | 
			
		||||
// There will be at least 56 bits available for writing when this has been called.
 | 
			
		||||
// Using flush32 is faster, but leaves less space for writing.
 | 
			
		||||
func (b *bitWriter) flush() {
 | 
			
		||||
	v := b.nBits >> 3
 | 
			
		||||
	switch v {
 | 
			
		||||
	case 0:
 | 
			
		||||
		return
 | 
			
		||||
	case 1:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 1 << 3
 | 
			
		||||
	case 2:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 2 << 3
 | 
			
		||||
	case 3:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
			byte(b.bitContainer>>16),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 3 << 3
 | 
			
		||||
	case 4:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
			byte(b.bitContainer>>16),
 | 
			
		||||
			byte(b.bitContainer>>24),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 4 << 3
 | 
			
		||||
	case 5:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
			byte(b.bitContainer>>16),
 | 
			
		||||
			byte(b.bitContainer>>24),
 | 
			
		||||
			byte(b.bitContainer>>32),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 5 << 3
 | 
			
		||||
	case 6:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
			byte(b.bitContainer>>16),
 | 
			
		||||
			byte(b.bitContainer>>24),
 | 
			
		||||
			byte(b.bitContainer>>32),
 | 
			
		||||
			byte(b.bitContainer>>40),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 6 << 3
 | 
			
		||||
	case 7:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
			byte(b.bitContainer>>16),
 | 
			
		||||
			byte(b.bitContainer>>24),
 | 
			
		||||
			byte(b.bitContainer>>32),
 | 
			
		||||
			byte(b.bitContainer>>40),
 | 
			
		||||
			byte(b.bitContainer>>48),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer >>= 7 << 3
 | 
			
		||||
	case 8:
 | 
			
		||||
		b.out = append(b.out,
 | 
			
		||||
			byte(b.bitContainer),
 | 
			
		||||
			byte(b.bitContainer>>8),
 | 
			
		||||
			byte(b.bitContainer>>16),
 | 
			
		||||
			byte(b.bitContainer>>24),
 | 
			
		||||
			byte(b.bitContainer>>32),
 | 
			
		||||
			byte(b.bitContainer>>40),
 | 
			
		||||
			byte(b.bitContainer>>48),
 | 
			
		||||
			byte(b.bitContainer>>56),
 | 
			
		||||
		)
 | 
			
		||||
		b.bitContainer = 0
 | 
			
		||||
		b.nBits = 0
 | 
			
		||||
		return
 | 
			
		||||
	default:
 | 
			
		||||
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
 | 
			
		||||
	}
 | 
			
		||||
	b.nBits &= 7
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// flush32 will flush out, so there are at least 32 bits available for writing.
 | 
			
		||||
func (b *bitWriter) flush32() {
 | 
			
		||||
	if b.nBits < 32 {
 | 
			
		||||
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
 | 
			
		||||
	b.flushAlign()
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// reset and continue writing by appending to out.
 | 
			
		||||
func (b *bitWriter) reset(out []byte) {
 | 
			
		||||
	b.bitContainer = 0
 | 
			
		||||
	b.nBits = 0
 | 
			
		||||
	b.out = out
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
10 vendor/github.com/klauspost/compress/huff0/bytereader.go generated vendored
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
	b.off = 0
}

// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}

// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
	v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}

// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}

// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
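
The Uint32 context kept above assembles a little-endian value by shifting the four input bytes into place. A standalone sanity check of that expression against encoding/binary (plain Go, independent of huff0):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}

	// Same shift-and-or shape as the Uint32 shown above.
	v0, v1, v2, v3 := uint32(b[0]), uint32(b[1]), uint32(b[2]), uint32(b[3])
	manual := (v3 << 24) | (v2 << 16) | (v1 << 8) | v0

	fmt.Printf("%#x\n", manual)                          // 0x4030201
	fmt.Println(manual == binary.LittleEndian.Uint32(b)) // true
}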
1 vendor/github.com/klauspost/compress/huff0/compress.go generated vendored
@@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
	return true
}

//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
	if len(c) < int(s.symbolLen) {
		return false
113 vendor/github.com/klauspost/compress/huff0/decompress.go generated vendored
@@ -11,7 +11,6 @@ import (
 | 
			
		||||
 | 
			
		||||
type dTable struct {
 | 
			
		||||
	single []dEntrySingle
 | 
			
		||||
	double []dEntryDouble
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// single-symbols decoding
 | 
			
		||||
@@ -19,13 +18,6 @@ type dEntrySingle struct {
 | 
			
		||||
	entry uint16
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// double-symbols decoding
 | 
			
		||||
type dEntryDouble struct {
 | 
			
		||||
	seq   [4]byte
 | 
			
		||||
	nBits uint8
 | 
			
		||||
	len   uint8
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Uses special code for all tables that are < 8 bits.
 | 
			
		||||
const use8BitTables = true
 | 
			
		||||
 | 
			
		||||
@@ -35,7 +27,7 @@ const use8BitTables = true
 | 
			
		||||
// If no Scratch is provided a new one is allocated.
 | 
			
		||||
// The returned Scratch can be used for encoding or decoding input using this table.
 | 
			
		||||
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
 | 
			
		||||
	s, err = s.prepare(in)
 | 
			
		||||
	s, err = s.prepare(nil)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return s, nil, err
 | 
			
		||||
	}
 | 
			
		||||
@@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
 | 
			
		||||
	return &[4][256]byte{}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Decompress1X will decompress a 1X encoded stream.
 | 
			
		||||
// The cap of the output buffer will be the maximum decompressed size.
 | 
			
		||||
// The length of the supplied input must match the end of a block exactly.
 | 
			
		||||
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 | 
			
		||||
	if len(d.dt.single) == 0 {
 | 
			
		||||
		return nil, errors.New("no table loaded")
 | 
			
		||||
	}
 | 
			
		||||
	if use8BitTables && d.actualTableLog <= 8 {
 | 
			
		||||
		return d.decompress1X8Bit(dst, src)
 | 
			
		||||
	}
 | 
			
		||||
	var br bitReaderShifted
 | 
			
		||||
	err := br.init(src)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return dst, err
 | 
			
		||||
	}
 | 
			
		||||
	maxDecodedSize := cap(dst)
 | 
			
		||||
	dst = dst[:0]
 | 
			
		||||
 | 
			
		||||
	// Avoid bounds check by always having full sized table.
 | 
			
		||||
	const tlSize = 1 << tableLogMax
 | 
			
		||||
	const tlMask = tlSize - 1
 | 
			
		||||
	dt := d.dt.single[:tlSize]
 | 
			
		||||
 | 
			
		||||
	// Use temp table to avoid bound checks/append penalty.
 | 
			
		||||
	bufs := d.buffer()
 | 
			
		||||
	buf := &bufs[0]
 | 
			
		||||
	var off uint8
 | 
			
		||||
 | 
			
		||||
	for br.off >= 8 {
 | 
			
		||||
		br.fillFast()
 | 
			
		||||
		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
 | 
			
		||||
		br.advance(uint8(v.entry))
 | 
			
		||||
		buf[off+0] = uint8(v.entry >> 8)
 | 
			
		||||
 | 
			
		||||
		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
 | 
			
		||||
		br.advance(uint8(v.entry))
 | 
			
		||||
		buf[off+1] = uint8(v.entry >> 8)
 | 
			
		||||
 | 
			
		||||
		// Refill
 | 
			
		||||
		br.fillFast()
 | 
			
		||||
 | 
			
		||||
		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
 | 
			
		||||
		br.advance(uint8(v.entry))
 | 
			
		||||
		buf[off+2] = uint8(v.entry >> 8)
 | 
			
		||||
 | 
			
		||||
		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
 | 
			
		||||
		br.advance(uint8(v.entry))
 | 
			
		||||
		buf[off+3] = uint8(v.entry >> 8)
 | 
			
		||||
 | 
			
		||||
		off += 4
 | 
			
		||||
		if off == 0 {
 | 
			
		||||
			if len(dst)+256 > maxDecodedSize {
 | 
			
		||||
				br.close()
 | 
			
		||||
				d.bufs.Put(bufs)
 | 
			
		||||
				return nil, ErrMaxDecodedSizeExceeded
 | 
			
		||||
			}
 | 
			
		||||
			dst = append(dst, buf[:]...)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(dst)+int(off) > maxDecodedSize {
 | 
			
		||||
		d.bufs.Put(bufs)
 | 
			
		||||
		br.close()
 | 
			
		||||
		return nil, ErrMaxDecodedSizeExceeded
 | 
			
		||||
	}
 | 
			
		||||
	dst = append(dst, buf[:off]...)
 | 
			
		||||
 | 
			
		||||
	// br < 8, so uint8 is fine
 | 
			
		||||
	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
 | 
			
		||||
	for bitsLeft > 0 {
 | 
			
		||||
		br.fill()
 | 
			
		||||
		if false && br.bitsRead >= 32 {
 | 
			
		||||
			if br.off >= 4 {
 | 
			
		||||
				v := br.in[br.off-4:]
 | 
			
		||||
				v = v[:4]
 | 
			
		||||
				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 | 
			
		||||
				br.value = (br.value << 32) | uint64(low)
 | 
			
		||||
				br.bitsRead -= 32
 | 
			
		||||
				br.off -= 4
 | 
			
		||||
			} else {
 | 
			
		||||
				for br.off > 0 {
 | 
			
		||||
					br.value = (br.value << 8) | uint64(br.in[br.off-1])
 | 
			
		||||
					br.bitsRead -= 8
 | 
			
		||||
					br.off--
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if len(dst) >= maxDecodedSize {
 | 
			
		||||
			d.bufs.Put(bufs)
 | 
			
		||||
			br.close()
 | 
			
		||||
			return nil, ErrMaxDecodedSizeExceeded
 | 
			
		||||
		}
 | 
			
		||||
		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
 | 
			
		||||
		nBits := uint8(v.entry)
 | 
			
		||||
		br.advance(nBits)
 | 
			
		||||
		bitsLeft -= nBits
 | 
			
		||||
		dst = append(dst, uint8(v.entry>>8))
 | 
			
		||||
	}
 | 
			
		||||
	d.bufs.Put(bufs)
 | 
			
		||||
	return dst, br.close()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
 | 
			
		||||
// The cap of the output buffer will be the maximum decompressed size.
 | 
			
		||||
// The length of the supplied input must match the end of a block exactly.
 | 
			
		||||
@@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 | 
			
		||||
 | 
			
		||||
	const shift = 56
 | 
			
		||||
	const tlSize = 1 << 8
 | 
			
		||||
	const tlMask = tlSize - 1
 | 
			
		||||
	single := d.dt.single[:tlSize]
 | 
			
		||||
 | 
			
		||||
	// Use temp table to avoid bound checks/append penalty.
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
488 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s generated vendored
@@ -1,488 +0,0 @@
 | 
			
		||||
// +build !appengine
 | 
			
		||||
// +build gc
 | 
			
		||||
// +build !noasm
 | 
			
		||||
 | 
			
		||||
#include "textflag.h"
 | 
			
		||||
#include "funcdata.h"
 | 
			
		||||
#include "go_asm.h"
 | 
			
		||||
 | 
			
		||||
#define bufoff      256 // see decompress.go, we're using [4][256]byte table
 | 
			
		||||
 | 
			
		||||
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
 | 
			
		||||
//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
 | 
			
		||||
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
 | 
			
		||||
#define off             R8
 | 
			
		||||
#define buffer          DI
 | 
			
		||||
#define table           SI
 | 
			
		||||
 | 
			
		||||
#define br_bits_read    R9
 | 
			
		||||
#define br_value        R10
 | 
			
		||||
#define br_offset       R11
 | 
			
		||||
#define peek_bits       R12
 | 
			
		||||
#define exhausted       DX
 | 
			
		||||
 | 
			
		||||
#define br0             R13
 | 
			
		||||
#define br1             R14
 | 
			
		||||
#define br2             R15
 | 
			
		||||
#define br3             BP
 | 
			
		||||
 | 
			
		||||
	MOVQ BP, 0(SP)
 | 
			
		||||
 | 
			
		||||
	XORQ exhausted, exhausted // exhausted = false
 | 
			
		||||
	XORQ off, off             // off = 0
 | 
			
		||||
 | 
			
		||||
	MOVBQZX peekBits+32(FP), peek_bits
 | 
			
		||||
	MOVQ    buf+40(FP), buffer
 | 
			
		||||
	MOVQ    tbl+48(FP), table
 | 
			
		||||
 | 
			
		||||
	MOVQ pbr0+0(FP), br0
 | 
			
		||||
	MOVQ pbr1+8(FP), br1
 | 
			
		||||
	MOVQ pbr2+16(FP), br2
 | 
			
		||||
	MOVQ pbr3+24(FP), br3
 | 
			
		||||
 | 
			
		||||
main_loop:
 | 
			
		||||
 | 
			
		||||
	// const stream = 0
 | 
			
		||||
	// br0.fillFast()
 | 
			
		||||
	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
 | 
			
		||||
	MOVQ    bitReaderShifted_value(br0), br_value
 | 
			
		||||
	MOVQ    bitReaderShifted_off(br0), br_offset
 | 
			
		||||
 | 
			
		||||
	// if b.bitsRead >= 32 {
 | 
			
		||||
	CMPQ br_bits_read, $32
 | 
			
		||||
	JB   skip_fill0
 | 
			
		||||
 | 
			
		||||
	SUBQ $32, br_bits_read // b.bitsRead -= 32
 | 
			
		||||
	SUBQ $4, br_offset     // b.off -= 4
 | 
			
		||||
 | 
			
		||||
	// v := b.in[b.off-4 : b.off]
 | 
			
		||||
	// v = v[:4]
 | 
			
		||||
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 | 
			
		||||
	MOVQ bitReaderShifted_in(br0), AX
 | 
			
		||||
	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
 | 
			
		||||
 | 
			
		||||
	// b.value |= uint64(low) << (b.bitsRead & 63)
 | 
			
		||||
	MOVQ br_bits_read, CX
 | 
			
		||||
	SHLQ CL, AX
 | 
			
		||||
	ORQ  AX, br_value
 | 
			
		||||
 | 
			
		||||
	// exhausted = exhausted || (br0.off < 4)
 | 
			
		||||
	CMPQ  br_offset, $4
 | 
			
		||||
	SETLT DL
 | 
			
		||||
	ORB   DL, DH
 | 
			
		||||
 | 
			
		||||
	// }
 | 
			
		||||
skip_fill0:
 | 
			
		||||
 | 
			
		||||
	// val0 := br0.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v0 := table[val0&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v0
 | 
			
		||||
 | 
			
		||||
	// br0.advance(uint8(v0.entry))
 | 
			
		||||
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CL, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// val1 := br0.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v1 := table[val1&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v1
 | 
			
		||||
 | 
			
		||||
	// br0.advance(uint8(v1.entry))
 | 
			
		||||
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CX, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// these two writes get coalesced
 | 
			
		||||
	// buf[stream][off] = uint8(v0.entry >> 8)
 | 
			
		||||
	// buf[stream][off+1] = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVW BX, 0(buffer)(off*1)
 | 
			
		||||
 | 
			
		||||
	// SECOND PART:
 | 
			
		||||
	// val2 := br0.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v2 := table[val0&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v0
 | 
			
		||||
 | 
			
		||||
	// br0.advance(uint8(v0.entry))
 | 
			
		||||
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CL, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// val3 := br0.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v3 := table[val1&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v1
 | 
			
		||||
 | 
			
		||||
	// br0.advance(uint8(v1.entry))
 | 
			
		||||
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CX, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// these two writes get coalesced
 | 
			
		||||
	// buf[stream][off+2] = uint8(v2.entry >> 8)
 | 
			
		||||
	// buf[stream][off+3] = uint8(v3.entry >> 8)
 | 
			
		||||
	MOVW BX, 0+2(buffer)(off*1)
 | 
			
		||||
 | 
			
		||||
	// update the bitrader reader structure
 | 
			
		||||
	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
 | 
			
		||||
	MOVQ br_value, bitReaderShifted_value(br0)
 | 
			
		||||
	MOVQ br_offset, bitReaderShifted_off(br0)
 | 
			
		||||
 | 
			
		||||
	// const stream = 1
 | 
			
		||||
	// br1.fillFast()
 | 
			
		||||
	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
 | 
			
		||||
	MOVQ    bitReaderShifted_value(br1), br_value
 | 
			
		||||
	MOVQ    bitReaderShifted_off(br1), br_offset
 | 
			
		||||
 | 
			
		||||
	// if b.bitsRead >= 32 {
 | 
			
		||||
	CMPQ br_bits_read, $32
 | 
			
		||||
	JB   skip_fill1
 | 
			
		||||
 | 
			
		||||
	SUBQ $32, br_bits_read // b.bitsRead -= 32
 | 
			
		||||
	SUBQ $4, br_offset     // b.off -= 4
 | 
			
		||||
 | 
			
		||||
	// v := b.in[b.off-4 : b.off]
 | 
			
		||||
	// v = v[:4]
 | 
			
		||||
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 | 
			
		||||
	MOVQ bitReaderShifted_in(br1), AX
 | 
			
		||||
	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
 | 
			
		||||
 | 
			
		||||
	// b.value |= uint64(low) << (b.bitsRead & 63)
 | 
			
		||||
	MOVQ br_bits_read, CX
 | 
			
		||||
	SHLQ CL, AX
 | 
			
		||||
	ORQ  AX, br_value
 | 
			
		||||
 | 
			
		||||
	// exhausted = exhausted || (br1.off < 4)
 | 
			
		||||
	CMPQ  br_offset, $4
 | 
			
		||||
	SETLT DL
 | 
			
		||||
	ORB   DL, DH
 | 
			
		||||
 | 
			
		||||
	// }
 | 
			
		||||
skip_fill1:
 | 
			
		||||
 | 
			
		||||
	// val0 := br1.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v0 := table[val0&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v0
 | 
			
		||||
 | 
			
		||||
	// br1.advance(uint8(v0.entry))
 | 
			
		||||
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CL, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// val1 := br1.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v1 := table[val1&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v1
 | 
			
		||||
 | 
			
		||||
	// br1.advance(uint8(v1.entry))
 | 
			
		||||
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CX, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// these two writes get coalesced
 | 
			
		||||
	// buf[stream][off] = uint8(v0.entry >> 8)
 | 
			
		||||
	// buf[stream][off+1] = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVW BX, 256(buffer)(off*1)
 | 
			
		||||
 | 
			
		||||
	// SECOND PART:
 | 
			
		||||
	// val2 := br1.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v2 := table[val0&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v0
 | 
			
		||||
 | 
			
		||||
	// br1.advance(uint8(v0.entry))
 | 
			
		||||
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CL, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// val3 := br1.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v3 := table[val1&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v1
 | 
			
		||||
 | 
			
		||||
	// br1.advance(uint8(v1.entry))
 | 
			
		||||
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CX, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// these two writes get coalesced
 | 
			
		||||
	// buf[stream][off+2] = uint8(v2.entry >> 8)
 | 
			
		||||
	// buf[stream][off+3] = uint8(v3.entry >> 8)
 | 
			
		||||
	MOVW BX, 256+2(buffer)(off*1)
 | 
			
		||||
 | 
			
		||||
	// update the bitrader reader structure
 | 
			
		||||
	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
 | 
			
		||||
	MOVQ br_value, bitReaderShifted_value(br1)
 | 
			
		||||
	MOVQ br_offset, bitReaderShifted_off(br1)
 | 
			
		||||
 | 
			
		||||
	// const stream = 2
 | 
			
		||||
	// br2.fillFast()
 | 
			
		||||
	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
 | 
			
		||||
	MOVQ    bitReaderShifted_value(br2), br_value
 | 
			
		||||
	MOVQ    bitReaderShifted_off(br2), br_offset
 | 
			
		||||
 | 
			
		||||
	// if b.bitsRead >= 32 {
 | 
			
		||||
	CMPQ br_bits_read, $32
 | 
			
		||||
	JB   skip_fill2
 | 
			
		||||
 | 
			
		||||
	SUBQ $32, br_bits_read // b.bitsRead -= 32
 | 
			
		||||
	SUBQ $4, br_offset     // b.off -= 4
 | 
			
		||||
 | 
			
		||||
	// v := b.in[b.off-4 : b.off]
 | 
			
		||||
	// v = v[:4]
 | 
			
		||||
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 | 
			
		||||
	MOVQ bitReaderShifted_in(br2), AX
 | 
			
		||||
	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])
 | 
			
		||||
 | 
			
		||||
	// b.value |= uint64(low) << (b.bitsRead & 63)
 | 
			
		||||
	MOVQ br_bits_read, CX
 | 
			
		||||
	SHLQ CL, AX
 | 
			
		||||
	ORQ  AX, br_value
 | 
			
		||||
 | 
			
		||||
	// exhausted = exhausted || (br2.off < 4)
 | 
			
		||||
	CMPQ  br_offset, $4
 | 
			
		||||
	SETLT DL
 | 
			
		||||
	ORB   DL, DH
 | 
			
		||||
 | 
			
		||||
	// }
 | 
			
		||||
skip_fill2:
 | 
			
		||||
 | 
			
		||||
	// val0 := br2.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v0 := table[val0&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v0
 | 
			
		||||
 | 
			
		||||
	// br2.advance(uint8(v0.entry))
 | 
			
		||||
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CL, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// val1 := br2.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v1 := table[val1&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v1
 | 
			
		||||
 | 
			
		||||
	// br2.advance(uint8(v1.entry))
 | 
			
		||||
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CX, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// these two writes get coalesced
 | 
			
		||||
	// buf[stream][off] = uint8(v0.entry >> 8)
 | 
			
		||||
	// buf[stream][off+1] = uint8(v1.entry >> 8)
 | 
			
		||||
	MOVW BX, 512(buffer)(off*1)
 | 
			
		||||
 | 
			
		||||
	// SECOND PART:
 | 
			
		||||
	// val2 := br2.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v2 := table[val0&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v0
 | 
			
		||||
 | 
			
		||||
	// br2.advance(uint8(v0.entry))
 | 
			
		||||
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
 | 
			
		||||
	MOVBQZX AL, CX
 | 
			
		||||
	SHLQ    CL, br_value     // value <<= n
 | 
			
		||||
	ADDQ    CX, br_bits_read // bits_read += n
 | 
			
		||||
 | 
			
		||||
	// val3 := br2.peekTopBits(peekBits)
 | 
			
		||||
	MOVQ peek_bits, CX
 | 
			
		||||
	MOVQ br_value, AX
 | 
			
		||||
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask
 | 
			
		||||
 | 
			
		||||
	// v3 := table[val1&mask]
 | 
			
		||||
	MOVW 0(table)(AX*2), AX // AX - v1
	// br2.advance(uint8(v1.entry))
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CX, br_value     // value <<= n
	ADDQ    CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off+2] = uint8(v2.entry >> 8)
	// buf[stream][off+3] = uint8(v3.entry >> 8)
	MOVW BX, 512+2(buffer)(off*1)

	// update the bitrader reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
	MOVQ br_value, bitReaderShifted_value(br2)
	MOVQ br_offset, bitReaderShifted_off(br2)

	// const stream = 3
	// br3.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
	MOVQ    bitReaderShifted_value(br3), br_value
	MOVQ    bitReaderShifted_off(br3), br_offset

	// if b.bitsRead >= 32 {
	CMPQ br_bits_read, $32
	JB   skip_fill3

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br3), AX
	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4])

	// b.value |= uint64(low) << (b.bitsRead & 63)
	MOVQ br_bits_read, CX
	SHLQ CL, AX
	ORQ  AX, br_value

	// exhausted = exhausted || (br3.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH

	// }
skip_fill3:

	// val0 := br3.peekTopBits(peekBits)
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br3.advance(uint8(v0.entry))
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CL, br_value     // value <<= n
	ADDQ    CX, br_bits_read // bits_read += n

	// val1 := br3.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br3.advance(uint8(v1.entry))
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CX, br_value     // value <<= n
	ADDQ    CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, 768(buffer)(off*1)

	// SECOND PART:
	// val2 := br3.peekTopBits(peekBits)
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask

	// v2 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br3.advance(uint8(v0.entry))
	MOVB    AH, BL           // BL = uint8(v0.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CL, br_value     // value <<= n
	ADDQ    CX, br_bits_read // bits_read += n

	// val3 := br3.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX        // AX = (value >> peek_bits) & mask

	// v3 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br3.advance(uint8(v1.entry))
	MOVB    AH, BH           // BH = uint8(v1.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CX, br_value     // value <<= n
	ADDQ    CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off+2] = uint8(v2.entry >> 8)
	// buf[stream][off+3] = uint8(v3.entry >> 8)
	MOVW BX, 768+2(buffer)(off*1)

	// update the bitrader reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
	MOVQ br_value, bitReaderShifted_value(br3)
	MOVQ br_offset, bitReaderShifted_off(br3)

	ADDQ $4, off // off += 2

	TESTB DH, DH // any br[i].ofs < 4?
	JNZ   end

	CMPQ off, $bufoff
	JL   main_loop

end:
	MOVQ 0(SP), BP

	MOVB off, ret+56(FP)
	RET

#undef off
#undef buffer
#undef table

#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted

#undef br0
#undef br1
#undef br2
#undef br3
							
								
								
									
197	vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in (generated, vendored)
@@ -1,197 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

#define bufoff      256     // see decompress.go, we're using [4][256]byte table

//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off             R8
#define buffer          DI
#define table           SI

#define br_bits_read    R9
#define br_value        R10
#define br_offset       R11
#define peek_bits       R12
#define exhausted       DX

#define br0             R13
#define br1             R14
#define br2             R15
#define br3             BP

    MOVQ    BP, 0(SP)

    XORQ    exhausted, exhausted    // exhausted = false
    XORQ    off, off                // off = 0

    MOVBQZX peekBits+32(FP), peek_bits
    MOVQ    buf+40(FP), buffer
    MOVQ    tbl+48(FP), table

    MOVQ    pbr0+0(FP), br0
    MOVQ    pbr1+8(FP), br1
    MOVQ    pbr2+16(FP), br2
    MOVQ    pbr3+24(FP), br3

main_loop:
{{ define "decode_2_values_x86" }}
    // const stream = {{ var "id" }}
    // br{{ var "id"}}.fillFast()
    MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
    MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
    MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset

	// if b.bitsRead >= 32 {
    CMPQ    br_bits_read, $32
    JB      skip_fill{{ var "id" }}

    SUBQ    $32, br_bits_read       // b.bitsRead -= 32
    SUBQ    $4, br_offset           // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
    MOVQ    bitReaderShifted_in(br{{ var "id" }}), AX
    MOVL    0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])

	// b.value |= uint64(low) << (b.bitsRead & 63)
    MOVQ    br_bits_read, CX
    SHLQ    CL, AX
    ORQ     AX, br_value

    // exhausted = exhausted || (br{{ var "id"}}.off < 4)
    CMPQ    br_offset, $4
    SETLT   DL
    ORB     DL, DH
    // }
skip_fill{{ var "id" }}:

    // val0 := br{{ var "id"}}.peekTopBits(peekBits)
    MOVQ    br_value, AX
    MOVQ    peek_bits, CX
    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask

    // v0 := table[val0&mask]
    MOVW    0(table)(AX*2), AX      // AX - v0

    // br{{ var "id"}}.advance(uint8(v0.entry))
    MOVB    AH, BL                  // BL = uint8(v0.entry >> 8)
    MOVBQZX AL, CX
    SHLQ    CL, br_value            // value <<= n
    ADDQ    CX, br_bits_read        // bits_read += n

    // val1 := br{{ var "id"}}.peekTopBits(peekBits)
    MOVQ    peek_bits, CX
    MOVQ    br_value, AX
    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask

    // v1 := table[val1&mask]
    MOVW    0(table)(AX*2), AX      // AX - v1

    // br{{ var "id"}}.advance(uint8(v1.entry))
    MOVB    AH, BH                  // BH = uint8(v1.entry >> 8)
    MOVBQZX AL, CX
    SHLQ    CX, br_value            // value <<= n
    ADDQ    CX, br_bits_read        // bits_read += n

    // these two writes get coalesced
    // buf[stream][off] = uint8(v0.entry >> 8)
    // buf[stream][off+1] = uint8(v1.entry >> 8)
    MOVW    BX, {{ var "bufofs" }}(buffer)(off*1)

    // SECOND PART:
    // val2 := br{{ var "id"}}.peekTopBits(peekBits)
    MOVQ    br_value, AX
    MOVQ    peek_bits, CX
    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask

    // v2 := table[val0&mask]
    MOVW    0(table)(AX*2), AX      // AX - v0

    // br{{ var "id"}}.advance(uint8(v0.entry))
    MOVB    AH, BL                  // BL = uint8(v0.entry >> 8)
    MOVBQZX AL, CX
    SHLQ    CL, br_value            // value <<= n
    ADDQ    CX, br_bits_read        // bits_read += n

    // val3 := br{{ var "id"}}.peekTopBits(peekBits)
    MOVQ    peek_bits, CX
    MOVQ    br_value, AX
    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask

    // v3 := table[val1&mask]
    MOVW    0(table)(AX*2), AX      // AX - v1

    // br{{ var "id"}}.advance(uint8(v1.entry))
    MOVB    AH, BH                  // BH = uint8(v1.entry >> 8)
    MOVBQZX AL, CX
    SHLQ    CX, br_value            // value <<= n
    ADDQ    CX, br_bits_read        // bits_read += n

    // these two writes get coalesced
    // buf[stream][off+2] = uint8(v2.entry >> 8)
    // buf[stream][off+3] = uint8(v3.entry >> 8)
    MOVW    BX, {{ var "bufofs" }}+2(buffer)(off*1)

    // update the bitrader reader structure
    MOVB    br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
    MOVQ    br_value, bitReaderShifted_value(br{{ var "id" }})
    MOVQ    br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}

    {{ set "id" "0" }}
    {{ set "ofs" "0" }}
    {{ set "bufofs" "0" }} {{/* id * bufoff */}}
    {{ template "decode_2_values_x86" . }}

    {{ set "id" "1" }}
    {{ set "ofs" "8" }}
    {{ set "bufofs" "256" }}
    {{ template "decode_2_values_x86" . }}

    {{ set "id" "2" }}
    {{ set "ofs" "16" }}
    {{ set "bufofs" "512" }}
    {{ template "decode_2_values_x86" . }}

    {{ set "id" "3" }}
    {{ set "ofs" "24" }}
    {{ set "bufofs" "768" }}
    {{ template "decode_2_values_x86" . }}

    ADDQ    $4, off     // off += 2

    TESTB   DH, DH      // any br[i].ofs < 4?
    JNZ     end

    CMPQ    off, $bufoff
    JL      main_loop

end:
    MOVQ    0(SP), BP

    MOVB    off, ret+56(FP)
    RET

#undef  off
#undef  buffer
#undef  table

#undef  br_bits_read
#undef  br_value
#undef  br_offset
#undef  peek_bits
#undef  exhausted

#undef  br0
#undef  br1
#undef  br2
#undef  br3
							
								
								
									
181	vendor/github.com/klauspost/compress/huff0/decompress_amd64.go (generated, vendored)
@@ -2,30 +2,43 @@
// +build amd64,!appengine,!noasm,gc

// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
// and Decoder.Decompress1X that use an asm implementation of thir main loops.
package huff0

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/internal/cpuinfo"
)

// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
// go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)

// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
// go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)

// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800

type decompress4xContext struct {
	pbr0     *bitReaderShifted
	pbr1     *bitReaderShifted
	pbr2     *bitReaderShifted
	pbr3     *bitReaderShifted
	peekBits uint8
	out      *byte
	dstEvery int
	tbl      *dEntrySingle
	decoded  int
	limit    *byte
}

// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@@ -42,6 +55,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	if cap(dst) < fallback8BitSize && use8BitTables {
		return d.decompress4X8bit(dst, src)
	}

	var br [4]bitReaderShifted
	// Decode "jump table"
	start := 6
@@ -71,70 +85,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	const tlMask = tlSize - 1
	single := d.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	buf := d.buffer()
	var off uint8
	var decoded int

	const debug = false

	// see: bitReaderShifted.peekBitsFast()
	peekBits := uint8((64 - d.actualTableLog) & 63)

	// Decode 2 values from each decoder/loop.
	const bufoff = 256
	for {
		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
			break
	if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
		ctx := decompress4xContext{
			pbr0:     &br[0],
			pbr1:     &br[1],
			pbr2:     &br[2],
			pbr3:     &br[3],
			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
			out:      &out[0],
			dstEvery: dstEvery,
			tbl:      &single[0],
			limit:    &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
		}

		if use8BitTables {
			off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
			decompress4x_8b_main_loop_amd64(&ctx)
		} else {
			off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
		}
		if debug {
			fmt.Print("DEBUG: ")
			fmt.Printf("off=%d,", off)
			for i := 0; i < 4; i++ {
				fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
					i, br[i].bitsRead, br[i].value, br[i].off)
			}
			fmt.Println("")
			decompress4x_main_loop_amd64(&ctx)
		}

		if off != 0 {
			break
		}

		if bufoff > dstEvery {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 1")
		}
		copy(out, buf[0][:])
		copy(out[dstEvery:], buf[1][:])
		copy(out[dstEvery*2:], buf[2][:])
		copy(out[dstEvery*3:], buf[3][:])
		out = out[bufoff:]
		decoded += bufoff * 4
		// There must at least be 3 buffers left.
		if len(out) < dstEvery*3 {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 2")
		}
	}
	if off > 0 {
		ioff := int(off)
		if len(out) < dstEvery*3+ioff {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 3")
		}
		copy(out, buf[0][:off])
		copy(out[dstEvery:], buf[1][:off])
		copy(out[dstEvery*2:], buf[2][:off])
		copy(out[dstEvery*3:], buf[3][:off])
		decoded += int(off) * 4
		out = out[off:]
		decoded = ctx.decoded
		out = out[decoded/4:]
	}

	// Decode remaining.
@@ -150,7 +122,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
		for bitsLeft > 0 {
			br.fill()
			if offset >= endsAt {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 4")
			}

@@ -164,7 +135,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
			offset++
		}
		if offset != endsAt {
			d.bufs.Put(buf)
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
@@ -173,9 +143,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
			return nil, err
		}
	}
	d.bufs.Put(buf)
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}

// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)

// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)

type decompress1xContext struct {
	pbr      *bitReaderShifted
	peekBits uint8
	out      *byte
	outCap   int
	tbl      *dEntrySingle
	decoded  int
}

// Error reported by asm implementations
const error_max_decoded_size_exeeded = -1

// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	var br bitReaderShifted
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:maxDecodedSize]

	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1

	if maxDecodedSize >= 4 {
		ctx := decompress1xContext{
			pbr:      &br,
			out:      &dst[0],
			outCap:   maxDecodedSize,
			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
			tbl:      &d.dt.single[0],
		}

		if cpuinfo.HasBMI2() {
			decompress1x_main_loop_bmi2(&ctx)
		} else {
			decompress1x_main_loop_amd64(&ctx)
		}
		if ctx.decoded == error_max_decoded_size_exeeded {
			return nil, ErrMaxDecodedSizeExceeded
		}

		dst = dst[:ctx.decoded]
	}

	// br < 8, so uint8 is fine
	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
	for bitsLeft > 0 {
		br.fill()
		if len(dst) >= maxDecodedSize {
			br.close()
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= nBits
		dst = append(dst, uint8(v.entry>>8))
	}
	return dst, br.close()
}
							
								
								
									
1185	vendor/github.com/klauspost/compress/huff0/decompress_amd64.s (generated, vendored)
	File diff suppressed because it is too large. Load Diff

195	vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in (generated, vendored)
@@ -1,195 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif

#define bufoff      256     // see decompress.go, we're using [4][256]byte table

//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off             R8
#define buffer          DI
#define table           SI

#define br_bits_read    R9
#define br_value        R10
#define br_offset       R11
#define peek_bits       R12
#define exhausted       DX

#define br0             R13
#define br1             R14
#define br2             R15
#define br3             BP

    MOVQ    BP, 0(SP)

    XORQ    exhausted, exhausted    // exhausted = false
    XORQ    off, off                // off = 0

    MOVBQZX peekBits+32(FP), peek_bits
    MOVQ    buf+40(FP), buffer
    MOVQ    tbl+48(FP), table

    MOVQ    pbr0+0(FP), br0
    MOVQ    pbr1+8(FP), br1
    MOVQ    pbr2+16(FP), br2
    MOVQ    pbr3+24(FP), br3

main_loop:
{{ define "decode_2_values_x86" }}
    // const stream = {{ var "id" }}
    // br{{ var "id"}}.fillFast()
    MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
    MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
    MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset

    // We must have at least 2 * max tablelog left
    CMPQ    br_bits_read, $64-22
    JBE     skip_fill{{ var "id" }}

    SUBQ    $32, br_bits_read       // b.bitsRead -= 32
    SUBQ    $4, br_offset           // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
    MOVQ    bitReaderShifted_in(br{{ var "id" }}), AX

	// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
    SHLXQ   br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
    MOVL    0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4])
    MOVQ    br_bits_read, CX
    SHLQ    CL, AX
#endif

    ORQ     AX, br_value

    // exhausted = exhausted || (br{{ var "id"}}.off < 4)
    CMPQ    br_offset, $4
    SETLT   DL
    ORB     DL, DH
    // }
skip_fill{{ var "id" }}:

    // val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
    SHRXQ   peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
    MOVQ    br_value, AX
    MOVQ    peek_bits, CX
    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
#endif

    // v0 := table[val0&mask]
    MOVW    0(table)(AX*2), AX      // AX - v0

    // br{{ var "id"}}.advance(uint8(v0.entry))
    MOVB    AH, BL                  // BL = uint8(v0.entry >> 8)

#ifdef GOAMD64_v3
    MOVBQZX AL, CX
    SHLXQ   AX, br_value, br_value // value <<= n
#else
    MOVBQZX AL, CX
    SHLQ    CL, br_value            // value <<= n
#endif

    ADDQ    CX, br_bits_read        // bits_read += n

#ifdef GOAMD64_v3
    SHRXQ    peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask
#else
    // val1 := br{{ var "id"}}.peekTopBits(peekBits)
    MOVQ    peek_bits, CX
    MOVQ    br_value, AX
    SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask
#endif

    // v1 := table[val1&mask]
    MOVW    0(table)(AX*2), AX      // AX - v1

    // br{{ var "id"}}.advance(uint8(v1.entry))
    MOVB    AH, BH                  // BH = uint8(v1.entry >> 8)

#ifdef GOAMD64_v3
    MOVBQZX AL, CX
    SHLXQ   AX, br_value, br_value // value <<= n
#else
    MOVBQZX AL, CX
    SHLQ    CL, br_value            // value <<= n
#endif

    ADDQ    CX, br_bits_read        // bits_read += n

    // these two writes get coalesced
    // buf[stream][off] = uint8(v0.entry >> 8)
    // buf[stream][off+1] = uint8(v1.entry >> 8)
    MOVW    BX, {{ var "bufofs" }}(buffer)(off*1)

    // update the bitrader reader structure
    MOVB    br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
    MOVQ    br_value, bitReaderShifted_value(br{{ var "id" }})
    MOVQ    br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}

    {{ set "id" "0" }}
    {{ set "ofs" "0" }}
    {{ set "bufofs" "0" }} {{/* id * bufoff */}}
    {{ template "decode_2_values_x86" . }}

    {{ set "id" "1" }}
    {{ set "ofs" "8" }}
    {{ set "bufofs" "256" }}
    {{ template "decode_2_values_x86" . }}

    {{ set "id" "2" }}
    {{ set "ofs" "16" }}
    {{ set "bufofs" "512" }}
    {{ template "decode_2_values_x86" . }}

    {{ set "id" "3" }}
    {{ set "ofs" "24" }}
    {{ set "bufofs" "768" }}
    {{ template "decode_2_values_x86" . }}

    ADDQ    $2, off     // off += 2

    TESTB   DH, DH      // any br[i].ofs < 4?
    JNZ     end

    CMPQ    off, $bufoff
    JL      main_loop

end:
    MOVQ    0(SP), BP

    MOVB    off, ret+56(FP)
    RET

#undef  off
#undef  buffer
#undef  table

#undef  br_bits_read
#undef  br_value
#undef  br_offset
#undef  peek_bits
#undef  exhausted

#undef  br0
#undef  br1
#undef  br2
#undef  br3
							
								
								
									
102	vendor/github.com/klauspost/compress/huff0/decompress_generic.go (generated, vendored)
@@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	}
	return dst, nil
}

// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	if use8BitTables && d.actualTableLog <= 8 {
		return d.decompress1X8Bit(dst, src)
	}
	var br bitReaderShifted
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:0]

	// Avoid bounds check by always having full sized table.
	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	dt := d.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	bufs := d.buffer()
	buf := &bufs[0]
	var off uint8

	for br.off >= 8 {
		br.fillFast()
		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+0] = uint8(v.entry >> 8)

		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+1] = uint8(v.entry >> 8)

		// Refill
		br.fillFast()

		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+2] = uint8(v.entry >> 8)

		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+3] = uint8(v.entry >> 8)

		off += 4
		if off == 0 {
			if len(dst)+256 > maxDecodedSize {
				br.close()
				d.bufs.Put(bufs)
				return nil, ErrMaxDecodedSizeExceeded
			}
			dst = append(dst, buf[:]...)
		}
	}

	if len(dst)+int(off) > maxDecodedSize {
		d.bufs.Put(bufs)
		br.close()
		return nil, ErrMaxDecodedSizeExceeded
	}
	dst = append(dst, buf[:off]...)

	// br < 8, so uint8 is fine
	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
	for bitsLeft > 0 {
		br.fill()
		if false && br.bitsRead >= 32 {
			if br.off >= 4 {
				v := br.in[br.off-4:]
				v = v[:4]
				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
				br.value = (br.value << 32) | uint64(low)
				br.bitsRead -= 32
				br.off -= 4
			} else {
				for br.off > 0 {
					br.value = (br.value << 8) | uint64(br.in[br.off-1])
					br.bitsRead -= 8
					br.off--
				}
			}
		}
		if len(dst) >= maxDecodedSize {
			d.bufs.Put(bufs)
			br.close()
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= nBits
		dst = append(dst, uint8(v.entry>>8))
	}
	d.bufs.Put(bufs)
	return dst, br.close()
}
							
								
								
									
34	vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go (generated, vendored, new file)
@@ -0,0 +1,34 @@
// Package cpuinfo gives runtime info about the current CPU.
//
// This is a very limited module meant for use internally
// in this project. For more versatile solution check
// https://github.com/klauspost/cpuid.
package cpuinfo

// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
func HasBMI1() bool {
	return hasBMI1
}

// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
func HasBMI2() bool {
	return hasBMI2
}

// DisableBMI2 will disable BMI2, for testing purposes.
// Call returned function to restore previous state.
func DisableBMI2() func() {
	old := hasBMI2
	hasBMI2 = false
	return func() {
		hasBMI2 = old
	}
}

// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
func HasBMI() bool {
	return HasBMI1() && HasBMI2()
}

var hasBMI1 bool
var hasBMI2 bool
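This new cpuinfo package is what the decompress_amd64.go hunk earlier in this diff consults to pick the BMI2 main loop at runtime. Note that it lives under internal/, so only code inside github.com/klauspost/compress can import it. A minimal, hedged sketch of the equivalent runtime check for code outside the module, using golang.org/x/sys/cpu rather than the internal package (the printed messages are illustrative only):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Same feature bit the vendored cpuinfo package reads via CPUID
	// (EAX=7, ECX=0, EBX bit 8 = BMI2).
	if cpu.X86.HasBMI2 {
		fmt.Println("BMI2 available: a BMI2-tuned loop could be selected")
	} else {
		fmt.Println("no BMI2: fall back to the baseline amd64 loop")
	}
}
```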
							
								
								
									
11	vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go (generated, vendored, new file)
@@ -0,0 +1,11 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

package cpuinfo

// go:noescape
func x86extensions() (bmi1, bmi2 bool)

func init() {
	hasBMI1, hasBMI2 = x86extensions()
}
							
								
								
									
36	vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s (generated, vendored, new file)
@@ -0,0 +1,36 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

TEXT ·x86extensions(SB), NOSPLIT, $0
	// 1. determine max EAX value
	XORQ AX, AX
	CPUID

	CMPQ AX, $7
	JB   unsupported

	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
	MOVQ $7, AX
	MOVQ $0, CX
	CPUID

	BTQ   $3, BX // bit 3 = BMI1
	SETCS AL

	BTQ   $8, BX // bit 8 = BMI2
	SETCS AH

	MOVB AL, bmi1+0(FP)
	MOVB AH, bmi2+1(FP)
	RET

unsupported:
	XORQ AX, AX
	MOVB AL, bmi1+0(FP)
	MOVB AL, bmi2+1(FP)
	RET
							
								
								
									
30	vendor/github.com/klauspost/compress/s2/README.md (generated, vendored)
@@ -19,6 +19,7 @@ This is important, so you don't have to worry about spending CPU cycles on alrea
* Adjustable compression (3 levels)
* Concurrent stream compression
* Faster decompression, even for Snappy compatible content
* Concurrent Snappy/S2 stream decompression
* Ability to quickly skip forward in compressed stream
* Random seeking with indexes
* Compatible with reading Snappy compressed content
@@ -415,6 +416,25 @@ Without assembly decompression is also very fast; single goroutine decompression

Even though S2 typically compresses better than Snappy, decompression speed is always better.

### Concurrent Stream Decompression

For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
that will decode a full stream using multiple goroutines.

Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 <input>`, best of 3:

| Input                                     | `-cpu=1`   | `-cpu=2`   | `-cpu=4`   | `-cpu=8`   | `-cpu=16`   |
|-------------------------------------------|------------|------------|------------|------------|-------------|
| enwik10.snappy                            | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
| enwik10.s2                                | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s  |
| sofia-air-quality-dataset.tar.snappy      | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
| sofia-air-quality-dataset.tar.s2          | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s  | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s  |

Scaling can be expected to be pretty linear until memory bandwidth is saturated.

For now the DecodeConcurrent can only be used for full streams without seeking or combining with regular reads.

## Block compression

@@ -873,7 +893,7 @@ for each entry {
    }

    // Uncompressed uses previous offset and adds EstBlockSize
    entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize
    entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
}

@@ -901,6 +921,14 @@ for each entry {
}
```

To decode from any given uncompressed offset `(wantOffset)`:

* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
* Start decoding from `entry[n-1].CompressedOffset`.
* Discard `entry[n-1].UncompressedOffset - wantOffset` bytes from the decoded stream.

See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.

# Format Extensions

* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
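The "Concurrent Stream Decompression" section above documents the new Reader.DecodeConcurrent method that this bump pulls in; its implementation and signature appear in the s2/decode.go hunk below. A minimal usage sketch, assuming an S2-framed stream on disk (the file names are placeholders):

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	in, err := os.Open("input.s2") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("output.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	r := s2.NewReader(in)
	// Passing 0 lets the reader use runtime.NumCPU() goroutines.
	written, err := r.DecodeConcurrent(out, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", written)
}
```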
							
								
								
									
289	vendor/github.com/klauspost/compress/s2/decode.go (generated, vendored)
@@ -11,6 +11,8 @@ import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"runtime"
 | 
			
		||||
	"sync"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
@@ -169,6 +171,14 @@ func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
 | 
			
		||||
func ReaderIgnoreCRC() ReaderOption {
 | 
			
		||||
	return func(r *Reader) error {
 | 
			
		||||
		r.ignoreCRC = true
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Reader is an io.Reader that can read Snappy-compressed bytes.
 | 
			
		||||
type Reader struct {
 | 
			
		||||
	r           io.Reader
 | 
			
		||||
@@ -191,18 +201,19 @@ type Reader struct {
 | 
			
		||||
	paramsOK       bool
 | 
			
		||||
	snappyFrame    bool
 | 
			
		||||
	ignoreStreamID bool
 | 
			
		||||
	ignoreCRC      bool
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ensureBufferSize will ensure that the buffer can take at least n bytes.
 | 
			
		||||
// If false is returned the buffer exceeds maximum allowed size.
 | 
			
		||||
func (r *Reader) ensureBufferSize(n int) bool {
 | 
			
		||||
	if len(r.buf) >= n {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	if n > r.maxBufSize {
 | 
			
		||||
		r.err = ErrCorrupt
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	if cap(r.buf) >= n {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	// Realloc buffer.
 | 
			
		||||
	r.buf = make([]byte, n)
 | 
			
		||||
	return true
 | 
			
		||||
@@ -220,6 +231,7 @@ func (r *Reader) Reset(reader io.Reader) {
 | 
			
		||||
	r.err = nil
 | 
			
		||||
	r.i = 0
 | 
			
		||||
	r.j = 0
 | 
			
		||||
	r.blockStart = 0
 | 
			
		||||
	r.readHeader = r.ignoreStreamID
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -344,7 +356,7 @@ func (r *Reader) Read(p []byte) (int, error) {
 | 
			
		||||
				r.err = err
 | 
			
		||||
				return 0, r.err
 | 
			
		||||
			}
 | 
			
		||||
			if crc(r.decoded[:n]) != checksum {
 | 
			
		||||
			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
 | 
			
		||||
				r.err = ErrCRC
 | 
			
		||||
				return 0, r.err
 | 
			
		||||
			}
 | 
			
		||||
@@ -385,7 +397,7 @@ func (r *Reader) Read(p []byte) (int, error) {
 | 
			
		||||
			if !r.readFull(r.decoded[:n], false) {
 | 
			
		||||
				return 0, r.err
 | 
			
		||||
			}
 | 
			
		||||
			if crc(r.decoded[:n]) != checksum {
 | 
			
		||||
			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
 | 
			
		||||
				r.err = ErrCRC
 | 
			
		||||
				return 0, r.err
 | 
			
		||||
			}
 | 
			
		||||
@@ -435,6 +447,259 @@ func (r *Reader) Read(p []byte) (int, error) {
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DecodeConcurrent will decode the full stream to w.
 | 
			
		||||
// This function should not be combined with reading, seeking or other operations.
 | 
			
		||||
// Up to 'concurrent' goroutines will be used.
 | 
			
		||||
// If <= 0, runtime.NumCPU will be used.
 | 
			
		||||
// On success the number of bytes decompressed nil and is returned.
 | 
			
		||||
// This is mainly intended for bigger streams.
 | 
			
		||||
func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
 | 
			
		||||
	if r.i > 0 || r.j > 0 || r.blockStart > 0 {
 | 
			
		||||
		return 0, errors.New("DecodeConcurrent called after ")
 | 
			
		||||
	}
 | 
			
		||||
	if concurrent <= 0 {
 | 
			
		||||
		concurrent = runtime.NumCPU()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Write to output
 | 
			
		||||
	var errMu sync.Mutex
 | 
			
		||||
	var aErr error
 | 
			
		||||
	setErr := func(e error) (ok bool) {
 | 
			
		||||
		errMu.Lock()
 | 
			
		||||
		defer errMu.Unlock()
 | 
			
		||||
		if e == nil {
 | 
			
		||||
			return aErr == nil
 | 
			
		||||
		}
 | 
			
		||||
		if aErr == nil {
 | 
			
		||||
			aErr = e
 | 
			
		||||
		}
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	hasErr := func() (ok bool) {
 | 
			
		||||
		errMu.Lock()
 | 
			
		||||
		v := aErr != nil
 | 
			
		||||
		errMu.Unlock()
 | 
			
		||||
		return v
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var aWritten int64
 | 
			
		||||
	toRead := make(chan []byte, concurrent)
 | 
			
		||||
	writtenBlocks := make(chan []byte, concurrent)
 | 
			
		||||
	queue := make(chan chan []byte, concurrent)
 | 
			
		||||
	reUse := make(chan chan []byte, concurrent)
 | 
			
		||||
	for i := 0; i < concurrent; i++ {
 | 
			
		||||
		toRead <- make([]byte, 0, r.maxBufSize)
 | 
			
		||||
		writtenBlocks <- make([]byte, 0, r.maxBufSize)
 | 
			
		||||
		reUse <- make(chan []byte, 1)
 | 
			
		||||
	}
 | 
			
		||||
	// Writer
 | 
			
		||||
	var wg sync.WaitGroup
 | 
			
		||||
	wg.Add(1)
 | 
			
		||||
	go func() {
 | 
			
		||||
		defer wg.Done()
 | 
			
		||||
		for toWrite := range queue {
 | 
			
		||||
			entry := <-toWrite
 | 
			
		||||
			reUse <- toWrite
 | 
			
		||||
			if hasErr() {
 | 
			
		||||
				writtenBlocks <- entry
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
			n, err := w.Write(entry)
 | 
			
		||||
			want := len(entry)
 | 
			
		||||
			writtenBlocks <- entry
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				setErr(err)
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
			if n != want {
 | 
			
		||||
				setErr(io.ErrShortWrite)
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
			aWritten += int64(n)
 | 
			
		||||
		}
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	// Reader
 | 
			
		||||
	defer func() {
 | 
			
		||||
		close(queue)
 | 
			
		||||
		if r.err != nil {
 | 
			
		||||
			err = r.err
 | 
			
		||||
			setErr(r.err)
 | 
			
		||||
		}
 | 
			
		||||
		wg.Wait()
 | 
			
		||||
		if err == nil {
 | 
			
		||||
			err = aErr
 | 
			
		||||
		}
 | 
			
		||||
		written = aWritten
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	for !hasErr() {
 | 
			
		||||
		if !r.readFull(r.buf[:4], true) {
 | 
			
		||||
			if r.err == io.EOF {
 | 
			
		||||
				r.err = nil
 | 
			
		||||
			}
 | 
			
		||||
			return 0, r.err
 | 
			
		||||
		}
 | 
			
		||||
		chunkType := r.buf[0]
 | 
			
		||||
		if !r.readHeader {
 | 
			
		||||
			if chunkType != chunkTypeStreamIdentifier {
 | 
			
		||||
				r.err = ErrCorrupt
 | 
			
		||||
				return 0, r.err
 | 
			
		||||
			}
 | 
			
		||||
			r.readHeader = true
 | 
			
		||||
		}
 | 
			
		||||
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
 | 
			
		||||
 | 
			
		||||
		// The chunk types are specified at
 | 
			
		||||
		// https://github.com/google/snappy/blob/master/framing_format.txt
 | 
			
		||||
		switch chunkType {
 | 
			
		||||
		case chunkTypeCompressedData:
 | 
			
		||||
			r.blockStart += int64(r.j)
 | 
			
		||||
			// Section 4.2. Compressed data (chunk type 0x00).
 | 
			
		||||
			if chunkLen < checksumSize {
 | 
			
		||||
				r.err = ErrCorrupt
 | 
			
		||||
				return 0, r.err
			}
			if chunkLen > r.maxBufSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			orgBuf := <-toRead
			buf := orgBuf[:chunkLen]

			if !r.readFull(buf, false) {
				return 0, r.err
			}

			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}

			if n > r.maxBlock {
				r.err = ErrCorrupt
				return 0, r.err
			}
			wg.Add(1)

			decoded := <-writtenBlocks
			entry := <-reUse
			queue <- entry
			go func() {
				defer wg.Done()
				decoded = decoded[:n]
				_, err := Decode(decoded, buf)
				toRead <- orgBuf
				if err != nil {
					writtenBlocks <- decoded
					setErr(err)
					return
				}
				if !r.ignoreCRC && crc(decoded) != checksum {
					writtenBlocks <- decoded
					setErr(ErrCRC)
					return
				}
				entry <- decoded
			}()
			continue

		case chunkTypeUncompressedData:

			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if chunkLen > r.maxBufSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			// Grab write buffer
			orgBuf := <-writtenBlocks
			buf := orgBuf[:checksumSize]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read content.
			n := chunkLen - checksumSize

			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if n > r.maxBlock {
				r.err = ErrCorrupt
				return 0, r.err
			}
			// Read uncompressed
			buf = orgBuf[:n]
			if !r.readFull(buf, false) {
				return 0, r.err
			}

			if !r.ignoreCRC && crc(buf) != checksum {
				r.err = ErrCRC
				return 0, r.err
			}
			entry := <-reUse
			queue <- entry
			entry <- buf
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return 0, r.err
			}
			if string(r.buf[:len(magicBody)]) != magicBody {
				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
					r.err = ErrCorrupt
					return 0, r.err
				} else {
					r.snappyFrame = true
				}
			} else {
				r.snappyFrame = false
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
			r.err = ErrUnsupported
			return 0, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if chunkLen > maxChunkSize {
			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
			r.err = ErrUnsupported
			return 0, r.err
		}

		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
		if !r.skippable(r.buf, chunkLen, false, chunkType) {
			return 0, r.err
		}
	}
	return 0, r.err
}

// Skip will skip n bytes forward in the decompressed output.
// For larger skips this consumes less CPU and is faster than reading output and discarding it.
// CRC is not checked on skipped blocks.
@@ -699,8 +964,16 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
	case io.SeekCurrent:
		offset += r.blockStart + int64(r.i)
	case io.SeekEnd:
		offset = -offset
		if offset > 0 {
			return 0, errors.New("seek after end of file")
		}
		offset = r.index.TotalUncompressed + offset
	}

	if offset < 0 {
		return 0, errors.New("seek before start of file")
	}

	c, u, err := r.index.Find(offset)
	if err != nil {
		return r.blockStart + int64(r.i), err
@@ -712,10 +985,6 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
		return 0, err
	}

	if offset < 0 {
		offset = r.index.TotalUncompressed + offset
	}

	r.i = r.j // Remove rest of current block.
	if u < offset {
		// Forward inside block
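The Seek hunk above adds io.SeekEnd handling to the s2 ReadSeeker: a non-positive offset is resolved against the index's TotalUncompressed, and positive offsets are rejected. A minimal usage sketch in Go follows; the input file name is made up, and constructing the ReadSeeker via Reader.ReadSeeker reflects the s2 API as documented upstream rather than anything shown in this diff.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Assumed: an s2 stream that was written with an index.
	f, err := os.Open("stream.s2")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := s2.NewReader(f)
	// ReadSeeker needs an index, either embedded in the stream or passed in.
	rs, err := r.ReadSeeker(true, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Seek 1 MiB back from the end of the decompressed output.
	// The new io.SeekEnd branch resolves this via index.TotalUncompressed.
	pos, err := rs.Seek(-1<<20, io.SeekEnd)
	if err != nil {
		log.Fatal(err) // positive offsets yield "seek after end of file"
	}
	fmt.Println("uncompressed offset:", pos)
}
```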
 
vendor/github.com/klauspost/compress/s2/encode.go (generated, vendored, 6 changes)
@@ -1119,12 +1119,6 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
			if w.appendIndex {
				w.written += int64(len(index))
			}
			if true {
				_, err := w.index.Load(index)
				if err != nil {
					panic(err)
				}
			}
		}

		if w.pad > 1 {

vendor/github.com/klauspost/compress/s2/encode_best.go (generated, vendored, 28 changes)
@@ -370,7 +370,7 @@ func encodeBlockBestSnappy(dst, src []byte) (d int) {
		}
		offset := m.s - m.offset

		return score - emitCopySize(offset, m.length)
		return score - emitCopyNoRepeatSize(offset, m.length)
	}

	matchAt := func(offset, s int, first uint32) match {
@@ -567,6 +567,10 @@ func emitCopySize(offset, length int) int {

	// Offset no more than 2 bytes.
	if length > 64 {
		if offset < 2048 {
			// Emit 8 bytes, then rest as repeats...
			return 2 + emitRepeatSize(offset, length-8)
		}
		// Emit remaining as repeats, at least 4 bytes remain.
		return 3 + emitRepeatSize(offset, length-60)
	}
@@ -577,6 +581,28 @@ func emitCopySize(offset, length int) int {
	return 2
}

// emitCopyNoRepeatSize returns the size to encode the offset+length
//
// It assumes that:
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopyNoRepeatSize(offset, length int) int {
	if offset >= 65536 {
		return 5 + 5*(length/64)
	}

	// Offset no more than 2 bytes.
	if length > 64 {
		// Emit remaining as repeats, at least 4 bytes remain.
		return 3 + 3*(length/60)
	}
	if length >= 12 || offset >= 2048 {
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes.
	return 2
}

// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {

vendor/github.com/klauspost/compress/s2/encode_go.go (generated, vendored, 23 changes)
@@ -180,14 +180,23 @@ func emitCopy(dst []byte, offset, length int) int {

	// Offset no more than 2 bytes.
	if length > 64 {
		// Emit a length 60 copy, encoded as 3 bytes.
		// Emit remaining as repeat value (minimum 4 bytes).
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = 59<<2 | tagCopy2
		length -= 60
		off := 3
		if offset < 2048 {
			// emit 8 bytes as tagCopy1, rest as repeats.
			dst[1] = uint8(offset)
			dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
			length -= 8
			off = 2
		} else {
			// Emit a length 60 copy, encoded as 3 bytes.
			// Emit remaining as repeat value (minimum 4 bytes).
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 59<<2 | tagCopy2
			length -= 60
		}
		// Emit remaining as repeats, at least 4 bytes remain.
		return 3 + emitRepeat(dst[3:], offset, length)
		return off + emitRepeat(dst[off:], offset, length)
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes.

vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go (generated, vendored, 2 changes)
@@ -5,6 +5,8 @@

package s2

func _dummy_()

// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.

vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s (generated, vendored, 1930 changes)
File diff suppressed because it is too large.

vendor/github.com/klauspost/compress/s2/index.go (generated, vendored, 10 changes)
@@ -10,6 +10,7 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"sort"
)

const (
@@ -100,6 +101,15 @@ func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err er
	if offset > i.TotalUncompressed {
		return 0, 0, io.ErrUnexpectedEOF
	}
	if len(i.info) > 200 {
		n := sort.Search(len(i.info), func(n int) bool {
			return i.info[n].uncompressedOffset > offset
		})
		if n == 0 {
			n = 1
		}
		return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
	}
	for _, info := range i.info {
		if info.uncompressedOffset > offset {
			break
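The new fast path in Index.Find switches from a linear scan to a binary search once the index holds more than 200 entries. Below is a small self-contained sketch of the sort.Search predicate it relies on; the offsets are invented example values, not data from the library.

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Mirrors the predicate used in Index.Find: offsets are sorted ascending,
	// sort.Search returns the first entry whose offset is greater than the
	// target, so entry n-1 is the last block starting at or before the target.
	uncompressedOffsets := []int64{0, 4096, 8192, 12288}
	target := int64(9000)

	n := sort.Search(len(uncompressedOffsets), func(n int) bool {
		return uncompressedOffsets[n] > target
	})
	if n == 0 {
		n = 1
	}
	// Prints: start decoding at block 2 (offset 8192)
	fmt.Printf("start decoding at block %d (offset %d)\n", n-1, uncompressedOffsets[n-1])
}
```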
 
vendor/github.com/klauspost/compress/zstd/README.md (generated, vendored, 54 changes)
@@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co

### Benchmarks

These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).

The first two are streaming decodes and the last are smaller inputs.

Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.

```
BenchmarkDecoderSilesia-8                          3     385000067 ns/op     550.51 MB/s        5498 B/op          8 allocs/op
BenchmarkDecoderSilesiaCgo-8                       6     197666567 ns/op    1072.25 MB/s      270672 B/op          8 allocs/op
BenchmarkDecoderSilesia-32                         5     206878840 ns/op    1024.50 MB/s       49808 B/op         43 allocs/op
BenchmarkDecoderEnwik9-32                          1    1271809000 ns/op     786.28 MB/s       72048 B/op         52 allocs/op

BenchmarkDecoderEnwik9-8                           1    2027001600 ns/op     493.34 MB/s       10496 B/op         18 allocs/op
BenchmarkDecoderEnwik9Cgo-8                        2     979499200 ns/op    1020.93 MB/s      270672 B/op          8 allocs/op
Concurrent blocks, performance:

Concurrent performance:

BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16                28915         42469 ns/op    4340.07 MB/s         114 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16           116505          9965 ns/op    11900.16 MB/s         16 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16              8952        134272 ns/op    3588.70 MB/s         915 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16               11820        102538 ns/op    4161.90 MB/s         594 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16             34782         34184 ns/op    3661.88 MB/s          60 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16              27712         43447 ns/op    3500.58 MB/s          99 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16                 62826         18750 ns/op    21845.10 MB/s        104 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16          631545          1794 ns/op    57078.74 MB/s          2 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16         1690140           712 ns/op    172938.13 MB/s         1 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16                 10432        113593 ns/op    6180.73 MB/s        1143 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-16                    113206         10671 ns/op    9596.27 MB/s          15 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16          1530615           779 ns/op    5229.49 MB/s           0 B/op          0 allocs/op

BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16             65217         16192 ns/op    11383.34 MB/s         46 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16        292671          4039 ns/op    29363.19 MB/s          6 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16          26314         46021 ns/op    10470.43 MB/s        293 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16            33897         34900 ns/op    12227.96 MB/s        205 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16         104348         11433 ns/op    10949.01 MB/s         20 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16           75949         15510 ns/op    9805.60 MB/s          32 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16             173910          6756 ns/op    60624.29 MB/s         37 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16       923076          1339 ns/op    76474.87 MB/s          1 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16       922920          1351 ns/op    91102.57 MB/s          2 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16              27649         43618 ns/op    16096.19 MB/s        407 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16                 279073          4160 ns/op    24614.18 MB/s          6 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16        749938          1579 ns/op    2581.71 MB/s           0 B/op          0 allocs/op
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32                67356         17857 ns/op    10321.96 MB/s        22.48 pct      102 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32           266656          4421 ns/op    26823.21 MB/s        11.89 pct       19 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32             20992         56842 ns/op    8477.17 MB/s         39.90 pct      754 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32               27456         43932 ns/op    9714.01 MB/s         33.27 pct      524 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32             78432         15047 ns/op    8319.15 MB/s         40.34 pct       66 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32              65800         18436 ns/op    8249.63 MB/s         37.75 pct       88 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32                102993         11523 ns/op    35546.09 MB/s         3.637 pct     143 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32         1000000          1070 ns/op    95720.98 MB/s        80.53 pct        3 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32          749802          1752 ns/op    70272.35 MB/s       100.0 pct         5 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32                 22640         52934 ns/op    13263.37 MB/s        26.25 pct     1014 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-32                    226412          5232 ns/op    19572.27 MB/s        14.49 pct       20 B/op        0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32           923041          1276 ns/op    3194.71 MB/s         31.26 pct        0 B/op        0 allocs/op
```

This reflects the performance around May 2020, but this may be out of date.
This reflects the performance around May 2022, but this may be out of date.

## Zstd inside ZIP files

vendor/github.com/klauspost/compress/zstd/bitreader.go (generated, vendored, 7 changes)
@@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
	return v
}

func (b *bitReader) get16BitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}

// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {

vendor/github.com/klauspost/compress/zstd/bitwriter.go (generated, vendored, 76 changes)
@@ -5,8 +5,6 @@

package zstd

import "fmt"

// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	b.nBits += bits
}

// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
	v := b.nBits >> 3
	switch v {
	case 0:
	case 1:
		b.out = append(b.out,
			byte(b.bitContainer),
		)
	case 2:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
		)
	case 3:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
		)
	case 4:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
		)
	case 5:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
		)
	case 6:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
		)
	case 7:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
		)
	case 8:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
			byte(b.bitContainer>>56),
		)
	default:
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	b.bitContainer >>= v << 3
	b.nBits &= 7
}

// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {

vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored, 73 changes)
@@ -5,9 +5,14 @@
package zstd

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"github.com/klauspost/compress/huff0"
@@ -38,14 +43,14 @@ const (
	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
	maxCompressedBlockSize = 128 << 10

	compressedBlockOverAlloc    = 16
	maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc

	// Maximum possible block size (all Raw+Uncompressed).
	maxBlockSize = (1 << 21) - 1

	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
	maxCompressedLiteralSize = 1 << 18
	maxRLELiteralSize        = 1 << 20
	maxMatchLen              = 131074
	maxSequences             = 0x7f00 + 0xffff
	maxMatchLen  = 131074
	maxSequences = 0x7f00 + 0xffff

	// We support slightly less than the reference decoder to be able to
	// use ints on 32 bit archs.
@@ -97,7 +102,6 @@ type blockDec struct {

	// Block is RLE, this is the size.
	RLESize uint32
	tmp     [4]byte

	Type blockType

@@ -136,7 +140,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
	b.Type = blockType((bh >> 1) & 3)
	// find size.
	cSize := int(bh >> 3)
	maxSize := maxBlockSize
	maxSize := maxCompressedBlockSizeAlloc
	switch b.Type {
	case blockTypeReserved:
		return ErrReservedBlockType
@@ -157,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
			println("Data size on stream:", cSize)
		}
		b.RLESize = 0
		maxSize = maxCompressedBlockSize
		maxSize = maxCompressedBlockSizeAlloc
		if windowSize < maxCompressedBlockSize && b.lowMem {
			maxSize = int(windowSize)
			maxSize = int(windowSize) + compressedBlockOverAlloc
		}
		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
			if debugDecoder {
@@ -190,9 +194,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
	// Read block data.
	if cap(b.dataStorage) < cSize {
		if b.lowMem || cSize > maxCompressedBlockSize {
			b.dataStorage = make([]byte, 0, cSize)
			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
		} else {
			b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
		}
	}
	if cap(b.dst) <= maxSize {
@@ -360,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
		}
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, litRegenSize)
				b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
			} else {
				if litRegenSize > maxCompressedLiteralSize {
					// Exceptional
					b.literalBuf = make([]byte, litRegenSize)
				} else {
					b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
				}
				b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		literals = b.literalBuf[:litRegenSize]
@@ -397,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
		// Ensure we have space to store it.
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, 0, litRegenSize)
				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		var err error
		// Use our out buffer.
		huff.MaxDecodedSize = maxCompressedBlockSize
		huff.MaxDecodedSize = litRegenSize
		if fourStreams {
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
@@ -429,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
		// Ensure we have space to store it.
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, 0, litRegenSize)
				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		huff := hist.huffTree
@@ -448,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
			return in, err
		}
		hist.huffTree = huff
		huff.MaxDecodedSize = maxCompressedBlockSize
		huff.MaxDecodedSize = litRegenSize
		// Use our out buffer.
		if fourStreams {
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -463,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
		if len(literals) != litRegenSize {
			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
		}
		// Re-cap to get extra size.
		literals = b.literalBuf[:len(literals)]
		if debugDecoder {
			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
		}
@@ -486,10 +487,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
		b.dst = append(b.dst, hist.decoders.literals...)
		return nil
	}
	err = hist.decoders.decodeSync(hist)
	before := len(hist.decoders.out)
	err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
	if err != nil {
		return err
	}
	if hist.decoders.maxSyncLen > 0 {
		hist.decoders.maxSyncLen += uint64(before)
		hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
	}
	b.dst = hist.decoders.out
	hist.recentOffsets = hist.decoders.prevOffset
	return nil
@@ -632,6 +638,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
		println("initializing sequences:", err)
		return err
	}
	// Extract blocks...
	if false && hist.dict == nil {
		fatalErr := func(err error) {
			if err != nil {
				panic(err)
			}
		}
		fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
		var buf bytes.Buffer
		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
		buf.Write(in)
		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
	}

	return nil
}

@@ -650,6 +672,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
	}
	hist.decoders.windowSize = hist.windowSize
	hist.decoders.prevOffset = hist.recentOffsets

	err := hist.decoders.decode(b.sequence)
	hist.recentOffsets = hist.decoders.prevOffset
	return err

vendor/github.com/klauspost/compress/zstd/bytebuf.go (generated, vendored, 4 changes)
@@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
	return r, nil
}

func (b *byteBuf) remain() []byte {
	return *b
}

func (b *byteBuf) readByte() (byte, error) {
	bb := *b
	if len(bb) < 1 {

vendor/github.com/klauspost/compress/zstd/bytereader.go (generated, vendored, 6 changes)
@@ -13,12 +13,6 @@ type byteReader struct {
	off int
}

// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
	b.b = in
	b.off = 0
}

// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)

vendor/github.com/klauspost/compress/zstd/decoder.go (generated, vendored, 121 changes)
@@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
			}
			frame.history.setDict(&dict)
		}

		if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
			return dst, ErrDecoderSizeExceeded
		if frame.WindowSize > d.o.maxWindowSize {
			return dst, ErrWindowSizeExceeded
		}
		if frame.FrameContentSize < 1<<30 {
			// Never preallocate more than 1 GB up front.
		if frame.FrameContentSize != fcsUnknown {
			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
				return dst, ErrDecoderSizeExceeded
			}
			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
				copy(dst2, dst)
				dst = dst2
			}
		}

		if cap(dst) == 0 {
			// Allocate len(input) * 2 by default if nothing is provided
			// and we didn't get frame content size.
@@ -437,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
		println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
	}

	if len(next.b) > 0 {
	if !d.o.ignoreChecksum && len(next.b) > 0 {
		n, err := d.current.crc.Write(next.b)
		if err == nil {
			if n != len(next.b) {
@@ -449,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
		got := d.current.crc.Sum64()
		var tmp [4]byte
		binary.LittleEndian.PutUint32(tmp[:], uint32(got))
		if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
		if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
			if debugDecoder {
				println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
			}
@@ -533,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {

		// Update/Check CRC
		if d.frame.HasCheckSum {
			d.frame.crc.Write(d.current.b)
			if !d.o.ignoreChecksum {
				d.frame.crc.Write(d.current.b)
			}
			if d.current.d.Last {
				d.current.err = d.frame.checkCRC()
				if !d.o.ignoreChecksum {
					d.current.err = d.frame.checkCRC()
				} else {
					d.current.err = d.frame.consumeCRC()
				}
				if d.current.err != nil {
					println("CRC error:", d.current.err)
					return false
@@ -629,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {

// Create Decoder:
// ASYNC:
// Spawn 4 go routines.
// 0: Read frames and decode blocks.
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
// 3: Wait for stream history, execute sequences, send stream history.
// Spawn 3 go routines.
// 0: Read frames and decode block literals.
// 1: Decode sequences.
// 2: Execute sequences, send to output.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
	defer d.streamWg.Done()
	br := readerWrapper{r: r}

	var seqPrepare = make(chan *blockDec, d.o.concurrent)
	var seqDecode = make(chan *blockDec, d.o.concurrent)
	var seqExecute = make(chan *blockDec, d.o.concurrent)

	// Async 1: Prepare blocks...
	go func() {
		var hist history
		var hasErr bool
		for block := range seqPrepare {
			if hasErr {
				if block != nil {
					seqDecode <- block
				}
				continue
			}
			if block.async.newHist != nil {
				if debugDecoder {
					println("Async 1: new history")
				}
				hist.reset()
				if block.async.newHist.dict != nil {
					hist.setDict(block.async.newHist.dict)
				}
			}
			if block.err != nil || block.Type != blockTypeCompressed {
				hasErr = block.err != nil
				seqDecode <- block
				continue
			}

			remain, err := block.decodeLiterals(block.data, &hist)
			block.err = err
			hasErr = block.err != nil
			if err == nil {
				block.async.literals = hist.decoders.literals
				block.async.seqData = remain
			} else if debugDecoder {
				println("decodeLiterals error:", err)
			}
			seqDecode <- block
		}
		close(seqDecode)
	}()

	// Async 2: Decode sequences...
	// Async 1: Decode sequences...
	go func() {
		var hist history
		var hasErr bool
@@ -696,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
			}
			if block.async.newHist != nil {
				if debugDecoder {
					println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
				}
				hist.decoders = block.async.newHist.decoders
				hist.recentOffsets = block.async.newHist.recentOffsets
@@ -750,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
			}
			if block.async.newHist != nil {
				if debugDecoder {
					println("Async 3: new history")
					println("Async 2: new history")
				}
				hist.windowSize = block.async.newHist.windowSize
				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@@ -837,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch

decodeStream:
	for {
		var hist history
		var hasErr bool

		decodeBlock := func(block *blockDec) {
			if hasErr {
				if block != nil {
					seqDecode <- block
				}
				return
			}
			if block.err != nil || block.Type != blockTypeCompressed {
				hasErr = block.err != nil
				seqDecode <- block
				return
			}

			remain, err := block.decodeLiterals(block.data, &hist)
			block.err = err
			hasErr = block.err != nil
			if err == nil {
				block.async.literals = hist.decoders.literals
				block.async.seqData = remain
			} else if debugDecoder {
				println("decodeLiterals error:", err)
			}
			seqDecode <- block
		}
		frame := d.frame
		if debugDecoder {
			println("New frame...")
@@ -863,7 +856,7 @@ decodeStream:
			case <-ctx.Done():
			case dec := <-d.decoders:
				dec.sendErr(err)
				seqPrepare <- dec
				decodeBlock(dec)
			}
			break decodeStream
		}
@@ -883,6 +876,10 @@ decodeStream:
				if debugDecoder {
					println("Alloc History:", h.allocFrameBuffer)
				}
				hist.reset()
				if h.dict != nil {
					hist.setDict(h.dict)
				}
				dec.async.newHist = &h
				dec.async.fcs = frame.FrameContentSize
				historySent = true
@@ -909,7 +906,7 @@ decodeStream:
			}
			err = dec.err
			last := dec.Last
			seqPrepare <- dec
			decodeBlock(dec)
			if err != nil {
				break decodeStream
			}
@@ -918,7 +915,7 @@ decodeStream:
			}
		}
	}
	close(seqPrepare)
	close(seqDecode)
	wg.Wait()
	d.frame.history.b = frameHistCache
}
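The rewritten comment above describes the streaming decoder as a pipeline of three goroutines connected by channels (read frames and decode block literals, decode sequences, execute sequences and send output). The sketch below shows only that generic fan-through pattern with toy integer stages; it is not the decoder's actual code.

```go
package main

import "fmt"

// A generic three-stage pipeline in the shape described above:
// stage 0 feeds work into a channel, stages 1 and 2 transform it in their own
// goroutines, and closing each channel shuts down the next stage in order.
func main() {
	stage1 := make(chan int, 4)
	stage2 := make(chan int, 4)
	out := make(chan int, 4)

	go func() { // stage 1: e.g. "decode sequences"
		for v := range stage1 {
			stage2 <- v * 2
		}
		close(stage2)
	}()
	go func() { // stage 2: e.g. "execute sequences, send to output"
		for v := range stage2 {
			out <- v + 1
		}
		close(out)
	}()

	// stage 0: e.g. "read frames and decode block literals"
	for i := 0; i < 3; i++ {
		stage1 <- i
	}
	close(stage1)

	for v := range out {
		fmt.Println(v) // prints 1, 3, 5
	}
}
```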
 
vendor/github.com/klauspost/compress/zstd/decoder_options.go (generated, vendored, 13 changes)
@@ -19,6 +19,7 @@ type decoderOptions struct {
	maxDecodedSize uint64
	maxWindowSize  uint64
	dicts          []dict
	ignoreChecksum bool
}

func (o *decoderOptions) setDefault() {
@@ -31,7 +32,7 @@ func (o *decoderOptions) setDefault() {
	if o.concurrent > 4 {
		o.concurrent = 4
	}
	o.maxDecodedSize = 1 << 63
	o.maxDecodedSize = 64 << 30
}

// WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -66,7 +67,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum and default is 1 << 63 bytes.
// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
	return func(o *decoderOptions) error {
		if n == 0 {
@@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
		return nil
	}
}

// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
	return func(o *decoderOptions) error {
		o.ignoreChecksum = b
		return nil
	}
}
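The options diff above adds the IgnoreChecksum option and lowers the default maxDecodedSize from 1 << 63 to 64 GiB. A short usage sketch combining it with WithDecoderMaxMemory follows; the input file name is an assumption, while the option names come from the diff itself.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("data.zst") // assumed input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// IgnoreChecksum skips CRC verification (and, per the diff, consumes the
	// checksum bytes instead); WithDecoderMaxMemory caps decoded size for
	// untrusted input, here well below the new 64 GiB default.
	dec, err := zstd.NewReader(f,
		zstd.IgnoreChecksum(true),
		zstd.WithDecoderMaxMemory(1<<30),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	if _, err := io.Copy(io.Discard, dec); err != nil {
		log.Fatal(err)
	}
}
```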
 
vendor/github.com/klauspost/compress/zstd/enc_better.go (generated, vendored, 8 changes)
@@ -156,8 +156,8 @@ encodeLoop:
				panic("offset0 was 0")
			}

			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

@@ -518,8 +518,8 @@ encodeLoop:
			}

			// Store this, since we have it.
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
@@ -674,8 +674,8 @@ encodeLoop:
				panic("offset0 was 0")
			}

			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

@@ -1047,8 +1047,8 @@ encodeLoop:
			}

			// Store this, since we have it.
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match

vendor/github.com/klauspost/compress/zstd/enc_dfast.go (generated, vendored, 10 changes)
@@ -127,8 +127,8 @@ encodeLoop:
				panic("offset0 was 0")
			}

			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

@@ -439,8 +439,8 @@ encodeLoop:
		var t int32
		for {

			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

@@ -785,8 +785,8 @@ encodeLoop:
				panic("offset0 was 0")
			}

			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

@@ -969,7 +969,7 @@ encodeLoop:
		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
		longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
		longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
		longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
		e.longTable[longHash1] = te0
		e.longTable[longHash2] = te1
		e.markLongShardDirty(longHash1)
@@ -1002,8 +1002,8 @@ encodeLoop:
			}

			// Store this, since we have it.
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match

vendor/github.com/klauspost/compress/zstd/encoder.go (generated, vendored, 2 changes)
@@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
	}

	// If we can do everything in one block, prefer that.
	if len(src) <= maxCompressedBlockSize {
	if len(src) <= e.o.blockSize {
		enc.Reset(e.o.dict, true)
		// Slightly faster with no history and everything in one block.
		if e.o.crc {
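The EncodeAll change above keys the single-block fast path on the encoder's configured block size instead of the fixed 128 KiB constant. A minimal EncodeAll sketch follows; NewWriter with a nil writer is the documented way to use EncodeAll only, and the payload and sizes are made up.

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// nil writer: this Encoder is only used for the stateless EncodeAll call.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	src := []byte("small payloads take the single-block fast path in EncodeAll")
	dst := enc.EncodeAll(src, nil)
	fmt.Println("compressed", len(src), "bytes into", len(dst), "bytes")
}
```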
 
 | 
			
		||||
							
								
								
									
										70
									
								
								vendor/github.com/klauspost/compress/zstd/framedec.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										70
									
								
								vendor/github.com/klauspost/compress/zstd/framedec.go
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
 | 
			
		||||
		return ErrWindowSizeTooSmall
	}
	d.history.windowSize = int(d.WindowSize)
	if d.o.lowMem && d.history.windowSize < maxBlockSize {
	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
		// Alloc 2x window size if not low-mem, or very small window size.
		d.history.allocFrameBuffer = d.history.windowSize * 2
		// TODO: Maybe use FrameContent size
	} else {
		// Alloc with one additional block
		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
	}

@@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error {
	if !d.HasCheckSum {
		return nil
	}
	var tmp [4]byte
	got := d.crc.Sum64()
	// Flip to match file order.
	tmp[0] = byte(got >> 0)
	tmp[1] = byte(got >> 8)
	tmp[2] = byte(got >> 16)
	tmp[3] = byte(got >> 24)

	// We can overwrite upper tmp now
	want, err := d.rawInput.readSmall(4)
@@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error {
		return err
	}

	if !bytes.Equal(tmp[:], want) && !ignoreCRC {
	if d.o.ignoreChecksum {
		return nil
	}

	var tmp [4]byte
	got := d.crc.Sum64()
	// Flip to match file order.
	tmp[0] = byte(got >> 0)
	tmp[1] = byte(got >> 8)
	tmp[2] = byte(got >> 16)
	tmp[3] = byte(got >> 24)

	if !bytes.Equal(tmp[:], want) {
		if debugDecoder {
			println("CRC Check Failed:", tmp[:], "!=", want)
		}
@@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error {
	return nil
}

// consumeCRC reads the checksum data if the frame has one.
func (d *frameDec) consumeCRC() error {
	if d.HasCheckSum {
		_, err := d.rawInput.readSmall(4)
		if err != nil {
			println("CRC missing?", err)
			return err
		}
	}

	return nil
}

// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
	saved := d.history.b
@@ -326,6 +345,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
	d.history.ignoreBuffer = len(dst)
	// Store input length, so we only check new data.
	crcStart := len(dst)
	d.history.decoders.maxSyncLen = 0
	if d.FrameContentSize != fcsUnknown {
		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
			return dst, ErrDecoderSizeExceeded
		}
		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
			// Alloc for output
			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
			copy(dst2, dst)
			dst = dst2
		}
	}
	var err error
	for {
		err = dec.reset(d.rawInput, d.WindowSize)
@@ -360,13 +392,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
		if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
			err = ErrFrameSizeMismatch
		} else if d.HasCheckSum {
			var n int
			n, err = d.crc.Write(dst[crcStart:])
			if err == nil {
				if n != len(dst)-crcStart {
					err = io.ErrShortWrite
				} else {
					err = d.checkCRC()
			if d.o.ignoreChecksum {
				err = d.consumeCRC()
			} else {
				var n int
				n, err = d.crc.Write(dst[crcStart:])
				if err == nil {
					if n != len(dst)-crcStart {
						err = io.ErrShortWrite
					} else {
						err = d.checkCRC()
					}
				}
			}
		}
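The framedec.go changes above gate CRC verification on d.o.ignoreChecksum: when the flag is set the checksum bytes are consumed via consumeCRC but never compared. A caller-side sketch of skipping verification, assuming the flag is exposed as the decoder option zstd.IgnoreChecksum (check decoder_options.go in this vendored version for the exact name):

package zstdexample

import "github.com/klauspost/compress/zstd"

// decodeNoCRC decompresses a zstd frame without verifying its checksum.
// zstd.IgnoreChecksum(true) is assumed to set the ignoreChecksum flag used above.
func decodeNoCRC(compressed []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil, zstd.IgnoreChecksum(true))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}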
							
								
								
									
65 changes: vendor/github.com/klauspost/compress/zstd/fse_decoder.go (generated, vendored)
@@ -5,8 +5,10 @@
package zstd

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

const (
@@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
	return s.buildDtable()
}

func (s *fseDecoder) mustReadFrom(r io.Reader) {
	fatalErr := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	// 	dt             [maxTablesize]decSymbol // Decompression table.
	//	symbolLen      uint16                  // Length of active part of the symbol table.
	//	actualTableLog uint8                   // Selected tablelog.
	//	maxBits        uint8                   // Maximum number of additional bits
	//	// used for table creation to avoid allocations.
	//	stateTable [256]uint16
	//	norm       [maxSymbolValue + 1]int16
	//	preDefined bool
	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
}

// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
@@ -204,18 +229,10 @@ func (d decSymbol) newState() uint16 {
	return uint16(d >> 16)
}

func (d decSymbol) baseline() uint32 {
	return uint32(d >> 32)
}

func (d decSymbol) baselineInt() int {
	return int(d >> 32)
}

func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}

func (d *decSymbol) setNBits(nBits uint8) {
	const mask = 0xffffffffffffff00
	*d = (*d & mask) | decSymbol(nBits)
@@ -231,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) {
	*d = (*d & mask) | decSymbol(state)<<16
}

func (d *decSymbol) setBaseline(baseline uint32) {
	const mask = 0xffffffff
	*d = (*d & mask) | decSymbol(baseline)<<32
}

func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
	const mask = 0xffff00ff
	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@@ -352,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
	s.state = dt[br.getBits(tableLog)]
}

// next returns the current symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) next(br *bitReader) {
	lowBits := uint16(br.getBits(s.state.nbBits()))
	s.state = s.dt[s.state.newState()+lowBits]
}

// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (s *fseState) finished(br *bitReader) bool {
	return br.finished() && s.state.nbBits() > 0
}

// final returns the current state symbol without decoding the next.
func (s *fseState) final() (int, uint8) {
	return s.state.baselineInt(), s.state.addBits()
}

// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
	return s.baselineInt(), s.addBits()
}

// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
	lowBits := br.get16BitsFast(s.state.nbBits())
	s.state = s.dt[s.state.newState()+lowBits]
	return s.state.baseline(), s.state.addBits()
}
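mustReadFrom above rebuilds the decoder's fixed-size fields with encoding/binary reads and panics on any failure; it only works because every field it touches has a fixed binary layout. A toy illustration of the same round-trip pattern (the struct here is hypothetical, not from the package):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// toyTable mirrors the idea: only fixed-size fields, so binary.Write and
// binary.Read can serialize it without any custom encoding.
type toyTable struct {
	SymbolLen      uint16
	ActualTableLog uint8
	MaxBits        uint8
	Norm           [4]int16
}

func main() {
	src := toyTable{SymbolLen: 12, ActualTableLog: 6, MaxBits: 8, Norm: [4]int16{1, 2, 3, -6}}

	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, &src); err != nil {
		panic(err)
	}

	var dst toyTable
	if err := binary.Read(&buf, binary.LittleEndian, &dst); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst)
}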
							
								
								
									
23 changes: vendor/github.com/klauspost/compress/zstd/fse_encoder.go (generated, vendored)
@@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
	s.clearCount = maxCount != 0
}

// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *fseEncoder) prepare() (*fseEncoder, error) {
	if s == nil {
		s = &fseEncoder{}
	}
	s.useRLE = false
	if s.clearCount && s.maxCount == 0 {
		for i := range s.count {
			s.count[i] = 0
		}
		s.clearCount = false
	}
	return s, nil
}

// allocCtable will allocate tables needed for compression.
// If existing tables a re big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
	c.state = c.stateTable[lu]
}

// encode the output symbol provided and write it to the bitstream.
func (c *cState) encode(symbolTT symbolTransform) {
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}

// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
	c.bw.flush32()
							
								
								
									
11 changes: vendor/github.com/klauspost/compress/zstd/fuzz.go (generated, vendored)
@@ -1,11 +0,0 @@
//go:build ignorecrc
// +build ignorecrc

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true
							
								
								
									
11 changes: vendor/github.com/klauspost/compress/zstd/fuzz_none.go (generated, vendored)
@@ -1,11 +0,0 @@
//go:build !ignorecrc
// +build !ignorecrc

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false
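The two deleted files above implemented a compile-time switch: the same constant declared twice under opposite build tags, so fuzzing builds could flip ignoreCRC without a runtime option (now superseded by the ignoreChecksum decoder flag). A generic sketch of that pattern, with hypothetical file, tag, and constant names:

// file: feature_on.go, compiled only with "go build -tags myfeature"
//go:build myfeature

package mypkg

const featureEnabled = true

// file: feature_off.go, compiled when the tag is absent
//go:build !myfeature

package mypkg

const featureEnabled = false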
							
								
								
									
6 changes: vendor/github.com/klauspost/compress/zstd/hash.go (generated, vendored)
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
		return (uint32(u) * prime4bytes) >> (32 - length)
	}
}

// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash3(u uint32, h uint8) uint32 {
	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}
							
								
								
									
260 changes: vendor/github.com/klauspost/compress/zstd/seqdec.go (generated, vendored)
@@ -73,6 +73,7 @@ type sequenceDecs struct {
	seqSize      int
 | 
			
		||||
	windowSize   int
 | 
			
		||||
	maxBits      uint8
 | 
			
		||||
	maxSyncLen   uint64
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// initialize all 3 decoders from the stream input.
 | 
			
		||||
@@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// decode sequences from the stream with the provided history.
 | 
			
		||||
func (s *sequenceDecs) decode(seqs []seqVals) error {
 | 
			
		||||
	br := s.br
 | 
			
		||||
 | 
			
		||||
	// Grab full sizes tables, to avoid bounds checks.
 | 
			
		||||
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 | 
			
		||||
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 | 
			
		||||
	s.seqSize = 0
 | 
			
		||||
	litRemain := len(s.literals)
 | 
			
		||||
	maxBlockSize := maxCompressedBlockSize
 | 
			
		||||
	if s.windowSize < maxBlockSize {
 | 
			
		||||
		maxBlockSize = s.windowSize
 | 
			
		||||
	}
 | 
			
		||||
	for i := range seqs {
 | 
			
		||||
		var ll, mo, ml int
 | 
			
		||||
		if br.off > 4+((maxOffsetBits+16+16)>>3) {
 | 
			
		||||
			// inlined function:
 | 
			
		||||
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 | 
			
		||||
 | 
			
		||||
			// Final will not read from stream.
 | 
			
		||||
			var llB, mlB, moB uint8
 | 
			
		||||
			ll, llB = llState.final()
 | 
			
		||||
			ml, mlB = mlState.final()
 | 
			
		||||
			mo, moB = ofState.final()
 | 
			
		||||
 | 
			
		||||
			// extra bits are stored in reverse order.
 | 
			
		||||
			br.fillFast()
 | 
			
		||||
			mo += br.getBits(moB)
 | 
			
		||||
			if s.maxBits > 32 {
 | 
			
		||||
				br.fillFast()
 | 
			
		||||
			}
 | 
			
		||||
			ml += br.getBits(mlB)
 | 
			
		||||
			ll += br.getBits(llB)
 | 
			
		||||
 | 
			
		||||
			if moB > 1 {
 | 
			
		||||
				s.prevOffset[2] = s.prevOffset[1]
 | 
			
		||||
				s.prevOffset[1] = s.prevOffset[0]
 | 
			
		||||
				s.prevOffset[0] = mo
 | 
			
		||||
			} else {
 | 
			
		||||
				// mo = s.adjustOffset(mo, ll, moB)
 | 
			
		||||
				// Inlined for rather big speedup
 | 
			
		||||
				if ll == 0 {
 | 
			
		||||
					// There is an exception though, when current sequence's literals_length = 0.
 | 
			
		||||
					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
 | 
			
		||||
					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
 | 
			
		||||
					mo++
 | 
			
		||||
				}
 | 
			
		||||
 | 
			
		||||
				if mo == 0 {
 | 
			
		||||
					mo = s.prevOffset[0]
 | 
			
		||||
				} else {
 | 
			
		||||
					var temp int
 | 
			
		||||
					if mo == 3 {
 | 
			
		||||
						temp = s.prevOffset[0] - 1
 | 
			
		||||
					} else {
 | 
			
		||||
						temp = s.prevOffset[mo]
 | 
			
		||||
					}
 | 
			
		||||
 | 
			
		||||
					if temp == 0 {
 | 
			
		||||
						// 0 is not valid; input is corrupted; force offset to 1
 | 
			
		||||
						println("WARNING: temp was 0")
 | 
			
		||||
						temp = 1
 | 
			
		||||
					}
 | 
			
		||||
 | 
			
		||||
					if mo != 1 {
 | 
			
		||||
						s.prevOffset[2] = s.prevOffset[1]
 | 
			
		||||
					}
 | 
			
		||||
					s.prevOffset[1] = s.prevOffset[0]
 | 
			
		||||
					s.prevOffset[0] = temp
 | 
			
		||||
					mo = temp
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
			br.fillFast()
 | 
			
		||||
		} else {
 | 
			
		||||
			if br.overread() {
 | 
			
		||||
				if debugDecoder {
 | 
			
		||||
					printf("reading sequence %d, exceeded available data\n", i)
 | 
			
		||||
				}
 | 
			
		||||
				return io.ErrUnexpectedEOF
 | 
			
		||||
			}
 | 
			
		||||
			ll, mo, ml = s.next(br, llState, mlState, ofState)
 | 
			
		||||
			br.fill()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if debugSequences {
 | 
			
		||||
			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
 | 
			
		||||
		}
 | 
			
		||||
		// Evaluate.
 | 
			
		||||
		// We might be doing this async, so do it early.
 | 
			
		||||
		if mo == 0 && ml > 0 {
 | 
			
		||||
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
 | 
			
		||||
		}
 | 
			
		||||
		if ml > maxMatchLen {
 | 
			
		||||
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
 | 
			
		||||
		}
 | 
			
		||||
		s.seqSize += ll + ml
 | 
			
		||||
		if s.seqSize > maxBlockSize {
 | 
			
		||||
			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
 | 
			
		||||
		}
 | 
			
		||||
		litRemain -= ll
 | 
			
		||||
		if litRemain < 0 {
 | 
			
		||||
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
 | 
			
		||||
		}
 | 
			
		||||
		seqs[i] = seqVals{
 | 
			
		||||
			ll: ll,
 | 
			
		||||
			ml: ml,
 | 
			
		||||
			mo: mo,
 | 
			
		||||
		}
 | 
			
		||||
		if i == len(seqs)-1 {
 | 
			
		||||
			// This is the last sequence, so we shouldn't update state.
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Manually inlined, ~ 5-20% faster
 | 
			
		||||
		// Update all 3 states at once. Approx 20% faster.
 | 
			
		||||
		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
 | 
			
		||||
		if nBits == 0 {
 | 
			
		||||
			llState = llTable[llState.newState()&maxTableMask]
 | 
			
		||||
			mlState = mlTable[mlState.newState()&maxTableMask]
 | 
			
		||||
			ofState = ofTable[ofState.newState()&maxTableMask]
 | 
			
		||||
		} else {
 | 
			
		||||
			bits := br.get32BitsFast(nBits)
 | 
			
		||||
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
 | 
			
		||||
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
 | 
			
		||||
 | 
			
		||||
			lowBits = uint16(bits >> (ofState.nbBits() & 31))
 | 
			
		||||
			lowBits &= bitMask[mlState.nbBits()&15]
 | 
			
		||||
			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
 | 
			
		||||
 | 
			
		||||
			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
 | 
			
		||||
			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	s.seqSize += litRemain
 | 
			
		||||
	if s.seqSize > maxBlockSize {
 | 
			
		||||
		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
 | 
			
		||||
	}
 | 
			
		||||
	err := br.close()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		printf("Closing sequences: %v, %+v\n", err, *br)
 | 
			
		||||
	}
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// execute will execute the decoded sequence with the provided history.
 | 
			
		||||
// The sequence must be evaluated before being sent.
 | 
			
		||||
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
 | 
			
		||||
	if len(s.dict) == 0 {
 | 
			
		||||
		return s.executeSimple(seqs, hist)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Ensure we have enough output size...
 | 
			
		||||
	if len(s.out)+s.seqSize > cap(s.out) {
 | 
			
		||||
		addBytes := s.seqSize + len(s.out)
 | 
			
		||||
@@ -327,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Add final literals
 | 
			
		||||
	copy(out[t:], s.literals)
 | 
			
		||||
	if debugDecoder {
 | 
			
		||||
@@ -341,14 +203,18 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// decode sequences from the stream with the provided history.
 | 
			
		||||
func (s *sequenceDecs) decodeSync(history *history) error {
 | 
			
		||||
func (s *sequenceDecs) decodeSync(hist []byte) error {
 | 
			
		||||
	supported, err := s.decodeSyncSimple(hist)
 | 
			
		||||
	if supported {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	br := s.br
 | 
			
		||||
	seqs := s.nSeqs
 | 
			
		||||
	startSize := len(s.out)
 | 
			
		||||
	// Grab full sizes tables, to avoid bounds checks.
 | 
			
		||||
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 | 
			
		||||
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 | 
			
		||||
	hist := history.b[history.ignoreBuffer:]
 | 
			
		||||
	out := s.out
 | 
			
		||||
	maxBlockSize := maxCompressedBlockSize
 | 
			
		||||
	if s.windowSize < maxBlockSize {
 | 
			
		||||
@@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 | 
			
		||||
		}
 | 
			
		||||
		size := ll + ml + len(out)
 | 
			
		||||
		if size-startSize > maxBlockSize {
 | 
			
		||||
			return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
 | 
			
		||||
			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
 | 
			
		||||
		}
 | 
			
		||||
		if size > cap(out) {
 | 
			
		||||
			// Not enough size, which can happen under high volume block streaming conditions
 | 
			
		||||
@@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 | 
			
		||||
 | 
			
		||||
		if mo > len(out)+len(hist) || mo > s.windowSize {
 | 
			
		||||
			if len(s.dict) == 0 {
 | 
			
		||||
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
 | 
			
		||||
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// we may be in dictionary.
 | 
			
		||||
			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
 | 
			
		||||
			if dictO < 0 || dictO >= len(s.dict) {
 | 
			
		||||
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
 | 
			
		||||
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
 | 
			
		||||
			}
 | 
			
		||||
			end := dictO + ml
 | 
			
		||||
			if end > len(s.dict) {
 | 
			
		||||
@@ -530,6 +396,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 | 
			
		||||
			ofState = ofTable[ofState.newState()&maxTableMask]
 | 
			
		||||
		} else {
 | 
			
		||||
			bits := br.get32BitsFast(nBits)
 | 
			
		||||
 | 
			
		||||
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
 | 
			
		||||
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
 | 
			
		||||
 | 
			
		||||
@@ -543,8 +410,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Check if space for literals
 | 
			
		||||
	if len(s.literals)+len(s.out)-startSize > maxBlockSize {
 | 
			
		||||
		return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
 | 
			
		||||
	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
 | 
			
		||||
		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Add final literals
 | 
			
		||||
@@ -552,16 +419,6 @@ func (s *sequenceDecs) decodeSync(history *history) error {
 | 
			
		||||
	return br.close()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// update states, at least 27 bits must be available.
 | 
			
		||||
func (s *sequenceDecs) update(br *bitReader) {
 | 
			
		||||
	// Max 8 bits
 | 
			
		||||
	s.litLengths.state.next(br)
 | 
			
		||||
	// Max 9 bits
 | 
			
		||||
	s.matchLengths.state.next(br)
 | 
			
		||||
	// Max 8 bits
 | 
			
		||||
	s.offsets.state.next(br)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var bitMask [16]uint16
 | 
			
		||||
 | 
			
		||||
func init() {
 | 
			
		||||
@@ -570,87 +427,6 @@ func init() {
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// update states, at least 27 bits must be available.
 | 
			
		||||
func (s *sequenceDecs) updateAlt(br *bitReader) {
 | 
			
		||||
	// Update all 3 states at once. Approx 20% faster.
 | 
			
		||||
	a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 | 
			
		||||
 | 
			
		||||
	nBits := a.nbBits() + b.nbBits() + c.nbBits()
 | 
			
		||||
	if nBits == 0 {
 | 
			
		||||
		s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
 | 
			
		||||
		s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
 | 
			
		||||
		s.offsets.state.state = s.offsets.state.dt[c.newState()]
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	bits := br.get32BitsFast(nBits)
 | 
			
		||||
	lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
 | 
			
		||||
	s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
 | 
			
		||||
 | 
			
		||||
	lowBits = uint16(bits >> (c.nbBits() & 31))
 | 
			
		||||
	lowBits &= bitMask[b.nbBits()&15]
 | 
			
		||||
	s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
 | 
			
		||||
 | 
			
		||||
	lowBits = uint16(bits) & bitMask[c.nbBits()&15]
 | 
			
		||||
	s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
 | 
			
		||||
func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
 | 
			
		||||
	// Final will not read from stream.
 | 
			
		||||
	ll, llB := llState.final()
 | 
			
		||||
	ml, mlB := mlState.final()
 | 
			
		||||
	mo, moB := ofState.final()
 | 
			
		||||
 | 
			
		||||
	// extra bits are stored in reverse order.
 | 
			
		||||
	br.fillFast()
 | 
			
		||||
	mo += br.getBits(moB)
 | 
			
		||||
	if s.maxBits > 32 {
 | 
			
		||||
		br.fillFast()
 | 
			
		||||
	}
 | 
			
		||||
	ml += br.getBits(mlB)
 | 
			
		||||
	ll += br.getBits(llB)
 | 
			
		||||
 | 
			
		||||
	if moB > 1 {
 | 
			
		||||
		s.prevOffset[2] = s.prevOffset[1]
 | 
			
		||||
		s.prevOffset[1] = s.prevOffset[0]
 | 
			
		||||
		s.prevOffset[0] = mo
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	// mo = s.adjustOffset(mo, ll, moB)
 | 
			
		||||
	// Inlined for rather big speedup
 | 
			
		||||
	if ll == 0 {
 | 
			
		||||
		// There is an exception though, when current sequence's literals_length = 0.
 | 
			
		||||
		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
 | 
			
		||||
		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
 | 
			
		||||
		mo++
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if mo == 0 {
 | 
			
		||||
		mo = s.prevOffset[0]
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	var temp int
 | 
			
		||||
	if mo == 3 {
 | 
			
		||||
		temp = s.prevOffset[0] - 1
 | 
			
		||||
	} else {
 | 
			
		||||
		temp = s.prevOffset[mo]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if temp == 0 {
 | 
			
		||||
		// 0 is not valid; input is corrupted; force offset to 1
 | 
			
		||||
		println("temp was 0")
 | 
			
		||||
		temp = 1
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if mo != 1 {
 | 
			
		||||
		s.prevOffset[2] = s.prevOffset[1]
 | 
			
		||||
	}
 | 
			
		||||
	s.prevOffset[1] = s.prevOffset[0]
 | 
			
		||||
	s.prevOffset[0] = temp
 | 
			
		||||
	mo = temp
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
 | 
			
		||||
	// Final will not read from stream.
 | 
			
		||||
	ll, llB := llState.final()
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
362 changes: vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go (generated, vendored, new file)
@@ -0,0 +1,362 @@
//go:build amd64 && !appengine && !noasm && gc
 | 
			
		||||
// +build amd64,!appengine,!noasm,gc
 | 
			
		||||
 | 
			
		||||
package zstd
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/klauspost/compress/internal/cpuinfo"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type decodeSyncAsmContext struct {
 | 
			
		||||
	llTable     []decSymbol
 | 
			
		||||
	mlTable     []decSymbol
 | 
			
		||||
	ofTable     []decSymbol
 | 
			
		||||
	llState     uint64
 | 
			
		||||
	mlState     uint64
 | 
			
		||||
	ofState     uint64
 | 
			
		||||
	iteration   int
 | 
			
		||||
	litRemain   int
 | 
			
		||||
	out         []byte
 | 
			
		||||
	outPosition int
 | 
			
		||||
	literals    []byte
 | 
			
		||||
	litPosition int
 | 
			
		||||
	history     []byte
 | 
			
		||||
	windowSize  int
 | 
			
		||||
	ll          int // set on error (not for all errors, please refer to _generate/gen.go)
 | 
			
		||||
	ml          int // set on error (not for all errors, please refer to _generate/gen.go)
 | 
			
		||||
	mo          int // set on error (not for all errors, please refer to _generate/gen.go)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 | 
			
		||||
//
 | 
			
		||||
// Please refer to seqdec_generic.go for the reference implementation.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 | 
			
		||||
 | 
			
		||||
// decode sequences from the stream with the provided history but without a dictionary.
 | 
			
		||||
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 | 
			
		||||
	if len(s.dict) > 0 {
 | 
			
		||||
		return false, nil
 | 
			
		||||
	}
 | 
			
		||||
	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
 | 
			
		||||
		return false, nil
 | 
			
		||||
	}
 | 
			
		||||
	useSafe := false
 | 
			
		||||
	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
 | 
			
		||||
		useSafe = true
 | 
			
		||||
	}
 | 
			
		||||
	if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
 | 
			
		||||
		useSafe = true
 | 
			
		||||
	}
 | 
			
		||||
	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
 | 
			
		||||
		useSafe = true
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	br := s.br
 | 
			
		||||
 | 
			
		||||
	maxBlockSize := maxCompressedBlockSize
 | 
			
		||||
	if s.windowSize < maxBlockSize {
 | 
			
		||||
		maxBlockSize = s.windowSize
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx := decodeSyncAsmContext{
 | 
			
		||||
		llTable:     s.litLengths.fse.dt[:maxTablesize],
 | 
			
		||||
		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
 | 
			
		||||
		ofTable:     s.offsets.fse.dt[:maxTablesize],
 | 
			
		||||
		llState:     uint64(s.litLengths.state.state),
 | 
			
		||||
		mlState:     uint64(s.matchLengths.state.state),
 | 
			
		||||
		ofState:     uint64(s.offsets.state.state),
 | 
			
		||||
		iteration:   s.nSeqs - 1,
 | 
			
		||||
		litRemain:   len(s.literals),
 | 
			
		||||
		out:         s.out,
 | 
			
		||||
		outPosition: len(s.out),
 | 
			
		||||
		literals:    s.literals,
 | 
			
		||||
		windowSize:  s.windowSize,
 | 
			
		||||
		history:     hist,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.seqSize = 0
 | 
			
		||||
	startSize := len(s.out)
 | 
			
		||||
 | 
			
		||||
	var errCode int
 | 
			
		||||
	if cpuinfo.HasBMI2() {
 | 
			
		||||
		if useSafe {
 | 
			
		||||
			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
 | 
			
		||||
		} else {
 | 
			
		||||
			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		if useSafe {
 | 
			
		||||
			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
 | 
			
		||||
		} else {
 | 
			
		||||
			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	switch errCode {
 | 
			
		||||
	case noError:
 | 
			
		||||
		break
 | 
			
		||||
 | 
			
		||||
	case errorMatchLenOfsMismatch:
 | 
			
		||||
		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
 | 
			
		||||
 | 
			
		||||
	case errorMatchLenTooBig:
 | 
			
		||||
		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
 | 
			
		||||
 | 
			
		||||
	case errorMatchOffTooBig:
 | 
			
		||||
		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
 | 
			
		||||
			ctx.mo, ctx.outPosition+len(hist)-startSize)
 | 
			
		||||
 | 
			
		||||
	case errorNotEnoughLiterals:
 | 
			
		||||
		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
 | 
			
		||||
			ctx.ll, ctx.litRemain+ctx.ll)
 | 
			
		||||
 | 
			
		||||
	case errorNotEnoughSpace:
 | 
			
		||||
		size := ctx.outPosition + ctx.ll + ctx.ml
 | 
			
		||||
		if debugDecoder {
 | 
			
		||||
			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 | 
			
		||||
		}
 | 
			
		||||
		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
 | 
			
		||||
 | 
			
		||||
	default:
 | 
			
		||||
		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.seqSize += ctx.litRemain
 | 
			
		||||
	if s.seqSize > maxBlockSize {
 | 
			
		||||
		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
 | 
			
		||||
	}
 | 
			
		||||
	err := br.close()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		printf("Closing sequences: %v, %+v\n", err, *br)
 | 
			
		||||
		return true, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.literals = s.literals[ctx.litPosition:]
 | 
			
		||||
	t := ctx.outPosition
 | 
			
		||||
	s.out = s.out[:t]
 | 
			
		||||
 | 
			
		||||
	// Add final literals
 | 
			
		||||
	s.out = append(s.out, s.literals...)
 | 
			
		||||
	if debugDecoder {
 | 
			
		||||
		t += len(s.literals)
 | 
			
		||||
		if t != len(s.out) {
 | 
			
		||||
			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return true, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// --------------------------------------------------------------------------------
 | 
			
		||||
 | 
			
		||||
type decodeAsmContext struct {
 | 
			
		||||
	llTable   []decSymbol
 | 
			
		||||
	mlTable   []decSymbol
 | 
			
		||||
	ofTable   []decSymbol
 | 
			
		||||
	llState   uint64
 | 
			
		||||
	mlState   uint64
 | 
			
		||||
	ofState   uint64
 | 
			
		||||
	iteration int
 | 
			
		||||
	seqs      []seqVals
 | 
			
		||||
	litRemain int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
const noError = 0
 | 
			
		||||
 | 
			
		||||
// error reported when mo == 0 && ml > 0
 | 
			
		||||
const errorMatchLenOfsMismatch = 1
 | 
			
		||||
 | 
			
		||||
// error reported when ml > maxMatchLen
 | 
			
		||||
const errorMatchLenTooBig = 2
 | 
			
		||||
 | 
			
		||||
// error reported when mo > available history or mo > s.windowSize
 | 
			
		||||
const errorMatchOffTooBig = 3
 | 
			
		||||
 | 
			
		||||
// error reported when the sum of literal lengths exeeceds the literal buffer size
 | 
			
		||||
const errorNotEnoughLiterals = 4
 | 
			
		||||
 | 
			
		||||
// error reported when capacity of `out` is too small
 | 
			
		||||
const errorNotEnoughSpace = 5
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 | 
			
		||||
//
 | 
			
		||||
// Please refer to seqdec_generic.go for the reference implementation.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 | 
			
		||||
//
 | 
			
		||||
// Please refer to seqdec_generic.go for the reference implementation.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 | 
			
		||||
 | 
			
		||||
// decode sequences from the stream without the provided history.
 | 
			
		||||
func (s *sequenceDecs) decode(seqs []seqVals) error {
 | 
			
		||||
	br := s.br
 | 
			
		||||
 | 
			
		||||
	maxBlockSize := maxCompressedBlockSize
 | 
			
		||||
	if s.windowSize < maxBlockSize {
 | 
			
		||||
		maxBlockSize = s.windowSize
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx := decodeAsmContext{
 | 
			
		||||
		llTable:   s.litLengths.fse.dt[:maxTablesize],
 | 
			
		||||
		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
 | 
			
		||||
		ofTable:   s.offsets.fse.dt[:maxTablesize],
 | 
			
		||||
		llState:   uint64(s.litLengths.state.state),
 | 
			
		||||
		mlState:   uint64(s.matchLengths.state.state),
 | 
			
		||||
		ofState:   uint64(s.offsets.state.state),
 | 
			
		||||
		seqs:      seqs,
 | 
			
		||||
		iteration: len(seqs) - 1,
 | 
			
		||||
		litRemain: len(s.literals),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.seqSize = 0
 | 
			
		||||
	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
 | 
			
		||||
	var errCode int
 | 
			
		||||
	if cpuinfo.HasBMI2() {
 | 
			
		||||
		if lte56bits {
 | 
			
		||||
			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
 | 
			
		||||
		} else {
 | 
			
		||||
			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		if lte56bits {
 | 
			
		||||
			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
 | 
			
		||||
		} else {
 | 
			
		||||
			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if errCode != 0 {
 | 
			
		||||
		i := len(seqs) - ctx.iteration - 1
 | 
			
		||||
		switch errCode {
 | 
			
		||||
		case errorMatchLenOfsMismatch:
 | 
			
		||||
			ml := ctx.seqs[i].ml
 | 
			
		||||
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
 | 
			
		||||
 | 
			
		||||
		case errorMatchLenTooBig:
 | 
			
		||||
			ml := ctx.seqs[i].ml
 | 
			
		||||
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
 | 
			
		||||
 | 
			
		||||
		case errorNotEnoughLiterals:
 | 
			
		||||
			ll := ctx.seqs[i].ll
 | 
			
		||||
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if ctx.litRemain < 0 {
 | 
			
		||||
		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
 | 
			
		||||
			len(s.literals), len(s.literals)-ctx.litRemain)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	s.seqSize += ctx.litRemain
 | 
			
		||||
	if s.seqSize > maxBlockSize {
 | 
			
		||||
		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
 | 
			
		||||
	}
 | 
			
		||||
	err := br.close()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		printf("Closing sequences: %v, %+v\n", err, *br)
 | 
			
		||||
	}
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// --------------------------------------------------------------------------------
 | 
			
		||||
 | 
			
		||||
type executeAsmContext struct {
 | 
			
		||||
	seqs        []seqVals
 | 
			
		||||
	seqIndex    int
 | 
			
		||||
	out         []byte
 | 
			
		||||
	history     []byte
 | 
			
		||||
	literals    []byte
 | 
			
		||||
	outPosition int
 | 
			
		||||
	litPosition int
 | 
			
		||||
	windowSize  int
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
 | 
			
		||||
//
 | 
			
		||||
// Returns false if a match offset is too big.
 | 
			
		||||
//
 | 
			
		||||
// Please refer to seqdec_generic.go for the reference implementation.
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 | 
			
		||||
 | 
			
		||||
// Same as above, but with safe memcopies
 | 
			
		||||
//go:noescape
 | 
			
		||||
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
 | 
			
		||||
 | 
			
		||||
// executeSimple handles cases when dictionary is not used.
 | 
			
		||||
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
 | 
			
		||||
	// Ensure we have enough output size...
 | 
			
		||||
	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
 | 
			
		||||
		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
 | 
			
		||||
		s.out = append(s.out, make([]byte, addBytes)...)
 | 
			
		||||
		s.out = s.out[:len(s.out)-addBytes]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if debugDecoder {
 | 
			
		||||
		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var t = len(s.out)
 | 
			
		||||
	out := s.out[:t+s.seqSize]
 | 
			
		||||
 | 
			
		||||
	ctx := executeAsmContext{
 | 
			
		||||
		seqs:        seqs,
 | 
			
		||||
		seqIndex:    0,
 | 
			
		||||
		out:         out,
 | 
			
		||||
		history:     hist,
 | 
			
		||||
		outPosition: t,
 | 
			
		||||
		litPosition: 0,
 | 
			
		||||
		literals:    s.literals,
 | 
			
		||||
		windowSize:  s.windowSize,
 | 
			
		||||
	}
 | 
			
		||||
	var ok bool
 | 
			
		||||
	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
 | 
			
		||||
		ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
 | 
			
		||||
	} else {
 | 
			
		||||
		ok = sequenceDecs_executeSimple_amd64(&ctx)
 | 
			
		||||
	}
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return fmt.Errorf("match offset (%d) bigger than current history (%d)",
 | 
			
		||||
			seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
 | 
			
		||||
	}
 | 
			
		||||
	s.literals = s.literals[ctx.litPosition:]
 | 
			
		||||
	t = ctx.outPosition
 | 
			
		||||
 | 
			
		||||
	// Add final literals
 | 
			
		||||
	copy(out[t:], s.literals)
 | 
			
		||||
	if debugDecoder {
 | 
			
		||||
		t += len(s.literals)
 | 
			
		||||
		if t != len(out) {
 | 
			
		||||
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	s.out = out
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
							
								
								
									
3689 changes: vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (generated, vendored, new file)
File diff suppressed because it is too large
							
								
								
									
237 changes: vendor/github.com/klauspost/compress/zstd/seqdec_generic.go (generated, vendored, new file)
@@ -0,0 +1,237 @@
//go:build !amd64 || appengine || !gc || noasm
 | 
			
		||||
// +build !amd64 appengine !gc noasm
 | 
			
		||||
 | 
			
		||||
package zstd
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// decode sequences from the stream with the provided history but without dictionary.
 | 
			
		||||
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 | 
			
		||||
	return false, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// decode sequences from the stream without the provided history.
 | 
			
		||||
func (s *sequenceDecs) decode(seqs []seqVals) error {
 | 
			
		||||
	br := s.br
 | 
			
		||||
 | 
			
		||||
	// Grab full sizes tables, to avoid bounds checks.
 | 
			
		||||
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 | 
			
		||||
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 | 
			
		||||
	s.seqSize = 0
 | 
			
		||||
	litRemain := len(s.literals)
 | 
			
		||||
 | 
			
		||||
	maxBlockSize := maxCompressedBlockSize
 | 
			
		||||
	if s.windowSize < maxBlockSize {
 | 
			
		||||
		maxBlockSize = s.windowSize
 | 
			
		||||
	}
 | 
			
		||||
	for i := range seqs {
 | 
			
		||||
		var ll, mo, ml int
 | 
			
		||||
		if br.off > 4+((maxOffsetBits+16+16)>>3) {
 | 
			
		||||
			// inlined function:
 | 
			
		||||
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 | 
			
		||||
 | 
			
		||||
			// Final will not read from stream.
 | 
			
		||||
			var llB, mlB, moB uint8
 | 
			
		||||
			ll, llB = llState.final()
 | 
			
		||||
			ml, mlB = mlState.final()
 | 
			
		||||
			mo, moB = ofState.final()
 | 
			
		||||
 | 
			
		||||
			// extra bits are stored in reverse order.
 | 
			
		||||
			br.fillFast()
 | 
			
		||||
			mo += br.getBits(moB)
 | 
			
		||||
			if s.maxBits > 32 {
 | 
			
		||||
				br.fillFast()
 | 
			
		||||
			}
 | 
			
		||||
			ml += br.getBits(mlB)
 | 
			
		||||
			ll += br.getBits(llB)
 | 
			
		||||
 | 
			
		||||
			if moB > 1 {
 | 
			
		||||
				s.prevOffset[2] = s.prevOffset[1]
 | 
			
		||||
				s.prevOffset[1] = s.prevOffset[0]
 | 
			
		||||
				s.prevOffset[0] = mo
 | 
			
		||||
			} else {
 | 
			
		||||
				// mo = s.adjustOffset(mo, ll, moB)
 | 
			
		||||
				// Inlined for rather big speedup
 | 
			
		||||
				if ll == 0 {
 | 
			
		||||
					// There is an exception though, when current sequence's literals_length = 0.
 | 
			
		||||
					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
 | 
			
		||||
					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
 | 
			
		||||
					mo++
 | 
			
		||||
				}
 | 
			
		||||
 | 
			
		||||
				if mo == 0 {
 | 
			
		||||
					mo = s.prevOffset[0]
 | 
			
		||||
				} else {
 | 
			
		||||
					var temp int
 | 
			
		||||
					if mo == 3 {
 | 
			
		||||
						temp = s.prevOffset[0] - 1
 | 
			
		||||
					} else {
 | 
			
		||||
						temp = s.prevOffset[mo]
					}

					if temp == 0 {
						// 0 is not valid; input is corrupted; force offset to 1
						println("WARNING: temp was 0")
						temp = 1
					}

					if mo != 1 {
						s.prevOffset[2] = s.prevOffset[1]
					}
					s.prevOffset[1] = s.prevOffset[0]
					s.prevOffset[0] = temp
					mo = temp
				}
			}
			br.fillFast()
		} else {
			if br.overread() {
				if debugDecoder {
					printf("reading sequence %d, exceeded available data\n", i)
				}
				return io.ErrUnexpectedEOF
			}
			ll, mo, ml = s.next(br, llState, mlState, ofState)
			br.fill()
		}

		if debugSequences {
			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
		}
		// Evaluate.
		// We might be doing this async, so do it early.
		if mo == 0 && ml > 0 {
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
		}
		if ml > maxMatchLen {
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
		}
		s.seqSize += ll + ml
		if s.seqSize > maxBlockSize {
			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
		}
		litRemain -= ll
		if litRemain < 0 {
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
		}
		seqs[i] = seqVals{
			ll: ll,
			ml: ml,
			mo: mo,
		}
		if i == len(seqs)-1 {
			// This is the last sequence, so we shouldn't update state.
			break
		}

		// Manually inlined, ~ 5-20% faster
		// Update all 3 states at once. Approx 20% faster.
		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
		if nBits == 0 {
			llState = llTable[llState.newState()&maxTableMask]
			mlState = mlTable[mlState.newState()&maxTableMask]
			ofState = ofTable[ofState.newState()&maxTableMask]
		} else {
			bits := br.get32BitsFast(nBits)
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits >> (ofState.nbBits() & 31))
			lowBits &= bitMask[mlState.nbBits()&15]
			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
		}
	}
	s.seqSize += litRemain
	if s.seqSize > maxBlockSize {
		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
	}
	return err
}

// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
	// Ensure we have enough output size...
	if len(s.out)+s.seqSize > cap(s.out) {
		addBytes := s.seqSize + len(s.out)
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}

	if debugDecoder {
		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
	}

	var t = len(s.out)
	out := s.out[:t+s.seqSize]

	for _, seq := range seqs {
		// Add literals
		copy(out[t:], s.literals[:seq.ll])
		t += seq.ll
		s.literals = s.literals[seq.ll:]

		// Malformed input
		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
		}

		// Copy from history.
		if v := seq.mo - t; v > 0 {
			// v is the start position in history from end.
			start := len(hist) - v
			if seq.ml > v {
				// Some goes into the current block.
				// Copy remainder of history
				copy(out[t:], hist[start:])
				t += v
				seq.ml -= v
			} else {
				copy(out[t:], hist[start:start+seq.ml])
				t += seq.ml
				continue
			}
		}

		// We must be in the current buffer now
		if seq.ml > 0 {
			start := t - seq.mo
			if seq.ml <= t-start {
				// No overlap
				copy(out[t:], out[start:start+seq.ml])
				t += seq.ml
			} else {
				// Overlapping copy
				// Extend destination slice and copy one byte at the time.
				src := out[start : start+seq.ml]
				dst := out[t:]
				dst = dst[:len(src)]
				t += len(src)
				// Destination is the space we just added.
				for i := range src {
					dst[i] = src[i]
				}
			}
		}
	}
	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out

	return nil
}
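The overlapping-copy branch above copies one byte at a time because the match source may overlap the bytes being written, which a bulk copy would not reproduce. A minimal standalone sketch of the same idea (function and variable names here are illustrative, not part of the vendored decoder):

```go
package main

import "fmt"

// appendMatch extends dst by length bytes copied from offset bytes back.
// The copy is done byte by byte so the source may overlap the output,
// which is how short repeated patterns are expanded.
func appendMatch(dst []byte, offset, length int) []byte {
	start := len(dst) - offset
	for i := 0; i < length; i++ {
		dst = append(dst, dst[start+i])
	}
	return dst
}

func main() {
	out := []byte("ab")
	// offset 2, length 6 repeats "ab" three more times: "abababab".
	fmt.Println(string(appendMatch(out, 2, 6)))
}
```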
59	vendor/github.com/klauspost/compress/zstd/zip.go (generated, vendored)
@@ -18,26 +18,44 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20

var zipReaderPool sync.Pool
// zipReaderPool is the default reader pool.
var zipReaderPool = sync.Pool{New: func() interface{} {
	z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	return z
}}

// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
	dec, ok := zipReaderPool.Get().(*Decoder)
	if ok {
		dec.Reset(r)
	} else {
		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
		if err != nil {
			panic(err)
		}
		dec = d
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
	pool := &zipReaderPool
	if len(opts) > 0 {
		opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
		// Force concurrency 1
		opts = append(opts, WithDecoderConcurrency(1))
		// Create our own pool
		pool = &sync.Pool{}
	}
	return func(r io.Reader) io.ReadCloser {
		dec, ok := pool.Get().(*Decoder)
		if ok {
			dec.Reset(r)
		} else {
			d, err := NewReader(r, opts...)
			if err != nil {
				panic(err)
			}
			dec = d
		}
		return &pooledZipReader{dec: dec, pool: pool}
	}
	return &pooledZipReader{dec: dec}
}

type pooledZipReader struct {
	mu  sync.Mutex // guards Close and Read
	dec *Decoder
	mu   sync.Mutex // guards Close and Read
	pool *sync.Pool
	dec  *Decoder
}

func (r *pooledZipReader) Read(p []byte) (n int, err error) {
@@ -48,8 +66,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
	}
	dec, err := r.dec.Read(p)
	if err == io.EOF {
		err = r.dec.Reset(nil)
		zipReaderPool.Put(r.dec)
		r.dec.Reset(nil)
		r.pool.Put(r.dec)
		r.dec = nil
	}
	return dec, err
@@ -61,7 +79,7 @@ func (r *pooledZipReader) Close() error {
	var err error
	if r.dec != nil {
		err = r.dec.Reset(nil)
		zipReaderPool.Put(r.dec)
		r.pool.Put(r.dec)
		r.dec = nil
	}
	return err
@@ -115,6 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {

// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
	return newZipReader
// Options can be specified. WithDecoderConcurrency(1) is forced,
// and by default a 128MB maximum decompression window is specified.
// The window size can be overridden if required.
func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
	return newZipReader(opts...)
}
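The reworked ZipDecompressor above is meant to be handed to an archive/zip reader. A hedged usage sketch, assuming an archive that actually contains entries stored with the WinZip zstd method (93); the file name is a placeholder:

```go
package main

import (
	"archive/zip"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	r, err := zip.OpenReader("archive.zip") // placeholder input file
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Register the pooled zstd decompressor for method 93 (WinZip zstd).
	r.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())

	for _, f := range r.File {
		fmt.Println(f.Name)
	}
}
```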
11	vendor/github.com/klauspost/compress/zstd/zstd.go (generated, vendored)
@@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
	}
}

// matchLenFast does matching, but will not match the last up to 7 bytes.
func matchLenFast(a, b []byte) int {
	endI := len(a) & (math.MaxInt32 - 7)
	for i := 0; i < endI; i += 8 {
		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
			return i + bits.TrailingZeros64(diff)>>3
		}
	}
	return endI
}

// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.
4	vendor/github.com/mattermost/mattermost-server/v6/model/channel.go (generated, vendored)
@@ -27,7 +27,7 @@ const (
	ChannelGroupMinUsers       = 3
	DefaultChannelName         = "town-square"
	ChannelDisplayNameMaxRunes = 64
	ChannelNameMinLength       = 2
	ChannelNameMinLength       = 1
	ChannelNameMaxLength       = 64
	ChannelHeaderMaxRunes      = 1024
	ChannelPurposeMaxRunes     = 250
@@ -216,7 +216,7 @@ func (o *Channel) IsValid() *AppError {
	}

	if !IsValidChannelIdentifier(o.Name) {
		return NewAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
		return NewAppError("Channel.IsValid", "model.channel.is_valid.1_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
	}

	if !(o.Type == ChannelTypeOpen || o.Type == ChannelTypePrivate || o.Type == ChannelTypeDirect || o.Type == ChannelTypeGroup) {
13	vendor/github.com/mattermost/mattermost-server/v6/model/channel_stats.go (generated, vendored)
@@ -8,4 +8,17 @@ type ChannelStats struct {
	MemberCount     int64  `json:"member_count"`
	GuestCount      int64  `json:"guest_count"`
	PinnedPostCount int64  `json:"pinnedpost_count"`
	FilesCount      int64  `json:"files_count"`
}

func (o *ChannelStats) MemberCount_() float64 {
	return float64(o.MemberCount)
}

func (o *ChannelStats) GuestCount_() float64 {
	return float64(o.GuestCount)
}

func (o *ChannelStats) PinnedPostCount_() float64 {
	return float64(o.PinnedPostCount)
}
150	vendor/github.com/mattermost/mattermost-server/v6/model/client4.go (generated, vendored)
@@ -2638,6 +2638,30 @@ func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channel
// InviteUsersToTeam invite users by email to the team.
func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response, error) {
	r, err := c.DoAPIPost(c.teamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJSON(userEmails))

	if err != nil {
		return nil, BuildResponse(r), err
	}
	defer closeBody(r)
	var list []*EmailInviteWithError
	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
		return nil, nil, NewAppError("InviteUsersToTeamGracefully", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
	}
	return list, BuildResponse(r), nil
}

// InviteUsersToTeam invite users by email to the team.
func (c *Client4) InviteUsersToTeamAndChannelsGracefully(teamId string, userEmails []string, channelIds []string, message string) ([]*EmailInviteWithError, *Response, error) {
	memberInvite := MemberInvite{
		Emails:     userEmails,
		ChannelIds: channelIds,
		Message:    message,
	}
	buf, err := json.Marshal(memberInvite)
	if err != nil {
		return nil, nil, NewAppError("InviteMembersToTeamAndChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
	}
	r, err := c.DoAPIPostBytes(c.teamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), buf)
	if err != nil {
		return nil, BuildResponse(r), err
	}
@@ -3748,6 +3772,49 @@ func (c *Client4) GetPostThread(postId string, etag string, collapsedThreads boo
	return &list, BuildResponse(r), nil
}

// GetPostThreadWithOpts gets a post with all the other posts in the same thread.
func (c *Client4) GetPostThreadWithOpts(postID string, etag string, opts GetPostsOptions) (*PostList, *Response, error) {
	urlVal := c.postRoute(postID) + "/thread"

	values := url.Values{}
	if opts.CollapsedThreads {
		values.Set("collapsedThreads", "true")
	}
	if opts.CollapsedThreadsExtended {
		values.Set("collapsedThreadsExtended", "true")
	}
	if opts.SkipFetchThreads {
		values.Set("skipFetchThreads", "true")
	}
	if opts.PerPage != 0 {
		values.Set("perPage", strconv.Itoa(opts.PerPage))
	}
	if opts.FromPost != "" {
		values.Set("fromPost", opts.FromPost)
	}
	if opts.FromCreateAt != 0 {
		values.Set("fromCreateAt", strconv.FormatInt(opts.FromCreateAt, 10))
	}
	if opts.Direction != "" {
		values.Set("direction", opts.Direction)
	}
	urlVal += "?" + values.Encode()

	r, err := c.DoAPIGet(urlVal, etag)
	if err != nil {
		return nil, BuildResponse(r), err
	}
	defer closeBody(r)
	var list PostList
	if r.StatusCode == http.StatusNotModified {
		return &list, BuildResponse(r), nil
	}
	if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
		return nil, nil, NewAppError("GetPostThread", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
	}
	return &list, BuildResponse(r), nil
}

// GetPostsForChannel gets a page of posts with an array for ordering for a channel.
func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response, error) {
	query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
@@ -6429,6 +6496,39 @@ func (c *Client4) GetBulkReactions(postIds []string) (map[string][]*Reaction, *R
	return reactions, BuildResponse(r), nil
}

func (c *Client4) GetTopReactionsForTeamSince(teamId string, timeRange string, page int, perPage int) (*TopReactionList, *Response, error) {
	query := fmt.Sprintf("?time_range=%v&page=%v&per_page=%v", timeRange, page, perPage)
	r, err := c.DoAPIGet(c.teamRoute(teamId)+"/top/reactions"+query, "")
	if err != nil {
		return nil, BuildResponse(r), err
	}
	defer closeBody(r)
	var topReactions *TopReactionList
	if jsonErr := json.NewDecoder(r.Body).Decode(&topReactions); jsonErr != nil {
		return nil, nil, NewAppError("GetTopReactionsForTeamSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
	}
	return topReactions, BuildResponse(r), nil
}

func (c *Client4) GetTopReactionsForUserSince(teamId string, timeRange string, page int, perPage int) (*TopReactionList, *Response, error) {
	query := fmt.Sprintf("?time_range=%v&page=%v&per_page=%v", timeRange, page, perPage)

	if teamId != "" {
		query += fmt.Sprintf("&team_id=%v", teamId)
	}

	r, err := c.DoAPIGet(c.usersRoute()+"/me/top/reactions"+query, "")
	if err != nil {
		return nil, BuildResponse(r), err
	}
	defer closeBody(r)
	var topReactions *TopReactionList
	if jsonErr := json.NewDecoder(r.Body).Decode(&topReactions); jsonErr != nil {
		return nil, nil, NewAppError("GetTopReactionsForUserSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
	}
	return topReactions, BuildResponse(r), nil
}

// Timezone Section

// GetSupportedTimezone returns a page of supported timezones on the system.
@@ -7658,18 +7758,6 @@ func (c *Client4) GetSubscription() (*Subscription, *Response, error) {
	return subscription, BuildResponse(r), nil
}

func (c *Client4) GetSubscriptionStats() (*SubscriptionStats, *Response, error) {
	r, err := c.DoAPIGet(c.cloudRoute()+"/subscription/stats", "")
	if err != nil {
		return nil, BuildResponse(r), err
	}
	defer closeBody(r)

	var stats *SubscriptionStats
	json.NewDecoder(r.Body).Decode(&stats)
	return stats, BuildResponse(r), nil
}

func (c *Client4) GetInvoicesForSubscription() ([]*Invoice, *Response, error) {
	r, err := c.DoAPIGet(c.cloudRoute()+"/subscription/invoices", "")
	if err != nil {
@@ -7782,6 +7870,12 @@ func (c *Client4) GetUserThreads(userId, teamId string, options GetUserThreadsOp
	if options.Unread {
		v.Set("unread", "true")
	}
	if options.ThreadsOnly {
		v.Set("threadsOnly", "true")
	}
	if options.TotalsOnly {
		v.Set("totalsOnly", "true")
	}
	url := c.userThreadsRoute(userId, teamId)
	if len(v) > 0 {
		url += "?" + v.Encode()
@@ -7826,6 +7920,18 @@ func (c *Client4) UpdateThreadsReadForUser(userId, teamId string) (*Response, er
	return BuildResponse(r), nil
}

func (c *Client4) SetThreadUnreadByPostId(userId, teamId, threadId, postId string) (*ThreadResponse, *Response, error) {
	r, err := c.DoAPIPost(fmt.Sprintf("%s/set_unread/%s", c.userThreadRoute(userId, teamId, threadId), postId), "")
	if err != nil {
		return nil, BuildResponse(r), err
	}
	defer closeBody(r)
	var thread ThreadResponse
	json.NewDecoder(r.Body).Decode(&thread)

	return &thread, BuildResponse(r), nil
}

func (c *Client4) UpdateThreadReadForUser(userId, teamId, threadId string, timestamp int64) (*ThreadResponse, *Response, error) {
	r, err := c.DoAPIPut(fmt.Sprintf("%s/read/%d", c.userThreadRoute(userId, teamId, threadId), timestamp), "")
	if err != nil {
@@ -7854,26 +7960,6 @@ func (c *Client4) UpdateThreadFollowForUser(userId, teamId, threadId string, sta
	return BuildResponse(r), nil
}

func (c *Client4) SendAdminUpgradeRequestEmail() (*Response, error) {
	r, err := c.DoAPIPost(c.cloudRoute()+"/subscription/limitreached/invite", "")
	if err != nil {
		return BuildResponse(r), err
	}
	defer closeBody(r)

	return BuildResponse(r), nil
}

func (c *Client4) SendAdminUpgradeRequestEmailOnJoin() (*Response, error) {
	r, err := c.DoAPIPost(c.cloudRoute()+"/subscription/limitreached/join", "")
	if err != nil {
		return BuildResponse(r), err
	}
	defer closeBody(r)

	return BuildResponse(r), nil
}

func (c *Client4) GetAllSharedChannels(teamID string, page, perPage int) ([]*SharedChannel, *Response, error) {
	url := fmt.Sprintf("%s/%s?page=%d&per_page=%d", c.sharedChannelsRoute(), teamID, page, perPage)
	r, err := c.DoAPIGet(url, "")
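The thread and reactions endpoints added above follow the usual Client4 call pattern. A rough sketch of calling the new options-based thread fetch; the server URL, token and post ID are placeholders, and the token handling is only one possible way to authenticate:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	client := model.NewAPIv4Client("https://mattermost.example.com") // placeholder URL
	client.SetToken("xxxxxxxxxxxxxxxxxxxxxxxxxx")                    // placeholder token

	// Fetch a thread with the options-based call from the diff above.
	opts := model.GetPostsOptions{CollapsedThreads: true, PerPage: 30}
	list, _, err := client.GetPostThreadWithOpts("postid1234567890abcdefghij", "", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("posts in thread:", len(list.Posts))
}
```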
8	vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go (generated, vendored)
@@ -11,8 +11,6 @@ const (
	EventTypeSendAdminWelcomeEmail = "send-admin-welcome-email"
	EventTypeTrialWillEnd          = "trial-will-end"
	EventTypeTrialEnded            = "trial-ended"
	JoinLimitation                 = "join"
	InviteLimitation               = "invite"
)

var MockCWS string
@@ -180,12 +178,6 @@ type FailedPayment struct {
type CloudWorkspaceOwner struct {
	UserName string `json:"username"`
}
type SubscriptionStats struct {
	RemainingSeats int    `json:"remaining_seats"`
	IsPaidTier     string `json:"is_paid_tier"`
	IsFreeTrial    string `json:"is_free_trial"`
}

type SubscriptionChange struct {
	ProductID string `json:"product_id"`
}
1	vendor/github.com/mattermost/mattermost-server/v6/model/cluster_message.go (generated, vendored)
@@ -26,6 +26,7 @@ const (
	ClusterEventInvalidateCacheForWebhooks                  ClusterEvent = "inv_webhooks"
	ClusterEventInvalidateCacheForEmojisById                ClusterEvent = "inv_emojis_by_id"
	ClusterEventInvalidateCacheForEmojisIdByName            ClusterEvent = "inv_emojis_id_by_name"
	ClusterEventInvalidateCacheForChannelFileCount          ClusterEvent = "inv_channel_file_count"
	ClusterEventInvalidateCacheForChannelPinnedpostsCounts  ClusterEvent = "inv_channel_pinnedposts_counts"
	ClusterEventInvalidateCacheForChannelMemberCounts       ClusterEvent = "inv_channel_member_counts"
	ClusterEventInvalidateCacheForLastPosts                 ClusterEvent = "inv_last_posts"
126	vendor/github.com/mattermost/mattermost-server/v6/model/config.go (generated, vendored)
@@ -184,24 +184,24 @@ const (
	TeamSettingsDefaultTeamText = "default"

	ElasticsearchSettingsDefaultConnectionURL                 = "http://localhost:9200"
	ElasticsearchSettingsDefaultUsername                      = "elastic"
	ElasticsearchSettingsDefaultPassword                      = "changeme"
	ElasticsearchSettingsDefaultPostIndexReplicas             = 1
	ElasticsearchSettingsDefaultPostIndexShards               = 1
	ElasticsearchSettingsDefaultChannelIndexReplicas          = 1
	ElasticsearchSettingsDefaultChannelIndexShards            = 1
	ElasticsearchSettingsDefaultUserIndexReplicas             = 1
	ElasticsearchSettingsDefaultUserIndexShards               = 1
	ElasticsearchSettingsDefaultAggregatePostsAfterDays       = 365
	ElasticsearchSettingsDefaultPostsAggregatorJobStartTime   = "03:00"
	ElasticsearchSettingsDefaultIndexPrefix                   = ""
	ElasticsearchSettingsDefaultLiveIndexingBatchSize         = 1
	ElasticsearchSettingsDefaultBulkIndexingTimeWindowSeconds = 3600
	ElasticsearchSettingsDefaultRequestTimeoutSeconds         = 30
	ElasticsearchSettingsDefaultConnectionURL               = "http://localhost:9200"
	ElasticsearchSettingsDefaultUsername                    = "elastic"
	ElasticsearchSettingsDefaultPassword                    = "changeme"
	ElasticsearchSettingsDefaultPostIndexReplicas           = 1
	ElasticsearchSettingsDefaultPostIndexShards             = 1
	ElasticsearchSettingsDefaultChannelIndexReplicas        = 1
	ElasticsearchSettingsDefaultChannelIndexShards          = 1
	ElasticsearchSettingsDefaultUserIndexReplicas           = 1
	ElasticsearchSettingsDefaultUserIndexShards             = 1
	ElasticsearchSettingsDefaultAggregatePostsAfterDays     = 365
	ElasticsearchSettingsDefaultPostsAggregatorJobStartTime = "03:00"
	ElasticsearchSettingsDefaultIndexPrefix                 = ""
	ElasticsearchSettingsDefaultLiveIndexingBatchSize       = 1
	ElasticsearchSettingsDefaultRequestTimeoutSeconds       = 30
	ElasticsearchSettingsDefaultBatchSize                   = 10000

	BleveSettingsDefaultIndexDir                      = ""
	BleveSettingsDefaultBulkIndexingTimeWindowSeconds = 3600
	BleveSettingsDefaultIndexDir  = ""
	BleveSettingsDefaultBatchSize = 10000

	DataRetentionSettingsDefaultMessageRetentionDays = 365
	DataRetentionSettingsDefaultFileRetentionDays    = 365
@@ -275,15 +275,16 @@ var ServerTLSSupportedCiphers = map[string]uint16{
}

type ServiceSettings struct {
	SiteURL                                           *string  `access:"environment_web_server,authentication_saml,write_restrictable"`
	WebsocketURL                                      *string  `access:"write_restrictable,cloud_restrictable"`
	LicenseFileLocation                               *string  `access:"write_restrictable,cloud_restrictable"`                        // telemetry: none
	ListenAddress                                     *string  `access:"environment_web_server,write_restrictable,cloud_restrictable"` // telemetry: none
	ConnectionSecurity                                *string  `access:"environment_web_server,write_restrictable,cloud_restrictable"`
	TLSCertFile                                       *string  `access:"environment_web_server,write_restrictable,cloud_restrictable"`
	TLSKeyFile                                        *string  `access:"environment_web_server,write_restrictable,cloud_restrictable"`
	TLSMinVer                                         *string  `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	TLSStrictTransport                                *bool    `access:"write_restrictable,cloud_restrictable"`
	SiteURL             *string `access:"environment_web_server,authentication_saml,write_restrictable"`
	WebsocketURL        *string `access:"write_restrictable,cloud_restrictable"`
	LicenseFileLocation *string `access:"write_restrictable,cloud_restrictable"`                        // telemetry: none
	ListenAddress       *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` // telemetry: none
	ConnectionSecurity  *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
	TLSCertFile         *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
	TLSKeyFile          *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
	TLSMinVer           *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	TLSStrictTransport  *bool   `access:"write_restrictable,cloud_restrictable"`
	// In seconds.
	TLSStrictTransportMaxAge                          *int64   `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	TLSOverwriteCiphers                               []string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	UseLetsEncrypt                                    *bool    `access:"environment_web_server,write_restrictable,cloud_restrictable"`
@@ -904,7 +905,6 @@ type ExperimentalSettings struct {
	LinkMetadataTimeoutMilliseconds *int64  `access:"experimental_features,write_restrictable,cloud_restrictable"`
	RestrictSystemAdmin             *bool   `access:"experimental_features,write_restrictable"`
	UseNewSAMLLibrary               *bool   `access:"experimental_features,cloud_restrictable"`
	CloudUserLimit                  *int64  `access:"experimental_features,write_restrictable"`
	CloudBilling                    *bool   `access:"experimental_features,write_restrictable"`
	EnableSharedChannels            *bool   `access:"experimental_features"`
	EnableRemoteClusterService      *bool   `access:"experimental_features"`
@@ -931,11 +931,6 @@ func (s *ExperimentalSettings) SetDefaults() {
		s.RestrictSystemAdmin = NewBool(false)
	}

	if s.CloudUserLimit == nil {
		// User limit 0 is treated as no limit
		s.CloudUserLimit = NewInt64(0)
	}

	if s.CloudBilling == nil {
		s.CloudBilling = NewBool(false)
	}
@@ -1541,6 +1536,7 @@ type EmailSettings struct {
	LoginButtonColor                  *string `access:"experimental_features"`
	LoginButtonBorderColor            *string `access:"experimental_features"`
	LoginButtonTextColor              *string `access:"experimental_features"`
	EnableInactivityEmail             *bool
}

func (s *EmailSettings) SetDefaults(isUpdate bool) {
@@ -1683,6 +1679,10 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) {
	if s.LoginButtonTextColor == nil {
		s.LoginButtonTextColor = NewString("#2389D7")
	}

	if s.EnableInactivityEmail == nil {
		s.EnableInactivityEmail = NewBool(true)
	}
}

type RateLimitSettings struct {
@@ -1885,17 +1885,18 @@ func (s *ThemeSettings) SetDefaults() {
}

type TeamSettings struct {
	SiteName                            *string  `access:"site_customization"`
	MaxUsersPerTeam                     *int     `access:"site_users_and_teams"`
	EnableUserCreation                  *bool    `access:"authentication_signup"`
	EnableOpenServer                    *bool    `access:"authentication_signup"`
	EnableUserDeactivation              *bool    `access:"experimental_features"`
	RestrictCreationToDomains           *string  `access:"authentication_signup"` // telemetry: none
	EnableCustomUserStatuses            *bool    `access:"site_users_and_teams"`
	EnableCustomBrand                   *bool    `access:"site_customization"`
	CustomBrandText                     *string  `access:"site_customization"`
	CustomDescriptionText               *string  `access:"site_customization"`
	RestrictDirectMessage               *string  `access:"site_users_and_teams"`
	SiteName                  *string `access:"site_customization"`
	MaxUsersPerTeam           *int    `access:"site_users_and_teams"`
	EnableUserCreation        *bool   `access:"authentication_signup"`
	EnableOpenServer          *bool   `access:"authentication_signup"`
	EnableUserDeactivation    *bool   `access:"experimental_features"`
	RestrictCreationToDomains *string `access:"authentication_signup"` // telemetry: none
	EnableCustomUserStatuses  *bool   `access:"site_users_and_teams"`
	EnableCustomBrand         *bool   `access:"site_customization"`
	CustomBrandText           *string `access:"site_customization"`
	CustomDescriptionText     *string `access:"site_customization"`
	RestrictDirectMessage     *string `access:"site_users_and_teams"`
	// In seconds.
	UserStatusAwayTimeout               *int64   `access:"experimental_features"`
	MaxChannelsPerTeam                  *int64   `access:"site_users_and_teams"`
	MaxNotificationsPerChannel          *int64   `access:"environment_push_notification_server"`
@@ -2475,7 +2476,8 @@ type ElasticsearchSettings struct {
	PostsAggregatorJobStartTime   *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` // telemetry: none
	IndexPrefix                   *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
	LiveIndexingBatchSize         *int    `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
	BulkIndexingTimeWindowSeconds *int    `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
	BulkIndexingTimeWindowSeconds *int    `json:",omitempty"` // telemetry: none
	BatchSize                     *int    `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
	RequestTimeoutSeconds         *int    `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
	SkipTLSVerification           *bool   `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
	Trace                         *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
@@ -2550,8 +2552,8 @@ func (s *ElasticsearchSettings) SetDefaults() {
		s.LiveIndexingBatchSize = NewInt(ElasticsearchSettingsDefaultLiveIndexingBatchSize)
	}

	if s.BulkIndexingTimeWindowSeconds == nil {
		s.BulkIndexingTimeWindowSeconds = NewInt(ElasticsearchSettingsDefaultBulkIndexingTimeWindowSeconds)
	if s.BatchSize == nil {
		s.BatchSize = NewInt(ElasticsearchSettingsDefaultBatchSize)
	}

	if s.RequestTimeoutSeconds == nil {
@@ -2572,7 +2574,8 @@ type BleveSettings struct {
	EnableIndexing                *bool   `access:"experimental_bleve"`
	EnableSearching               *bool   `access:"experimental_bleve"`
	EnableAutocomplete            *bool   `access:"experimental_bleve"`
	BulkIndexingTimeWindowSeconds *int    `access:"experimental_bleve"`
	BulkIndexingTimeWindowSeconds *int    `json:",omitempty"` // telemetry: none
	BatchSize                     *int    `access:"experimental_bleve"`
}

func (bs *BleveSettings) SetDefaults() {
@@ -2592,8 +2595,8 @@ func (bs *BleveSettings) SetDefaults() {
		bs.EnableAutocomplete = NewBool(false)
	}

	if bs.BulkIndexingTimeWindowSeconds == nil {
		bs.BulkIndexingTimeWindowSeconds = NewInt(BleveSettingsDefaultBulkIndexingTimeWindowSeconds)
	if bs.BatchSize == nil {
		bs.BatchSize = NewInt(BleveSettingsDefaultBatchSize)
	}
}

@@ -2643,9 +2646,10 @@ func (s *DataRetentionSettings) SetDefaults() {
}

type JobSettings struct {
	RunJobs                  *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	RunScheduler             *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	CleanupJobsThresholdDays *int  `access:"write_restrictable,cloud_restrictable"`
	RunJobs                    *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	RunScheduler               *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
	CleanupJobsThresholdDays   *int  `access:"write_restrictable,cloud_restrictable"`
	CleanupConfigThresholdDays *int  `access:"write_restrictable,cloud_restrictable"`
}

func (s *JobSettings) SetDefaults() {
@@ -2660,6 +2664,10 @@ func (s *JobSettings) SetDefaults() {
	if s.CleanupJobsThresholdDays == nil {
		s.CleanupJobsThresholdDays = NewInt(-1)
	}

	if s.CleanupConfigThresholdDays == nil {
		s.CleanupConfigThresholdDays = NewInt(-1)
	}
}

type CloudSettings struct {
@@ -3564,13 +3572,13 @@ func (s *ServiceSettings) isValid() *AppError {

	if *s.SiteURL != "" {
		if _, err := url.ParseRequestURI(*s.SiteURL); err != nil {
			return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest)
			return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, err.Error(), http.StatusBadRequest)
		}
	}

	if *s.WebsocketURL != "" {
		if _, err := url.ParseRequestURI(*s.WebsocketURL); err != nil {
			return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, "", http.StatusBadRequest)
			return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, err.Error(), http.StatusBadRequest)
		}
	}

@@ -3632,8 +3640,9 @@ func (s *ElasticsearchSettings) isValid() *AppError {
		return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest)
	}

	if *s.BulkIndexingTimeWindowSeconds < 1 {
		return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest)
	minBatchSize := 1
	if *s.BatchSize < minBatchSize {
		return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_batch_size.app_error", map[string]interface{}{"BatchSize": minBatchSize}, "", http.StatusBadRequest)
	}

	if *s.RequestTimeoutSeconds < 1 {
@@ -3656,8 +3665,9 @@ func (bs *BleveSettings) isValid() *AppError {
			return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest)
		}
	}
	if *bs.BulkIndexingTimeWindowSeconds < 1 {
		return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest)
	minBatchSize := 1
	if *bs.BatchSize < minBatchSize {
		return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_batch_size.app_error", map[string]interface{}{"BatchSize": minBatchSize}, "", http.StatusBadRequest)
	}

	return nil
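The search settings above swap the bulk-indexing time window for a batch size with a default of 10000 and a minimum of 1. A small sketch of how the new default surfaces through SetDefaults, assuming only the types shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	bs := model.BleveSettings{}
	bs.SetDefaults()
	// BatchSize now defaults to BleveSettingsDefaultBatchSize (10000)
	// instead of the removed BulkIndexingTimeWindowSeconds default.
	fmt.Println("default bleve batch size:", *bs.BatchSize)
}
```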
14	vendor/github.com/mattermost/mattermost-server/v6/model/data_retention_policy.go (generated, vendored)
@@ -13,9 +13,9 @@ type GlobalRetentionPolicy struct {
}

type RetentionPolicy struct {
	ID           string `db:"Id" json:"id"`
	DisplayName  string `json:"display_name"`
	PostDuration *int64 `json:"post_duration"`
	ID               string `db:"Id" json:"id"`
	DisplayName      string `json:"display_name"`
	PostDurationDays *int64 `db:"PostDuration" json:"post_duration"`
}

type RetentionPolicyWithTeamAndChannelIDs struct {
@@ -46,8 +46,8 @@ type RetentionPolicyWithTeamAndChannelCountsList struct {
}

type RetentionPolicyForTeam struct {
	TeamID       string `db:"Id" json:"team_id"`
	PostDuration int64  `json:"post_duration"`
	TeamID           string `db:"Id" json:"team_id"`
	PostDurationDays int64  `db:"PostDuration" json:"post_duration"`
}

type RetentionPolicyForTeamList struct {
@@ -56,8 +56,8 @@ type RetentionPolicyForTeamList struct {
}

type RetentionPolicyForChannel struct {
	ChannelID    string `db:"Id" json:"channel_id"`
	PostDuration int64  `json:"post_duration"`
	ChannelID        string `db:"Id" json:"channel_id"`
	PostDurationDays int64  `db:"PostDuration" json:"post_duration"`
}

type RetentionPolicyForChannelList struct {
18	vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go (generated, vendored)
@@ -16,9 +16,6 @@ type FeatureFlags struct {
	// all other values as false.
	TestBoolFeature bool

	// Toggle on and off scheduled jobs for cloud user limit emails see MM-29999
	CloudDelinquentEmailJobsEnabled bool

	// Toggle on and off support for Collapsed Threads
	CollapsedThreads bool

@@ -38,18 +35,12 @@ type FeatureFlags struct {

	PermalinkPreviews bool

	// Determine whether when a user gets created, they'll have noisy notifications e.g. Send desktop notifications for all activity
	NewAccountNoisy bool

	// Enable Calls plugin support in the mobile app
	CallsMobile bool

	// A dash separated list for feature flags to turn on for Boards
	BoardsFeatureFlags string

	// A/B test for the add members to channel button, possible values = ("top", "bottom")
	AddMembersToChannel string

	// Enable Create First Channel
	GuidedChannelCreation bool

@@ -70,12 +61,15 @@ type FeatureFlags struct {

	// Enable GraphQL feature
	GraphQL bool

	InsightsEnabled bool

	CommandPalette bool
}

func (f *FeatureFlags) SetDefaults() {
	f.TestFeature = "off"
	f.TestBoolFeature = false
	f.CloudDelinquentEmailJobsEnabled = false
	f.CollapsedThreads = true
	f.EnableRemoteClusterService = false
	f.AppsEnabled = true
@@ -83,10 +77,8 @@ func (f *FeatureFlags) SetDefaults() {
	f.PluginApps = ""
	f.PluginFocalboard = ""
	f.PermalinkPreviews = true
	f.NewAccountNoisy = false
	f.CallsMobile = false
	f.BoardsFeatureFlags = ""
	f.AddMembersToChannel = "top"
	f.GuidedChannelCreation = false
	f.InviteToTeam = "none"
	f.CustomGroups = true
@@ -95,6 +87,8 @@ func (f *FeatureFlags) SetDefaults() {
	f.EnableInactivityCheckJob = true
	f.UseCaseOnboarding = true
	f.GraphQL = false
	f.InsightsEnabled = false
	f.CommandPalette = false
}
func (f *FeatureFlags) Plugins() map[string]string {
	rFFVal := reflect.ValueOf(f).Elem()
76	vendor/github.com/mattermost/mattermost-server/v6/model/insights.go (generated, vendored, new file)
@@ -0,0 +1,76 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package model

import (
	"net/http"
	"time"
)

const (
	TimeRangeToday string = "today"
	TimeRange7Day  string = "7_day"
	TimeRange28Day string = "28_day"
)

type InsightsOpts struct {
	StartUnixMilli int64
	Page           int
	PerPage        int
}

type InsightsListData struct {
	HasNext bool `json:"has_next"`
}

type InsightsData struct {
	Rank int `json:"rank"`
}

type TopReactionList struct {
	InsightsListData
	Items []*TopReaction `json:"items"`
}

type TopReaction struct {
	InsightsData
	EmojiName string `json:"emoji_name"`
	Count     int64  `json:"count"`
}

// GetStartUnixMilliForTimeRange gets the unix start time in milliseconds from the given time range.
// Time range can be one of: "1_day", "7_day", or "28_day".
func GetStartUnixMilliForTimeRange(timeRange string) (int64, *AppError) {
	now := time.Now()
	_, offset := now.Zone()
	switch timeRange {
	case TimeRangeToday:
		return GetStartOfDayMillis(now, offset), nil
	case TimeRange7Day:
		return GetStartOfDayMillis(now.Add(time.Hour*time.Duration(-168)), offset), nil
	case TimeRange28Day:
		return GetStartOfDayMillis(now.Add(time.Hour*time.Duration(-672)), offset), nil
	}

	return GetStartOfDayMillis(now, offset), NewAppError("Insights.IsValidRequest", "model.insights.time_range.app_error", nil, "", http.StatusBadRequest)
}

// GetTopReactionListWithRankAndPagination adds a rank to each item in the given list of TopReaction and checks if there is
// another page that can be fetched based on the given limit and offset. The given list of TopReaction is assumed to be
// sorted by Count. Returns a TopReactionList.
func GetTopReactionListWithRankAndPagination(reactions []*TopReaction, limit int, offset int) *TopReactionList {
	// Add pagination support
	var hasNext bool
	if (limit != 0) && (len(reactions) == limit+1) {
		hasNext = true
		reactions = reactions[:len(reactions)-1]
	}

	// Assign rank to each reaction
	for i, reaction := range reactions {
		reaction.Rank = offset + i + 1
	}

	return &TopReactionList{InsightsListData: InsightsListData{HasNext: hasNext}, Items: reactions}
}
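GetTopReactionListWithRankAndPagination above expects the caller to fetch limit+1 rows: the extra row only signals that another page exists and is trimmed from the result. A hedged sketch with made-up reaction data:

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// Pretend the store returned limit+1 rows for limit=2, offset=0.
	rows := []*model.TopReaction{
		{EmojiName: "thumbsup", Count: 12},
		{EmojiName: "tada", Count: 7},
		{EmojiName: "eyes", Count: 3}, // extra row: only used to detect a next page
	}

	list := model.GetTopReactionListWithRankAndPagination(rows, 2, 0)
	fmt.Println("has_next:", list.HasNext) // true
	for _, r := range list.Items {
		fmt.Println(r.Rank, r.EmojiName, r.Count) // ranks 1 and 2
	}
}
```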
7	vendor/github.com/mattermost/mattermost-server/v6/model/license.go (generated, vendored)
@@ -11,9 +11,12 @@ import (
)

const (
	DayInSeconds      = 24 * 60 * 60
	DayInMilliseconds = DayInSeconds * 1000

	ExpiredLicenseError = "api.license.add_license.expired.app_error"
	InvalidLicenseError = "api.license.add_license.invalid.app_error"
	LicenseGracePeriod  = 1000 * 60 * 60 * 24 * 10 //10 days
	LicenseGracePeriod  = DayInMilliseconds * 10 //10 days
	LicenseRenewalLink  = "https://mattermost.com/renew/"

	LicenseShortSkuE10          = "E10"
@@ -307,7 +310,7 @@ func (l *License) HasEnterpriseMarketplacePlugins() bool {
// NewTestLicense returns a license that expires in the future and has the given features.
func NewTestLicense(features ...string) *License {
	ret := &License{
		ExpiresAt: GetMillis() + 90*24*60*60*1000,
		ExpiresAt: GetMillis() + 90*DayInMilliseconds,
		Customer:  &Customer{},
		Features:  &Features{},
	}
									
								
								vendor/github.com/mattermost/mattermost-server/v6/model/member_invite.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								vendor/github.com/mattermost/mattermost-server/v6/model/member_invite.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							@@ -0,0 +1,49 @@
 | 
			
		||||
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
 | 
			
		||||
// See LICENSE.txt for license information.
 | 
			
		||||
 | 
			
		||||
package model
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"net/http"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type MemberInvite struct {
 | 
			
		||||
	Emails     []string `json:"emails"`
 | 
			
		||||
	ChannelIds []string `json:"channelIds,omitempty"`
 | 
			
		||||
	Message    string   `json:"message"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IsValid validates that the invitation info is loaded correctly and with the correct structure
 | 
			
		||||
func (i *MemberInvite) IsValid() *AppError {
 | 
			
		||||
	if len(i.Emails) == 0 {
 | 
			
		||||
		return NewAppError("MemberInvite.IsValid", "model.member.is_valid.emails.app_error", nil, "", http.StatusBadRequest)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(i.ChannelIds) > 0 {
 | 
			
		||||
		for _, channel := range i.ChannelIds {
 | 
			
		||||
			if len(channel) != 26 {
 | 
			
		||||
				return NewAppError("MemberInvite.IsValid", "model.member.is_valid.channel.app_error", nil, "channel="+channel, http.StatusBadRequest)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (i *MemberInvite) UnmarshalJSON(b []byte) error {
 | 
			
		||||
	var emails []string
 | 
			
		||||
	if err := json.Unmarshal(b, &emails); err == nil {
 | 
			
		||||
		*i = MemberInvite{}
 | 
			
		||||
		i.Emails = emails
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	type TempMemberInvite MemberInvite
 | 
			
		||||
	var o2 TempMemberInvite
 | 
			
		||||
	if err := json.Unmarshal(b, &o2); err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	*i = MemberInvite(o2)
 | 
			
		||||
	return nil
 | 
			
		||||
}
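The new MemberInvite type accepts two wire formats: a legacy bare JSON array of email addresses and a full object with channel IDs. A small standalone sketch of how that dual-format UnmarshalJSON behaves; the type is re-declared locally so the example runs without the mattermost-server module.

package main

import (
	"encoding/json"
	"fmt"
)

// memberInvite mirrors the new model.MemberInvite shown above.
type memberInvite struct {
	Emails     []string `json:"emails"`
	ChannelIds []string `json:"channelIds,omitempty"`
	Message    string   `json:"message"`
}

func (i *memberInvite) UnmarshalJSON(b []byte) error {
	// Legacy payload: a bare JSON array of email addresses.
	var emails []string
	if err := json.Unmarshal(b, &emails); err == nil {
		*i = memberInvite{Emails: emails}
		return nil
	}
	// Otherwise decode the full object. The local type drops the
	// UnmarshalJSON method, so this call cannot recurse.
	type plain memberInvite
	var o plain
	if err := json.Unmarshal(b, &o); err != nil {
		return err
	}
	*i = memberInvite(o)
	return nil
}

func main() {
	var a, b memberInvite
	_ = json.Unmarshal([]byte(`["one@example.com","two@example.com"]`), &a)
	_ = json.Unmarshal([]byte(`{"emails":["one@example.com"],"channelIds":["abcdefghijklmnopqrstuvwxyz"]}`), &b)
	fmt.Println(a.Emails)               // [one@example.com two@example.com]
	fmt.Println(b.Emails, b.ChannelIds) // [one@example.com] [abcdefghijklmnopqrstuvwxyz]
}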

12  vendor/github.com/mattermost/mattermost-server/v6/model/permalink.go  (generated, vendored)
@@ -8,10 +8,12 @@ type Permalink struct {
}

type PreviewPost struct {
	PostID             string `json:"post_id"`
	Post               *Post  `json:"post"`
	TeamName           string `json:"team_name"`
	ChannelDisplayName string `json:"channel_display_name"`
	PostID             string      `json:"post_id"`
	Post               *Post       `json:"post"`
	TeamName           string      `json:"team_name"`
	ChannelDisplayName string      `json:"channel_display_name"`
	ChannelType        ChannelType `json:"channel_type"`
	ChannelID          string      `json:"channel_id"`
}

func NewPreviewPost(post *Post, team *Team, channel *Channel) *PreviewPost {
@@ -23,5 +25,7 @@ func NewPreviewPost(post *Post, team *Team, channel *Channel) *PreviewPost {
		Post:               post,
		TeamName:           team.Name,
		ChannelDisplayName: channel.DisplayName,
		ChannelType:        channel.Type,
		ChannelID:          channel.Id,
	}
}

3  vendor/github.com/mattermost/mattermost-server/v6/model/post.go  (generated, vendored)
@@ -263,6 +263,9 @@ type GetPostsOptions struct {
	SkipFetchThreads         bool
	CollapsedThreads         bool
	CollapsedThreadsExtended bool
	FromPost                 string // PostId after which to send the items
	FromCreateAt             int64  // CreateAt after which to send the items
	Direction                string // Only accepts up|down. Indicates the order in which to send the items.
}

func (o *Post) Etag() string {

3  vendor/github.com/mattermost/mattermost-server/v6/model/post_list.go  (generated, vendored)
@@ -14,6 +14,8 @@ type PostList struct {
	Posts      map[string]*Post `json:"posts"`
	NextPostId string           `json:"next_post_id"`
	PrevPostId string           `json:"prev_post_id"`
	// HasNext indicates whether there are more items to be fetched or not.
	HasNext bool `json:"has_next"`
}

func NewPostList() *PostList {
@@ -39,6 +41,7 @@ func (o *PostList) Clone() *PostList {
		Posts:      postsCopy,
		NextPostId: o.NextPostId,
		PrevPostId: o.PrevPostId,
		HasNext:    o.HasNext,
	}
}

2  vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go  (generated, vendored)
@@ -51,7 +51,7 @@ func (sc *SharedChannel) IsValid() *AppError {
	}

	if !IsValidChannelIdentifier(sc.ShareName) {
		return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest)
		return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.1_or_more.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest)
	}

	if utf8.RuneCountInString(sc.ShareHeader) > ChannelHeaderMaxRunes {

4  vendor/github.com/mattermost/mattermost-server/v6/model/system.go  (generated, vendored)
@@ -12,7 +12,6 @@ const (
	SystemRanUnitTests                     = "RanUnitTests"
	SystemLastSecurityTime                 = "LastSecurityTime"
	SystemActiveLicenseId                  = "ActiveLicenseId"
	SystemLicenseRenewalToken              = "LicenseRenewalToken"
	SystemLastComplianceTime               = "LastComplianceTime"
	SystemAsymmetricSigningKeyKey          = "AsymmetricSigningKey"
	SystemPostActionCookieSecretKey        = "PostActionCookieSecret"
@@ -34,9 +33,6 @@ const (
	SystemFirstAdminSetupComplete          = "FirstAdminSetupComplete"
	AwsMeteringReportInterval              = 1
	AwsMeteringDimensionUsageHrs           = "UsageHrs"
	UserLimitOverageCycleEndDate           = "UserLimitOverageCycleEndDate"
	OverUserLimitForgivenCount             = "OverUserLimitForgivenCount"
	OverUserLimitLastEmailSent             = "OverUserLimitLastEmailSent"
)

const (

6  vendor/github.com/mattermost/mattermost-server/v6/model/team.go  (generated, vendored)
@@ -252,6 +252,12 @@ func (o *Team) IsGroupConstrained() bool {
	return o.GroupConstrained != nil && *o.GroupConstrained
}

// ShallowCopy returns a shallow copy of team.
func (o *Team) ShallowCopy() *Team {
	c := *o
	return &c
}

// The following are some GraphQL methods necessary to return the
// data in float64 type. The spec doesn't support 64 bit integers,
// so we have to pass the data in float64. The _ at the end is
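ShallowCopy above copies the struct value, so top-level fields become independent while pointer fields still alias the original. A quick standalone illustration of that behavior; the cut-down team struct here is illustrative, not the actual model.Team.

package main

import "fmt"

// team is a stand-in with one value field and one pointer field,
// just enough to show what a shallow copy shares.
type team struct {
	DisplayName      string
	GroupConstrained *bool
}

func (o *team) ShallowCopy() *team {
	c := *o // copies the struct value; pointer fields still alias the original
	return &c
}

func main() {
	constrained := false
	orig := &team{DisplayName: "eng", GroupConstrained: &constrained}

	cp := orig.ShallowCopy()
	cp.DisplayName = "ops"      // value field: only the copy changes
	*cp.GroupConstrained = true // pointer field: shared, both see the change

	fmt.Println(orig.DisplayName, *orig.GroupConstrained) // eng true
	fmt.Println(cp.DisplayName, *cp.GroupConstrained)     // ops true
}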

3  vendor/github.com/mattermost/mattermost-server/v6/model/thread.go  (generated, vendored)
@@ -67,6 +67,9 @@ type GetUserThreadsOpts struct {
	// TotalsOnly will not fetch any threads and just fetch the total counts
	TotalsOnly bool

	// ThreadsOnly will fetch threads but not calculate totals and will return 0
	ThreadsOnly bool

	// TeamOnly will only fetch threads and unreads for the specified team and excludes DMs/GMs
	TeamOnly bool
}

26  vendor/github.com/mattermost/mattermost-server/v6/model/utils.go  (generated, vendored)
@@ -33,6 +33,7 @@ const (
	UppercaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	NUMBERS          = "0123456789"
	SYMBOLS          = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~"
	BinaryParamKey   = "MM_BINARY_PARAMETERS"
)

type StringInterface map[string]interface{}
@@ -124,12 +125,19 @@ func (m *StringMap) Scan(value interface{}) error {

// Value converts StringMap to database value
func (m StringMap) Value() (driver.Value, error) {
	j, err := json.Marshal(m)
	ok := m[BinaryParamKey]
	delete(m, BinaryParamKey)
	buf, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}
	// non utf8 characters are not supported https://mattermost.atlassian.net/browse/MM-41066
	return string(j), err
	if ok == "true" {
		return append([]byte{0x01}, buf...), nil
	} else if ok == "false" {
		return buf, nil
	}
	// Key wasn't found. We fall back to the default case.
	return string(buf), nil
}

func (StringMap) ImplementsGraphQLType(name string) bool {
@@ -502,21 +510,13 @@ var reservedName = []string{
}

func IsValidChannelIdentifier(s string) bool {

	if !IsValidAlphaNumHyphenUnderscore(s, true) {
		return false
	}

	if len(s) < ChannelNameMinLength {
		return false
	}

	return true
	return validSimpleAlphaNum.MatchString(s) && len(s) >= ChannelNameMinLength
}

var (
	validAlphaNum                           = regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`)
	validAlphaNumHyphenUnderscore           = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`)
	validSimpleAlphaNum                     = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]*$`)
	validSimpleAlphaNumHyphenUnderscore     = regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`)
	validSimpleAlphaNumHyphenUnderscorePlus = regexp.MustCompile(`^[a-zA-Z0-9+_-]+$`)
)
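The new StringMap.Value above lets callers opt into a binary-tagged encoding by putting the MM_BINARY_PARAMETERS key into the map: the key is stripped before marshaling, "true" selects a 0x01-prefixed byte slice, "false" a plain byte slice, and anything else falls back to the original string encoding. A standalone sketch of that dispatch, re-implemented locally rather than calling the library:

package main

import (
	"encoding/json"
	"fmt"
)

const binaryParamKey = "MM_BINARY_PARAMETERS" // mirrors BinaryParamKey above

// value reproduces the branching shown in the hunk: the control key is
// removed before marshaling, and "true" selects the 0x01-prefixed byte form.
func value(m map[string]string) (interface{}, error) {
	ok := m[binaryParamKey]
	delete(m, binaryParamKey)
	buf, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}
	if ok == "true" {
		return append([]byte{0x01}, buf...), nil
	} else if ok == "false" {
		return buf, nil
	}
	// Key wasn't found: fall back to the original string encoding.
	return string(buf), nil
}

func main() {
	v1, _ := value(map[string]string{"a": "b"})
	v2, _ := value(map[string]string{"a": "b", binaryParamKey: "true"})
	fmt.Printf("%T %v\n", v1, v1) // string {"a":"b"}
	fmt.Printf("%T %v\n", v2, v2) // []uint8 [1 123 ...]
}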

2  vendor/github.com/mattermost/mattermost-server/v6/model/version.go  (generated, vendored)
@@ -13,7 +13,7 @@ import (
// It should be maintained in chronological order with most current
// release at the front of the list.
var versions = []string{
	"6.6.1",
	"6.7.0",
	"6.6.0",
	"6.5.0",
	"6.4.0",

2  vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3store.go  (generated, vendored)
@@ -297,7 +297,7 @@ func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
	}

	if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
		return errors.Wrapf(err, "unable to copy the file to %s to the new destionation", newPath)
		return errors.Wrapf(err, "unable to copy the file to %s to the new destination", newPath)
	}

	if err := b.client.RemoveObject(context.Background(), b.bucket, oldPath, s3.RemoveObjectOptions{}); err != nil {

2  vendor/github.com/minio/minio-go/v7/Makefile  (generated, vendored)
@@ -9,7 +9,7 @@ checks: lint vet test examples functional-test

lint:
	@mkdir -p ${GOPATH}/bin
	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
	@echo "Running $@ check"
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

3  vendor/github.com/minio/minio-go/v7/api-bucket-notification.go  (generated, vendored)
@@ -103,7 +103,6 @@ func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (
		return notification.Configuration{}, err
	}
	return processBucketNotificationResponse(bucketName, resp)

}

// processes the GetNotification http response from the server.
@@ -207,7 +206,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
			// Use a higher buffer to support unexpected
			// caching done by proxies
			bio.Buffer(notificationEventBuffer, notificationCapacity)
			var json = jsoniter.ConfigCompatibleWithStandardLibrary
			json := jsoniter.ConfigCompatibleWithStandardLibrary

			// Unmarshal each line, returns marshaled values.
			for bio.Scan() {

14  vendor/github.com/minio/minio-go/v7/api-compose-object.go  (generated, vendored)
@@ -202,8 +202,8 @@ func (opts CopySrcOptions) validate() (err error) {

// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {

	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
	// Build headers.
	headers := make(http.Header)

@@ -285,8 +285,8 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
}

func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {

	partID int, startOffset int64, length int64, metadata map[string]string,
) (p CompletePart, err error) {
	headers := make(http.Header)

	// Set source
@@ -338,8 +338,8 @@ func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, des
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
	headers http.Header) (p CompletePart, err error) {

	headers http.Header,
) (p CompletePart, err error) {
	// Build query parameters
	urlValues := make(url.Values)
	urlValues.Set("partNumber", strconv.Itoa(partNumber))
@@ -492,7 +492,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
	objParts := []CompletePart{}
	partIndex := 1
	for i, src := range srcs {
		var h = make(http.Header)
		h := make(http.Header)
		src.Marshal(h)
		if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
			dst.Encryption.Marshal(h)

4  vendor/github.com/minio/minio-go/v7/api-get-object-file.go  (generated, vendored)
@@ -57,7 +57,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
	objectDir, _ := filepath.Split(filePath)
	if objectDir != "" {
		// Create any missing top level directories.
		if err := os.MkdirAll(objectDir, 0700); err != nil {
		if err := os.MkdirAll(objectDir, 0o700); err != nil {
			return err
		}
	}
@@ -72,7 +72,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
	filePartPath := filePath + objectStat.ETag + ".part.minio"

	// If exists, open in append mode. If not create it as a part file.
	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}

1  vendor/github.com/minio/minio-go/v7/api-list.go  (generated, vendored)
@@ -774,7 +774,6 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
	}(objectMultipartStatCh)
	// return.
	return objectMultipartStatCh

}

// listMultipartUploadsQuery - (List Multipart Uploads).

10  vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go  (generated, vendored)
@@ -38,7 +38,8 @@ import (
)

func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
	opts PutObjectOptions) (info UploadInfo, err error) {
	opts PutObjectOptions,
) (info UploadInfo, err error) {
	info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
	if err != nil {
		errResp := ToErrorResponse(err)
@@ -240,7 +241,8 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object

// uploadPart - Uploads a part in a multipart upload.
func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide,
) (ObjectPart, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ObjectPart{}, err
@@ -311,7 +313,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadI

// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
	complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
	complete completeMultipartUpload, opts PutObjectOptions,
) (UploadInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
@@ -392,5 +395,4 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
		Expiration:       expTime,
		ExpirationRuleID: ruleID,
	}, nil

}

14  vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go  (generated, vendored)
@@ -42,8 +42,8 @@ import (
//  - Any reader which has a method 'ReadAt()'
//
func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {

	reader io.Reader, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) {
	if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
		// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
		info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
@@ -91,7 +91,8 @@ type uploadPartReq struct {
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
	reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
	reader io.ReaderAt, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
@@ -147,7 +148,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
	}
	close(uploadPartsCh)

	var partsBuf = make([][]byte, opts.getNumThreads())
	partsBuf := make([][]byte, opts.getNumThreads())
	for i := range partsBuf {
		partsBuf[i] = make([]byte, 0, partSize)
	}
@@ -171,7 +172,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
				}

				n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
				if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
				if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
					uploadedPartsCh <- uploadedPartRes{
						Error: rerr,
					}
@@ -241,7 +242,8 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
}

func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
	reader io.Reader, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
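One of the hunks above is a genuine bug fix rather than a style change: the short-read check compared the outer err instead of the local rerr, so a plain io.EOF returned by readFull could be reported as an upload failure. A self-contained sketch of the difference; readChunk and the variable names here are illustrative, not the library code.

package main

import (
	"fmt"
	"io"
	"strings"
)

// readChunk stands in for the readFull call in the hunk above: it tries to
// fill buf and returns io.EOF / io.ErrUnexpectedEOF on short reads, which
// the caller should treat as end-of-input rather than as failures.
func readChunk(r io.Reader, buf []byte) (int, error) {
	return io.ReadFull(r, buf)
}

func main() {
	var err error // outer variable, like the named return in the hunk
	buf := make([]byte, 8)

	// Reading from an empty reader yields a plain io.EOF.
	n, rerr := readChunk(strings.NewReader(""), buf)

	// Old check: compares the *outer* err against io.EOF, so a plain EOF
	// from readChunk is misclassified as a failure.
	if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
		fmt.Println("old check: treats EOF as an error:", rerr)
	}

	// New check: inspects the local rerr, so EOF and short reads fall
	// through to normal end-of-stream handling.
	if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
		fmt.Println("new check: real failure:", rerr)
	} else {
		fmt.Println("new check: end of input after", n, "bytes")
	}
}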

3  vendor/github.com/minio/minio-go/v7/api-put-object.go  (generated, vendored)
@@ -229,7 +229,8 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// NOTE: Upon errors during upload multipart operation is entirely aborted.
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
	opts PutObjectOptions) (info UploadInfo, err error) {
	opts PutObjectOptions,
) (info UploadInfo, err error) {
	if objectSize < 0 && opts.DisableMultipart {
		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
	}

2  vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go  (generated, vendored)
@@ -133,7 +133,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
			return f, st.Size(), nil
		}
	}
	var flush = func() error { return nil }
	flush := func() error { return nil }
	if !opts.Compress {
		if !opts.InMemory {
			// Insert buffer for writes.

Some files were not shown because too many files have changed in this diff.