forked from jshiffer/matterbridge
parent
d5f9cdf912
commit
08779c2909
48
go.mod
48
go.mod
@ -6,29 +6,29 @@ require (
|
|||||||
github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
|
github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
|
||||||
github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
|
github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
|
||||||
github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
|
github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
|
||||||
github.com/SevereCloud/vksdk/v2 v2.15.0
|
github.com/SevereCloud/vksdk/v2 v2.16.0
|
||||||
github.com/bwmarrin/discordgo v0.27.0
|
github.com/bwmarrin/discordgo v0.27.0
|
||||||
github.com/d5/tengo/v2 v2.13.0
|
github.com/d5/tengo/v2 v2.13.0
|
||||||
github.com/davecgh/go-spew v1.1.1
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/fsnotify/fsnotify v1.6.0
|
github.com/fsnotify/fsnotify v1.6.0
|
||||||
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1
|
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1
|
||||||
github.com/gomarkdown/markdown v0.0.0-20221013030248-663e2500819c
|
github.com/gomarkdown/markdown v0.0.0-20221013030248-663e2500819c
|
||||||
github.com/google/gops v0.3.26
|
github.com/google/gops v0.3.27
|
||||||
github.com/gorilla/schema v1.2.0
|
github.com/gorilla/schema v1.2.0
|
||||||
github.com/gorilla/websocket v1.5.0
|
github.com/gorilla/websocket v1.5.0
|
||||||
github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa
|
github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa
|
||||||
github.com/hashicorp/golang-lru v0.6.0
|
github.com/hashicorp/golang-lru v0.6.0
|
||||||
github.com/jpillora/backoff v1.0.0
|
github.com/jpillora/backoff v1.0.0
|
||||||
github.com/keybase/go-keybase-chat-bot v0.0.0-20221220212439-e48d9abd2c20
|
github.com/keybase/go-keybase-chat-bot v0.0.0-20221220212439-e48d9abd2c20
|
||||||
github.com/kyokomi/emoji/v2 v2.2.11
|
github.com/kyokomi/emoji/v2 v2.2.12
|
||||||
github.com/labstack/echo/v4 v4.10.0
|
github.com/labstack/echo/v4 v4.10.2
|
||||||
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4
|
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4
|
||||||
github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696
|
github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696
|
||||||
github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be
|
github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be
|
||||||
github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27
|
github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27
|
||||||
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75
|
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75
|
||||||
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
|
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba
|
||||||
github.com/matterbridge/matterclient v0.0.0-20220624224459-272af20c7ddf
|
github.com/matterbridge/matterclient v0.0.0-20221106190440-8bcf49695e0d
|
||||||
github.com/mattermost/mattermost-server/v5 v5.39.3
|
github.com/mattermost/mattermost-server/v5 v5.39.3
|
||||||
github.com/mattermost/mattermost-server/v6 v6.7.2
|
github.com/mattermost/mattermost-server/v6 v6.7.2
|
||||||
github.com/mattn/godown v0.0.1
|
github.com/mattn/godown v0.0.1
|
||||||
@ -47,15 +47,15 @@ require (
|
|||||||
github.com/writeas/go-strip-markdown v2.0.1+incompatible
|
github.com/writeas/go-strip-markdown v2.0.1+incompatible
|
||||||
github.com/yaegashi/msgraph.go v0.1.4
|
github.com/yaegashi/msgraph.go v0.1.4
|
||||||
github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
|
github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
|
||||||
go.mau.fi/whatsmeow v0.0.0-20230128195103-dcbc8dd31a22
|
go.mau.fi/whatsmeow v0.0.0-20230306190159-5caded34a872
|
||||||
golang.org/x/image v0.5.0
|
golang.org/x/image v0.6.0
|
||||||
golang.org/x/oauth2 v0.4.0
|
golang.org/x/oauth2 v0.6.0
|
||||||
golang.org/x/text v0.7.0
|
golang.org/x/text v0.8.0
|
||||||
gomod.garykim.dev/nc-talk v0.3.0
|
gomod.garykim.dev/nc-talk v0.3.0
|
||||||
google.golang.org/protobuf v1.28.1
|
google.golang.org/protobuf v1.29.0
|
||||||
gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376
|
gopkg.in/olahol/melody.v1 v1.0.0-20170518105555-d52139073376
|
||||||
layeh.com/gumble v0.0.0-20221205141517-d1df60a3cc14
|
layeh.com/gumble v0.0.0-20221205141517-d1df60a3cc14
|
||||||
modernc.org/sqlite v1.20.3
|
modernc.org/sqlite v1.21.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@ -80,8 +80,8 @@ require (
|
|||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||||
github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
|
github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
|
||||||
github.com/klauspost/compress v1.15.8 // indirect
|
github.com/klauspost/compress v1.16.0 // indirect
|
||||||
github.com/klauspost/cpuid/v2 v2.0.12 // indirect
|
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
|
||||||
github.com/labstack/gommon v0.4.0 // indirect
|
github.com/labstack/gommon v0.4.0 // indirect
|
||||||
github.com/magiconair/properties v1.8.7 // indirect
|
github.com/magiconair/properties v1.8.7 // indirect
|
||||||
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
|
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
|
||||||
@ -89,7 +89,7 @@ require (
|
|||||||
github.com/mattermost/logr v1.0.13 // indirect
|
github.com/mattermost/logr v1.0.13 // indirect
|
||||||
github.com/mattermost/logr/v2 v2.0.15 // indirect
|
github.com/mattermost/logr/v2 v2.0.15 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||||
github.com/minio/md5-simd v1.1.2 // indirect
|
github.com/minio/md5-simd v1.1.2 // indirect
|
||||||
@ -109,7 +109,7 @@ require (
|
|||||||
github.com/philhofer/fwd v1.1.1 // indirect
|
github.com/philhofer/fwd v1.1.1 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
github.com/rickb777/date v1.12.4 // indirect
|
github.com/rickb777/date v1.12.4 // indirect
|
||||||
github.com/rickb777/plural v1.2.0 // indirect
|
github.com/rickb777/plural v1.2.0 // indirect
|
||||||
github.com/rivo/uniseg v0.2.0 // indirect
|
github.com/rivo/uniseg v0.2.0 // indirect
|
||||||
@ -133,13 +133,13 @@ require (
|
|||||||
go.uber.org/atomic v1.9.0 // indirect
|
go.uber.org/atomic v1.9.0 // indirect
|
||||||
go.uber.org/multierr v1.8.0 // indirect
|
go.uber.org/multierr v1.8.0 // indirect
|
||||||
go.uber.org/zap v1.21.0 // indirect
|
go.uber.org/zap v1.21.0 // indirect
|
||||||
golang.org/x/crypto v0.4.0 // indirect
|
golang.org/x/crypto v0.6.0 // indirect
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
golang.org/x/mod v0.8.0 // indirect
|
||||||
golang.org/x/net v0.7.0 // indirect
|
golang.org/x/net v0.8.0 // indirect
|
||||||
golang.org/x/sys v0.5.0 // indirect
|
golang.org/x/sys v0.6.0 // indirect
|
||||||
golang.org/x/term v0.5.0 // indirect
|
golang.org/x/term v0.6.0 // indirect
|
||||||
golang.org/x/time v0.2.0 // indirect
|
golang.org/x/time v0.3.0 // indirect
|
||||||
golang.org/x/tools v0.1.12 // indirect
|
golang.org/x/tools v0.6.0 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||||
@ -148,9 +148,9 @@ require (
|
|||||||
lukechampine.com/uint128 v1.2.0 // indirect
|
lukechampine.com/uint128 v1.2.0 // indirect
|
||||||
modernc.org/cc/v3 v3.40.0 // indirect
|
modernc.org/cc/v3 v3.40.0 // indirect
|
||||||
modernc.org/ccgo/v3 v3.16.13 // indirect
|
modernc.org/ccgo/v3 v3.16.13 // indirect
|
||||||
modernc.org/libc v1.22.2 // indirect
|
modernc.org/libc v1.22.3 // indirect
|
||||||
modernc.org/mathutil v1.5.0 // indirect
|
modernc.org/mathutil v1.5.0 // indirect
|
||||||
modernc.org/memory v1.4.0 // indirect
|
modernc.org/memory v1.5.0 // indirect
|
||||||
modernc.org/opt v0.1.3 // indirect
|
modernc.org/opt v0.1.3 // indirect
|
||||||
modernc.org/strutil v1.1.3 // indirect
|
modernc.org/strutil v1.1.3 // indirect
|
||||||
modernc.org/token v1.0.1 // indirect
|
modernc.org/token v1.0.1 // indirect
|
||||||
|
110
go.sum
110
go.sum
@ -148,8 +148,8 @@ github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c/go.mod h1:DNS
|
|||||||
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
|
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
|
||||||
github.com/RoaringBitmap/roaring v0.8.0/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
|
github.com/RoaringBitmap/roaring v0.8.0/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
|
||||||
github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
|
github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
|
||||||
github.com/SevereCloud/vksdk/v2 v2.15.0 h1:ywyJvuJzN1sD5+GVcYendwNTpK3R/iBZOlOhulyI9ZQ=
|
github.com/SevereCloud/vksdk/v2 v2.16.0 h1:DQ90qqwY/yF1X/SWZQs1kQ/Ik+tphK82d+S6Rch46wQ=
|
||||||
github.com/SevereCloud/vksdk/v2 v2.15.0/go.mod h1:0Q20DuofWA78Vdy6aPjZAM6ep1UR6uVEf/fCqdmBYaY=
|
github.com/SevereCloud/vksdk/v2 v2.16.0/go.mod h1:VN6BH9nFUXcP7Uf0uX74Aht2DQ7+139aG3/Og+jia4w=
|
||||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||||
@ -435,7 +435,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
|
|||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
|
||||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
@ -585,7 +584,6 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
|
|||||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||||
@ -729,14 +727,13 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
|
||||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||||
github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs=
|
github.com/google/go-github/v35 v35.2.0/go.mod h1:s0515YVTI+IMrDoy9Y4pHt9ShGpzHvHO8rZ7L7acgvs=
|
||||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gops v0.3.26 h1:Ziyfd8sEhWVbrCIy59c1WOKodI63Jzojwm0JSZbBPS4=
|
github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI=
|
||||||
github.com/google/gops v0.3.26/go.mod h1:vZ68aOXu2zJoybPyGpaHMmrCyd51DCxJoex4cO3ht/o=
|
github.com/google/gops v0.3.27/go.mod h1:lYqabmfnq4Q6UumWNx96Hjup5BDAVc8zmfIy0SkNCSk=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
@ -877,7 +874,6 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH
|
|||||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||||
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
|
||||||
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
|
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
|
||||||
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
|
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
|
||||||
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
|
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
|
||||||
@ -1003,8 +999,8 @@ github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
|
|||||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||||
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||||
github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
|
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
|
||||||
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
@ -1012,8 +1008,9 @@ github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd
|
|||||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||||
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||||
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
|
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
|
||||||
@ -1035,12 +1032,12 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
|
github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
|
||||||
github.com/kyokomi/emoji/v2 v2.2.11 h1:Pf/ZWVTbnAVkHOLJLWjPxM/FmgyPe+d85cv/OLP5Yus=
|
github.com/kyokomi/emoji/v2 v2.2.12 h1:sSVA5nH9ebR3Zji1o31wu3yOwD1zKXQA2z0zUyeit60=
|
||||||
github.com/kyokomi/emoji/v2 v2.2.11/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE=
|
github.com/kyokomi/emoji/v2 v2.2.12/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE=
|
||||||
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
|
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
|
||||||
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
|
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
|
||||||
github.com/labstack/echo/v4 v4.10.0 h1:5CiyngihEO4HXsz3vVsJn7f8xAlWwRr3aY6Ih280ZKA=
|
github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M=
|
||||||
github.com/labstack/echo/v4 v4.10.0/go.mod h1:S/T/5fy/GigaXnHTkh0ZGe4LpkkQysvRjFMSUTkDRNQ=
|
github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k=
|
||||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||||
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
|
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
|
||||||
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||||
@ -1060,7 +1057,6 @@ github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
|||||||
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4 h1:eOJJOM8RTmDcK1F0SqCBX/Ic1vgDnAZfdll6oik0Ups=
|
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4 h1:eOJJOM8RTmDcK1F0SqCBX/Ic1vgDnAZfdll6oik0Ups=
|
||||||
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4/go.mod h1:lgrnhcF8bg/Bd5HA5DOb4Z+uGqUqGnp4skr+J2GwVgI=
|
github.com/lrstanley/girc v0.0.0-20221222153823-a92667a5c9b4/go.mod h1:lgrnhcF8bg/Bd5HA5DOb4Z+uGqUqGnp4skr+J2GwVgI=
|
||||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
|
||||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
@ -1087,8 +1083,8 @@ github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75 h1:GslZKF7
|
|||||||
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75/go.mod h1:yAjnZ34DuDyPHMPHHjOsTk/FefW4JJjoMMCGt/8uuQA=
|
github.com/matterbridge/gozulipbot v0.0.0-20211023205727-a19d6c1f3b75/go.mod h1:yAjnZ34DuDyPHMPHHjOsTk/FefW4JJjoMMCGt/8uuQA=
|
||||||
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba h1:XleOY4IjAEIcxAh+IFwT5JT5Ze3RHiYz6m+4ZfZ0rc0=
|
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba h1:XleOY4IjAEIcxAh+IFwT5JT5Ze3RHiYz6m+4ZfZ0rc0=
|
||||||
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba/go.mod h1:iXGEotOvwI1R1SjLxRc+BF5rUORTMtE0iMZBT2lxqAU=
|
github.com/matterbridge/logrus-prefixed-formatter v0.5.3-0.20200523233437-d971309a77ba/go.mod h1:iXGEotOvwI1R1SjLxRc+BF5rUORTMtE0iMZBT2lxqAU=
|
||||||
github.com/matterbridge/matterclient v0.0.0-20220624224459-272af20c7ddf h1:vaiRcLFKSD0fzlcLll53LU8HnpVv8XzP7C0mi8Tfvro=
|
github.com/matterbridge/matterclient v0.0.0-20221106190440-8bcf49695e0d h1:aI0ANEzy3dMv3vEAMQ80AItNie0fBR9ZxE2sAedORmM=
|
||||||
github.com/matterbridge/matterclient v0.0.0-20220624224459-272af20c7ddf/go.mod h1:Zg8PH1P/1CNUxozQ8blnjAV9PA4Qn2qWf33cX5yNKGM=
|
github.com/matterbridge/matterclient v0.0.0-20221106190440-8bcf49695e0d/go.mod h1:Zg8PH1P/1CNUxozQ8blnjAV9PA4Qn2qWf33cX5yNKGM=
|
||||||
github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
|
github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
|
||||||
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 h1:Khvh6waxG1cHc4Cz5ef9n3XVCxRWpAKUtqg9PJl5+y8=
|
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 h1:Khvh6waxG1cHc4Cz5ef9n3XVCxRWpAKUtqg9PJl5+y8=
|
||||||
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
|
github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34=
|
||||||
@ -1129,8 +1125,9 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
|
|||||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||||
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
|
||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
|
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
|
||||||
|
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||||
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||||
@ -1365,7 +1362,6 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR
|
|||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
|
||||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||||
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
@ -1416,8 +1412,9 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn
|
|||||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo=
|
github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
|
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||||
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||||
github.com/richardlehane/mscfb v1.0.3/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
|
github.com/richardlehane/mscfb v1.0.3/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
|
||||||
github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
|
github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
|
||||||
github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
|
github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
|
||||||
@ -1475,7 +1472,6 @@ github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4 h1:zwQ1HBo5FYwn1ksMd
|
|||||||
github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4/go.mod h1:vt2jWY/3Qw1bIzle5thrJWucsLuuX9iUNnp20CqCciI=
|
github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4/go.mod h1:vt2jWY/3Qw1bIzle5thrJWucsLuuX9iUNnp20CqCciI=
|
||||||
github.com/shazow/ssh-chat v1.10.1 h1:ePS+ngEYqm+yUuXegDPutysqLV2WoI22XDOeRgI6CE0=
|
github.com/shazow/ssh-chat v1.10.1 h1:ePS+ngEYqm+yUuXegDPutysqLV2WoI22XDOeRgI6CE0=
|
||||||
github.com/shazow/ssh-chat v1.10.1/go.mod h1:0+7szsKylcre0vljkVnbuI6q7Odtc+QCDHxa+fFNV54=
|
github.com/shazow/ssh-chat v1.10.1/go.mod h1:0+7szsKylcre0vljkVnbuI6q7Odtc+QCDHxa+fFNV54=
|
||||||
github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk=
|
|
||||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||||
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
@ -1554,7 +1550,6 @@ github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN
|
|||||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||||
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
||||||
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
|
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||||
@ -1593,7 +1588,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
|||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
@ -1627,8 +1621,6 @@ github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj
|
|||||||
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
|
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
|
||||||
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
|
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
|
||||||
github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
|
github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
|
||||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
|
||||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||||
@ -1698,7 +1690,6 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf
|
|||||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
|
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
|
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
|
||||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
|
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
|
||||||
@ -1719,7 +1710,6 @@ github.com/yuin/goldmark v1.3.8/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
|
|||||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
|
github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
|
||||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||||
@ -1738,8 +1728,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3
|
|||||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||||
go.mau.fi/libsignal v0.1.0 h1:vAKI/nJ5tMhdzke4cTK1fb0idJzz1JuEIpmjprueC+c=
|
go.mau.fi/libsignal v0.1.0 h1:vAKI/nJ5tMhdzke4cTK1fb0idJzz1JuEIpmjprueC+c=
|
||||||
go.mau.fi/libsignal v0.1.0/go.mod h1:R8ovrTezxtUNzCQE5PH30StOQWWeBskBsWE55vMfY9I=
|
go.mau.fi/libsignal v0.1.0/go.mod h1:R8ovrTezxtUNzCQE5PH30StOQWWeBskBsWE55vMfY9I=
|
||||||
go.mau.fi/whatsmeow v0.0.0-20230128195103-dcbc8dd31a22 h1:za/zmM0hcfEKTRcLtr2zcUFE4VpUw8CndXNeV+v676c=
|
go.mau.fi/whatsmeow v0.0.0-20230306190159-5caded34a872 h1:jrIWy0l9kTxl7bdp3muFofZcyLyI1xxE7BXWeldVKr0=
|
||||||
go.mau.fi/whatsmeow v0.0.0-20230128195103-dcbc8dd31a22/go.mod h1:TrdC8N6SnPFxWo5FiMnDIDFuVyfOLzy5dWDaUPNjcHY=
|
go.mau.fi/whatsmeow v0.0.0-20230306190159-5caded34a872/go.mod h1:zoTtv1CupGEyTew7TOwnBmTbHB4pVad2OzjTf5CVwa0=
|
||||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||||
go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
|
go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
|
||||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||||
@ -1826,8 +1816,8 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
|
|||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
@ -1856,8 +1846,8 @@ golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+o
|
|||||||
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.0.0-20210622092929-e6eecd499c2c/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
|
golang.org/x/image v0.0.0-20210622092929-e6eecd499c2c/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
|
||||||
golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
|
golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
|
||||||
golang.org/x/image v0.5.0 h1:5JMiNunQeQw++mMOz48/ISeNu3Iweh/JaZU8ZLqHRrI=
|
golang.org/x/image v0.6.0 h1:bR8b5okrPI3g/gyZakLZHeWxAR8Dn5CyxXv1hLH5g/4=
|
||||||
golang.org/x/image v0.5.0/go.mod h1:FVC7BI/5Ym8R25iw5OLsgshdUBbT1h5jZTpA+mvAdZ4=
|
golang.org/x/image v0.6.0/go.mod h1:MXLdDR43H7cDJq5GEGXEVeeNhPgi+YYEQ2pC1byI1x0=
|
||||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
@ -1885,8 +1875,9 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
|
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
|
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
||||||
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -1969,8 +1960,9 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
|||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||||
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
@ -1991,8 +1983,8 @@ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ
|
|||||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
|
||||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
|
||||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -2007,6 +1999,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||||
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -2099,7 +2093,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -2134,20 +2127,22 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||||
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
|
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
|
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
|
||||||
|
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
@ -2157,16 +2152,17 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||||
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
|
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||||
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@ -2257,8 +2253,9 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
|
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
|
||||||
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -2437,8 +2434,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
|||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
|
||||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
|
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
|
||||||
@ -2653,8 +2650,8 @@ modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c=
|
|||||||
modernc.org/libc v1.11.99/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
|
modernc.org/libc v1.11.99/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
|
||||||
modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
|
modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
|
||||||
modernc.org/libc v1.11.104/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ=
|
modernc.org/libc v1.11.104/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ=
|
||||||
modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0=
|
modernc.org/libc v1.22.3 h1:D/g6O5ftAfavceqlLOFwaZuA5KYafKwmr30A6iSqoyY=
|
||||||
modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
|
modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw=
|
||||||
modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
|
modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
|
||||||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||||
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||||
@ -2665,8 +2662,8 @@ modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
|||||||
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||||
modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
|
modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
|
||||||
modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM=
|
modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM=
|
||||||
modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk=
|
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
|
||||||
modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
|
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
|
||||||
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||||
@ -2674,15 +2671,15 @@ modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
|
|||||||
modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
|
modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
|
||||||
modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
|
modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
|
||||||
modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y=
|
modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y=
|
||||||
modernc.org/sqlite v1.20.3 h1:SqGJMMxjj1PHusLxdYxeQSodg7Jxn9WWkaAQjKrntZs=
|
modernc.org/sqlite v1.21.0 h1:4aP4MdUf15i3R3M2mx6Q90WHKz3nZLoz96zlB6tNdow=
|
||||||
modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A=
|
modernc.org/sqlite v1.21.0/go.mod h1:XwQ0wZPIh1iKb5mkvCJ3szzbhk+tykC8ZWqTRTgYRwI=
|
||||||
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||||
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
|
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
|
||||||
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
|
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
|
||||||
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
|
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
|
||||||
modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
|
modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
|
||||||
modernc.org/tcl v1.9.2/go.mod h1:aw7OnlIoiuJgu1gwbTZtrKnGpDqH9wyH++jZcxdqNsg=
|
modernc.org/tcl v1.9.2/go.mod h1:aw7OnlIoiuJgu1gwbTZtrKnGpDqH9wyH++jZcxdqNsg=
|
||||||
modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34=
|
modernc.org/tcl v1.15.1 h1:mOQwiEK4p7HruMZcwKTZPw/aqtGM4aY00uzWhlKKYws=
|
||||||
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||||
modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
|
modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
|
||||||
modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||||
@ -2692,7 +2689,6 @@ modernc.org/z v1.2.20/go.mod h1:zU9FiF4PbHdOTUxw+IF8j7ArBMRPsHgq10uVPt6xTzo=
|
|||||||
modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
|
modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
|
||||||
modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
|
modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo=
|
|
||||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||||
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
||||||
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
|
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
|
||||||
|
33
vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml
generated
vendored
33
vendor/github.com/SevereCloud/vksdk/v2/.golangci.yml
generated
vendored
@ -1,9 +1,11 @@
|
|||||||
---
|
---
|
||||||
|
run:
|
||||||
|
timeout: 5m
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
disable-all: true
|
disable-all: true
|
||||||
enable:
|
enable:
|
||||||
- bodyclose
|
- bodyclose
|
||||||
- deadcode
|
|
||||||
- errcheck
|
- errcheck
|
||||||
- gochecknoglobals
|
- gochecknoglobals
|
||||||
- goconst
|
- goconst
|
||||||
@ -19,13 +21,11 @@ linters:
|
|||||||
- nakedret
|
- nakedret
|
||||||
- prealloc
|
- prealloc
|
||||||
- staticcheck
|
- staticcheck
|
||||||
- structcheck
|
|
||||||
- stylecheck
|
- stylecheck
|
||||||
- typecheck
|
- typecheck
|
||||||
- unconvert
|
- unconvert
|
||||||
- unparam
|
- unparam
|
||||||
- unused
|
- unused
|
||||||
- varcheck
|
|
||||||
- whitespace
|
- whitespace
|
||||||
- wsl
|
- wsl
|
||||||
- godot
|
- godot
|
||||||
@ -40,7 +40,6 @@ linters:
|
|||||||
- makezero
|
- makezero
|
||||||
- thelper
|
- thelper
|
||||||
- predeclared
|
- predeclared
|
||||||
- ifshort
|
|
||||||
- revive
|
- revive
|
||||||
- durationcheck
|
- durationcheck
|
||||||
- gomoddirectives
|
- gomoddirectives
|
||||||
@ -57,9 +56,18 @@ linters:
|
|||||||
- grouper
|
- grouper
|
||||||
- decorder
|
- decorder
|
||||||
- containedctx
|
- containedctx
|
||||||
# - execinquery # FIXME: panic in 1.46.0
|
|
||||||
- nosprintfhostport
|
- nosprintfhostport
|
||||||
|
- usestdlibvars
|
||||||
|
|
||||||
|
- interfacebloat
|
||||||
|
- reassign
|
||||||
|
|
||||||
|
- testableexamples
|
||||||
|
|
||||||
|
- gocheckcompilerdirectives
|
||||||
|
- asasalint
|
||||||
|
|
||||||
|
# - musttag # TODO: need update golangci-lint
|
||||||
# - wrapcheck # TODO: v3 Fix
|
# - wrapcheck # TODO: v3 Fix
|
||||||
# - testpackage # TODO: Fix testpackage
|
# - testpackage # TODO: Fix testpackage
|
||||||
# - noctx # TODO: Fix noctx
|
# - noctx # TODO: Fix noctx
|
||||||
@ -90,11 +98,22 @@ linters:
|
|||||||
# - errchkjson
|
# - errchkjson
|
||||||
# - maintidx
|
# - maintidx
|
||||||
# - nonamedreturns
|
# - nonamedreturns
|
||||||
|
# - nosnakecase
|
||||||
|
# - execinquery
|
||||||
|
# - logrlint
|
||||||
|
|
||||||
|
# - dupword
|
||||||
|
|
||||||
|
# - ginkgolinter
|
||||||
|
|
||||||
# depricated
|
# depricated
|
||||||
# - maligned
|
# - maligned
|
||||||
# - interfacer
|
# - interfacer
|
||||||
# - golint
|
# - golint
|
||||||
|
# - ifshort
|
||||||
|
# - deadcode
|
||||||
|
# - structcheck
|
||||||
|
# - varcheck
|
||||||
|
|
||||||
issues:
|
issues:
|
||||||
exclude-rules:
|
exclude-rules:
|
||||||
@ -114,4 +133,8 @@ issues:
|
|||||||
- stylecheck
|
- stylecheck
|
||||||
text: "ST1003:.*(Ts|ts).*TS"
|
text: "ST1003:.*(Ts|ts).*TS"
|
||||||
|
|
||||||
|
- linters:
|
||||||
|
- gosec
|
||||||
|
text: "G307:"
|
||||||
|
|
||||||
exclude-use-default: false
|
exclude-use-default: false
|
||||||
|
7
vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md
generated
vendored
7
vendor/github.com/SevereCloud/vksdk/v2/CONTRIBUTING.md
generated
vendored
@ -6,7 +6,7 @@
|
|||||||
|
|
||||||
Требования:
|
Требования:
|
||||||
|
|
||||||
- [Go 1.16+](https://golang.org/doc/install)
|
- [Go 1.18+](https://golang.org/doc/install)
|
||||||
- [golangci-lint](https://github.com/golangci/golangci-lint)
|
- [golangci-lint](https://github.com/golangci/golangci-lint)
|
||||||
- [global .gitignore](https://help.github.com/en/articles/ignoring-files#create-a-global-gitignore)
|
- [global .gitignore](https://help.github.com/en/articles/ignoring-files#create-a-global-gitignore)
|
||||||
|
|
||||||
@ -88,7 +88,4 @@ git push origin <name_of_your_new_branch>
|
|||||||
```
|
```
|
||||||
|
|
||||||
Затем откройте [pull request](https://github.com/SevereCloud/vksdk/pulls)
|
Затем откройте [pull request](https://github.com/SevereCloud/vksdk/pulls)
|
||||||
с веткой:
|
с веткой master
|
||||||
|
|
||||||
- `master` если это багфикс
|
|
||||||
- `dev-v1.2.3` если это новая фича
|
|
||||||
|
1
vendor/github.com/SevereCloud/vksdk/v2/api/ads.go
generated
vendored
1
vendor/github.com/SevereCloud/vksdk/v2/api/ads.go
generated
vendored
@ -203,7 +203,6 @@ type AdsDeleteCampaignsResponse []ErrorType
|
|||||||
|
|
||||||
// AdsDeleteCampaigns archives advertising campaigns.
|
// AdsDeleteCampaigns archives advertising campaigns.
|
||||||
//
|
//
|
||||||
//
|
|
||||||
// Warning! Maximum allowed number of campaigns archived in one request is 100.
|
// Warning! Maximum allowed number of campaigns archived in one request is 100.
|
||||||
//
|
//
|
||||||
// https://vk.com/dev/ads.deleteCampaigns
|
// https://vk.com/dev/ads.deleteCampaigns
|
||||||
|
2
vendor/github.com/SevereCloud/vksdk/v2/api/api.go
generated
vendored
2
vendor/github.com/SevereCloud/vksdk/v2/api/api.go
generated
vendored
@ -248,7 +248,7 @@ func (vk *VK) DefaultHandler(method string, sliceParams ...Params) (Response, er
|
|||||||
|
|
||||||
rawBody := bytes.NewBufferString(query.Encode())
|
rawBody := bytes.NewBufferString(query.Encode())
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", u, rawBody)
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, rawBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return response, err
|
return response, err
|
||||||
}
|
}
|
||||||
|
23
vendor/github.com/SevereCloud/vksdk/v2/api/calls.go
generated
vendored
Normal file
23
vendor/github.com/SevereCloud/vksdk/v2/api/calls.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package api // import "github.com/SevereCloud/vksdk/v2/api"
|
||||||
|
|
||||||
|
// CallsStartResponse struct.
|
||||||
|
type CallsStartResponse struct {
|
||||||
|
JoinLink string `json:"join_link"`
|
||||||
|
CallID string `json:"call_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallsStart method.
|
||||||
|
//
|
||||||
|
// https://vk.com/dev/calls.start
|
||||||
|
func (vk *VK) CallsStart(params Params) (response CallsStartResponse, err error) {
|
||||||
|
err = vk.RequestUnmarshal("calls.start", &response, params)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallsForceFinish method.
|
||||||
|
//
|
||||||
|
// https://vk.com/dev/calls.forceFinish
|
||||||
|
func (vk *VK) CallsForceFinish(params Params) (response int, err error) {
|
||||||
|
err = vk.RequestUnmarshal("calls.forceFinish", &response, params)
|
||||||
|
return
|
||||||
|
}
|
2
vendor/github.com/SevereCloud/vksdk/v2/api/groups.go
generated
vendored
2
vendor/github.com/SevereCloud/vksdk/v2/api/groups.go
generated
vendored
@ -273,6 +273,8 @@ type GroupsGetCatalogResponse struct {
|
|||||||
|
|
||||||
// GroupsGetCatalog returns communities list for a catalog category.
|
// GroupsGetCatalog returns communities list for a catalog category.
|
||||||
//
|
//
|
||||||
|
// Deprecated: This method is deprecated and may be disabled soon, please avoid
|
||||||
|
//
|
||||||
// https://vk.com/dev/groups.getCatalog
|
// https://vk.com/dev/groups.getCatalog
|
||||||
func (vk *VK) GroupsGetCatalog(params Params) (response GroupsGetCatalogResponse, err error) {
|
func (vk *VK) GroupsGetCatalog(params Params) (response GroupsGetCatalogResponse, err error) {
|
||||||
err = vk.RequestUnmarshal("groups.getCatalog", &response, params)
|
err = vk.RequestUnmarshal("groups.getCatalog", &response, params)
|
||||||
|
4
vendor/github.com/SevereCloud/vksdk/v2/api/messages.go
generated
vendored
4
vendor/github.com/SevereCloud/vksdk/v2/api/messages.go
generated
vendored
@ -118,6 +118,8 @@ func (vk *VK) MessagesEditChat(params Params) (response int, err error) {
|
|||||||
|
|
||||||
// MessagesForceCallFinish method.
|
// MessagesForceCallFinish method.
|
||||||
//
|
//
|
||||||
|
// Deprecated: Use CallsForceFinish
|
||||||
|
//
|
||||||
// https://vk.com/dev/messages.forceCallFinish
|
// https://vk.com/dev/messages.forceCallFinish
|
||||||
func (vk *VK) MessagesForceCallFinish(params Params) (response int, err error) {
|
func (vk *VK) MessagesForceCallFinish(params Params) (response int, err error) {
|
||||||
err = vk.RequestUnmarshal("messages.forceCallFinish", &response, params)
|
err = vk.RequestUnmarshal("messages.forceCallFinish", &response, params)
|
||||||
@ -649,6 +651,8 @@ type MessagesStartCallResponse struct {
|
|||||||
|
|
||||||
// MessagesStartCall method.
|
// MessagesStartCall method.
|
||||||
//
|
//
|
||||||
|
// Deprecated: Use CallsStart
|
||||||
|
//
|
||||||
// https://vk.com/dev/messages.startCall
|
// https://vk.com/dev/messages.startCall
|
||||||
func (vk *VK) MessagesStartCall(params Params) (response MessagesStartCallResponse, err error) {
|
func (vk *VK) MessagesStartCall(params Params) (response MessagesStartCallResponse, err error) {
|
||||||
err = vk.RequestUnmarshal("messages.startCall", &response, params)
|
err = vk.RequestUnmarshal("messages.startCall", &response, params)
|
||||||
|
5
vendor/github.com/SevereCloud/vksdk/v2/api/upload.go
generated
vendored
5
vendor/github.com/SevereCloud/vksdk/v2/api/upload.go
generated
vendored
@ -4,7 +4,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
|
|
||||||
"github.com/SevereCloud/vksdk/v2/object"
|
"github.com/SevereCloud/vksdk/v2/object"
|
||||||
@ -34,7 +33,7 @@ func (vk *VK) UploadFile(url string, file io.Reader, fieldname, filename string)
|
|||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
bodyContent, err = ioutil.ReadAll(resp.Body)
|
bodyContent, err = io.ReadAll(resp.Body)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -214,7 +213,7 @@ func (vk *VK) uploadOwnerPhoto(params Params, squareCrop string, file io.Reader)
|
|||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
bodyContent, err := ioutil.ReadAll(resp.Body)
|
bodyContent, err := io.ReadAll(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
2
vendor/github.com/SevereCloud/vksdk/v2/doc.go
generated
vendored
2
vendor/github.com/SevereCloud/vksdk/v2/doc.go
generated
vendored
@ -7,6 +7,6 @@ package vksdk
|
|||||||
|
|
||||||
// Module constants.
|
// Module constants.
|
||||||
const (
|
const (
|
||||||
Version = "2.15.0"
|
Version = "2.16.0"
|
||||||
API = "5.131"
|
API = "5.131"
|
||||||
)
|
)
|
||||||
|
112
vendor/github.com/SevereCloud/vksdk/v2/events/events.go
generated
vendored
112
vendor/github.com/SevereCloud/vksdk/v2/events/events.go
generated
vendored
@ -163,6 +163,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
|
|
||||||
if sliceFunc, ok := fl.special[e.Type]; ok {
|
if sliceFunc, ok := fl.special[e.Type]; ok {
|
||||||
for _, f := range sliceFunc {
|
for _, f := range sliceFunc {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, e) }()
|
go func() { f(ctx, e) }()
|
||||||
} else {
|
} else {
|
||||||
@ -179,6 +181,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageNew {
|
for _, f := range fl.messageNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -192,6 +196,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageReply {
|
for _, f := range fl.messageReply {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -205,6 +211,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageEdit {
|
for _, f := range fl.messageEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -218,6 +226,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageAllow {
|
for _, f := range fl.messageAllow {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -231,6 +241,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageDeny {
|
for _, f := range fl.messageDeny {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -244,6 +256,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageTypingState {
|
for _, f := range fl.messageTypingState {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -257,6 +271,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageEvent {
|
for _, f := range fl.messageEvent {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -270,6 +286,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.photoNew {
|
for _, f := range fl.photoNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -283,6 +301,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.photoCommentNew {
|
for _, f := range fl.photoCommentNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -296,6 +316,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.photoCommentEdit {
|
for _, f := range fl.photoCommentEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -309,6 +331,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.photoCommentRestore {
|
for _, f := range fl.photoCommentRestore {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -322,6 +346,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.photoCommentDelete {
|
for _, f := range fl.photoCommentDelete {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -335,6 +361,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.audioNew {
|
for _, f := range fl.audioNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -348,6 +376,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.videoNew {
|
for _, f := range fl.videoNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -361,6 +391,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.videoCommentNew {
|
for _, f := range fl.videoCommentNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -374,6 +406,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.videoCommentEdit {
|
for _, f := range fl.videoCommentEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -387,6 +421,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.videoCommentRestore {
|
for _, f := range fl.videoCommentRestore {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -400,6 +436,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.videoCommentDelete {
|
for _, f := range fl.videoCommentDelete {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -413,6 +451,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.wallPostNew {
|
for _, f := range fl.wallPostNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -426,6 +466,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.wallRepost {
|
for _, f := range fl.wallRepost {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -439,6 +481,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.wallReplyNew {
|
for _, f := range fl.wallReplyNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -452,6 +496,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.wallReplyEdit {
|
for _, f := range fl.wallReplyEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -465,6 +511,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.wallReplyRestore {
|
for _, f := range fl.wallReplyRestore {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -478,6 +526,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.wallReplyDelete {
|
for _, f := range fl.wallReplyDelete {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -491,6 +541,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.boardPostNew {
|
for _, f := range fl.boardPostNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -504,6 +556,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.boardPostEdit {
|
for _, f := range fl.boardPostEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -517,6 +571,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.boardPostRestore {
|
for _, f := range fl.boardPostRestore {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -530,6 +586,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.boardPostDelete {
|
for _, f := range fl.boardPostDelete {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -543,6 +601,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.marketCommentNew {
|
for _, f := range fl.marketCommentNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -556,6 +616,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.marketCommentEdit {
|
for _, f := range fl.marketCommentEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -569,6 +631,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.marketCommentRestore {
|
for _, f := range fl.marketCommentRestore {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -582,6 +646,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.marketCommentDelete {
|
for _, f := range fl.marketCommentDelete {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -595,6 +661,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.marketOrderNew {
|
for _, f := range fl.marketOrderNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -608,6 +676,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.marketOrderEdit {
|
for _, f := range fl.marketOrderEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -621,6 +691,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.groupLeave {
|
for _, f := range fl.groupLeave {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -634,6 +706,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.groupJoin {
|
for _, f := range fl.groupJoin {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -647,6 +721,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.userBlock {
|
for _, f := range fl.userBlock {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -660,6 +736,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.userUnblock {
|
for _, f := range fl.userUnblock {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -673,6 +751,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.pollVoteNew {
|
for _, f := range fl.pollVoteNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -686,6 +766,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.groupOfficersEdit {
|
for _, f := range fl.groupOfficersEdit {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -699,6 +781,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.groupChangeSettings {
|
for _, f := range fl.groupChangeSettings {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -712,6 +796,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.groupChangePhoto {
|
for _, f := range fl.groupChangePhoto {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -725,6 +811,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.vkpayTransaction {
|
for _, f := range fl.vkpayTransaction {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -738,6 +826,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.leadFormsNew {
|
for _, f := range fl.leadFormsNew {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -751,6 +841,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.appPayload {
|
for _, f := range fl.appPayload {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -764,6 +856,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.messageRead {
|
for _, f := range fl.messageRead {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -777,6 +871,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.likeAdd {
|
for _, f := range fl.likeAdd {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -790,6 +886,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.likeRemove {
|
for _, f := range fl.likeRemove {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -803,6 +901,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutSubscriptionCreate {
|
for _, f := range fl.donutSubscriptionCreate {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -816,6 +916,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutSubscriptionProlonged {
|
for _, f := range fl.donutSubscriptionProlonged {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -829,6 +931,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutSubscriptionExpired {
|
for _, f := range fl.donutSubscriptionExpired {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -842,6 +946,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutSubscriptionCancelled {
|
for _, f := range fl.donutSubscriptionCancelled {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -855,6 +961,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutSubscriptionPriceChanged {
|
for _, f := range fl.donutSubscriptionPriceChanged {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -868,6 +976,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutMoneyWithdraw {
|
for _, f := range fl.donutMoneyWithdraw {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
@ -881,6 +991,8 @@ func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range fl.donutMoneyWithdrawError {
|
for _, f := range fl.donutMoneyWithdrawError {
|
||||||
|
f := f
|
||||||
|
|
||||||
if fl.goroutine {
|
if fl.goroutine {
|
||||||
go func() { f(ctx, obj) }()
|
go func() { f(ctx, obj) }()
|
||||||
} else {
|
} else {
|
||||||
|
12
vendor/github.com/SevereCloud/vksdk/v2/object/stats.go
generated
vendored
12
vendor/github.com/SevereCloud/vksdk/v2/object/stats.go
generated
vendored
@ -66,12 +66,24 @@ type StatsViews struct {
|
|||||||
|
|
||||||
// StatsWallpostStat struct.
|
// StatsWallpostStat struct.
|
||||||
type StatsWallpostStat struct {
|
type StatsWallpostStat struct {
|
||||||
|
PostID int `json:"post_id"`
|
||||||
Hide int `json:"hide"` // Hidings number
|
Hide int `json:"hide"` // Hidings number
|
||||||
JoinGroup int `json:"join_group"` // People have joined the group
|
JoinGroup int `json:"join_group"` // People have joined the group
|
||||||
Links int `json:"links"` // Link click-through
|
Links int `json:"links"` // Link click-through
|
||||||
ReachSubscribers int `json:"reach_subscribers"` // Subscribers reach
|
ReachSubscribers int `json:"reach_subscribers"` // Subscribers reach
|
||||||
ReachTotal int `json:"reach_total"` // Total reach
|
ReachTotal int `json:"reach_total"` // Total reach
|
||||||
|
ReachViral int `json:"reach_viral"` // Viral reach
|
||||||
|
ReachAds int `json:"reach_ads"` // Advertising reach
|
||||||
Report int `json:"report"` // Reports number
|
Report int `json:"report"` // Reports number
|
||||||
ToGroup int `json:"to_group"` // Click-through to community
|
ToGroup int `json:"to_group"` // Click-through to community
|
||||||
Unsubscribe int `json:"unsubscribe"` // Unsubscribed members
|
Unsubscribe int `json:"unsubscribe"` // Unsubscribed members
|
||||||
|
AdViews int `json:"ad_views"`
|
||||||
|
AdSubscribers int `json:"ad_subscribers"`
|
||||||
|
AdHide int `json:"ad_hide"`
|
||||||
|
AdUnsubscribe int `json:"ad_unsubscribe"`
|
||||||
|
AdLinks int `json:"ad_links"`
|
||||||
|
AdToGroup int `json:"ad_to_group"`
|
||||||
|
AdJoinGroup int `json:"ad_join_group"`
|
||||||
|
AdCoverage int `json:"ad_coverage"`
|
||||||
|
AdReport int `json:"ad_report"`
|
||||||
}
|
}
|
||||||
|
8
vendor/github.com/SevereCloud/vksdk/v2/object/users.go
generated
vendored
8
vendor/github.com/SevereCloud/vksdk/v2/object/users.go
generated
vendored
@ -125,6 +125,7 @@ type UsersUser struct {
|
|||||||
MobilePhone string `json:"mobile_phone"`
|
MobilePhone string `json:"mobile_phone"`
|
||||||
HomePhone string `json:"home_phone"`
|
HomePhone string `json:"home_phone"`
|
||||||
FoundWith int `json:"found_with"` // TODO: check it
|
FoundWith int `json:"found_with"` // TODO: check it
|
||||||
|
ImageStatus ImageStatusInfo `json:"image_status"`
|
||||||
OnlineInfo UsersOnlineInfo `json:"online_info"`
|
OnlineInfo UsersOnlineInfo `json:"online_info"`
|
||||||
Mutual FriendsRequestsMutual `json:"mutual"`
|
Mutual FriendsRequestsMutual `json:"mutual"`
|
||||||
TrackCode string `json:"track_code"`
|
TrackCode string `json:"track_code"`
|
||||||
@ -138,6 +139,13 @@ func (user UsersUser) ToMention() string {
|
|||||||
return fmt.Sprintf("[id%d|%s %s]", user.ID, user.FirstName, user.LastName)
|
return fmt.Sprintf("[id%d|%s %s]", user.ID, user.FirstName, user.LastName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ImageStatusInfo struct.
|
||||||
|
type ImageStatusInfo struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Images []BaseImage `json:"images"`
|
||||||
|
}
|
||||||
|
|
||||||
// UsersOnlineInfo struct.
|
// UsersOnlineInfo struct.
|
||||||
type UsersOnlineInfo struct {
|
type UsersOnlineInfo struct {
|
||||||
AppID int `json:"app_id"`
|
AppID int `json:"app_id"`
|
||||||
|
1
vendor/github.com/SevereCloud/vksdk/v2/object/wall.go
generated
vendored
1
vendor/github.com/SevereCloud/vksdk/v2/object/wall.go
generated
vendored
@ -161,6 +161,7 @@ type WallWallpost struct {
|
|||||||
Edited int `json:"edited"` // Date of editing in Unixtime
|
Edited int `json:"edited"` // Date of editing in Unixtime
|
||||||
Copyright WallPostCopyright `json:"copyright"`
|
Copyright WallPostCopyright `json:"copyright"`
|
||||||
PostID int `json:"post_id"`
|
PostID int `json:"post_id"`
|
||||||
|
PostponedID int `json:"postponed_id"` // ID from scheduled posts
|
||||||
ParentsStack []int `json:"parents_stack"`
|
ParentsStack []int `json:"parents_stack"`
|
||||||
Donut WallWallpostDonut `json:"donut"`
|
Donut WallWallpostDonut `json:"donut"`
|
||||||
ShortTextRate float64 `json:"short_text_rate"`
|
ShortTextRate float64 `json:"short_text_rate"`
|
||||||
|
3
vendor/github.com/google/gops/agent/agent.go
generated
vendored
3
vendor/github.com/google/gops/agent/agent.go
generated
vendored
@ -12,7 +12,6 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
gosignal "os/signal"
|
gosignal "os/signal"
|
||||||
@ -115,7 +114,7 @@ func Listen(opts Options) error {
|
|||||||
}
|
}
|
||||||
port := listener.Addr().(*net.TCPAddr).Port
|
port := listener.Addr().(*net.TCPAddr).Port
|
||||||
portfile = filepath.Join(gopsdir, strconv.Itoa(os.Getpid()))
|
portfile = filepath.Join(gopsdir, strconv.Itoa(os.Getpid()))
|
||||||
err = ioutil.WriteFile(portfile, []byte(strconv.Itoa(port)), os.ModePerm)
|
err = os.WriteFile(portfile, []byte(strconv.Itoa(port)), os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
12
vendor/github.com/google/gops/internal/internal.go
generated
vendored
12
vendor/github.com/google/gops/internal/internal.go
generated
vendored
@ -6,11 +6,9 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"os/user"
|
"os/user"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@ -26,14 +24,6 @@ func ConfigDir() (string, error) {
|
|||||||
return filepath.Join(userConfigDir, "gops"), nil
|
return filepath.Join(userConfigDir, "gops"), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return filepath.Join(os.Getenv("APPDATA"), "gops"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if xdgConfigDir := os.Getenv("XDG_CONFIG_HOME"); xdgConfigDir != "" {
|
|
||||||
return filepath.Join(xdgConfigDir, "gops"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
homeDir := guessUnixHomeDir()
|
homeDir := guessUnixHomeDir()
|
||||||
if homeDir == "" {
|
if homeDir == "" {
|
||||||
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
|
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
|
||||||
@ -62,7 +52,7 @@ func GetPort(pid int) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
b, err := ioutil.ReadFile(portfile)
|
b, err := os.ReadFile(portfile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
2
vendor/github.com/klauspost/compress/.goreleaser.yml
generated
vendored
2
vendor/github.com/klauspost/compress/.goreleaser.yml
generated
vendored
@ -3,7 +3,7 @@
|
|||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
- ./gen.sh
|
- ./gen.sh
|
||||||
- go install mvdan.cc/garble@latest
|
- go install mvdan.cc/garble@v0.9.3
|
||||||
|
|
||||||
builds:
|
builds:
|
||||||
-
|
-
|
||||||
|
64
vendor/github.com/klauspost/compress/README.md
generated
vendored
64
vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -9,7 +9,6 @@ This package provides various compression algorithms.
|
|||||||
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
||||||
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
||||||
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
||||||
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
|
|
||||||
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
||||||
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
||||||
@ -17,6 +16,62 @@ This package provides various compression algorithms.
|
|||||||
|
|
||||||
# changelog
|
# changelog
|
||||||
|
|
||||||
|
* Jan 21st, 2023 (v1.15.15)
|
||||||
|
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
|
||||||
|
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
|
||||||
|
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
|
||||||
|
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
|
||||||
|
|
||||||
|
* Jan 3rd, 2023 (v1.15.14)
|
||||||
|
|
||||||
|
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
|
||||||
|
* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
|
||||||
|
* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
|
||||||
|
* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
|
||||||
|
|
||||||
|
* Dec 11, 2022 (v1.15.13)
|
||||||
|
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
|
||||||
|
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
|
||||||
|
|
||||||
|
* Oct 26, 2022 (v1.15.12)
|
||||||
|
|
||||||
|
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
|
||||||
|
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
|
||||||
|
|
||||||
|
* Sept 26, 2022 (v1.15.11)
|
||||||
|
|
||||||
|
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
|
||||||
|
* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
|
||||||
|
* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
|
||||||
|
* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
|
||||||
|
|
||||||
|
* Sept 16, 2022 (v1.15.10)
|
||||||
|
|
||||||
|
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
|
||||||
|
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
|
||||||
|
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
|
||||||
|
* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
|
||||||
|
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
|
||||||
|
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
|
||||||
|
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
|
||||||
|
* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
|
||||||
|
|
||||||
|
* July 21, 2022 (v1.15.9)
|
||||||
|
|
||||||
|
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
|
||||||
|
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
|
||||||
|
* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
|
||||||
|
|
||||||
|
* July 13, 2022 (v1.15.8)
|
||||||
|
|
||||||
|
* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
|
||||||
|
* s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
|
||||||
|
* zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
|
||||||
|
* zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
|
||||||
|
* huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
|
||||||
|
* zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
|
||||||
|
* gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
|
||||||
|
|
||||||
* June 29, 2022 (v1.15.7)
|
* June 29, 2022 (v1.15.7)
|
||||||
|
|
||||||
* s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
|
* s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
|
||||||
@ -81,14 +136,14 @@ This package provides various compression algorithms.
|
|||||||
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
|
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
|
||||||
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
|
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>See Details</summary>
|
|
||||||
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
|
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
|
||||||
|
|
||||||
Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
|
Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
|
||||||
|
|
||||||
While the release has been extensively tested, it is recommended to testing when upgrading.
|
While the release has been extensively tested, it is recommended to testing when upgrading.
|
||||||
</details>
|
|
||||||
|
<details>
|
||||||
|
<summary>See changes to v1.14.x</summary>
|
||||||
|
|
||||||
* Feb 22, 2022 (v1.14.4)
|
* Feb 22, 2022 (v1.14.4)
|
||||||
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
|
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
|
||||||
@ -115,6 +170,7 @@ While the release has been extensively tested, it is recommended to testing when
|
|||||||
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
|
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
|
||||||
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
|
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
|
||||||
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
|
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
|
||||||
|
</details>
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary>See changes to v1.13.x</summary>
|
<summary>See changes to v1.13.x</summary>
|
||||||
|
31
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
31
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error {
|
|||||||
c1.encodeZero(tt[src[ip-2]])
|
c1.encodeZero(tt[src[ip-2]])
|
||||||
ip -= 2
|
ip -= 2
|
||||||
}
|
}
|
||||||
|
src = src[:ip]
|
||||||
|
|
||||||
// Main compression loop.
|
// Main compression loop.
|
||||||
switch {
|
switch {
|
||||||
case !s.zeroBits && s.actualTableLog <= 8:
|
case !s.zeroBits && s.actualTableLog <= 8:
|
||||||
// We can encode 4 symbols without requiring a flush.
|
// We can encode 4 symbols without requiring a flush.
|
||||||
// We do not need to check if any output is 0 bits.
|
// We do not need to check if any output is 0 bits.
|
||||||
for ip >= 4 {
|
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||||
s.bw.flush32()
|
s.bw.flush32()
|
||||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||||
c2.encode(tt[v0])
|
c2.encode(tt[v0])
|
||||||
c1.encode(tt[v1])
|
c1.encode(tt[v1])
|
||||||
c2.encode(tt[v2])
|
c2.encode(tt[v2])
|
||||||
c1.encode(tt[v3])
|
c1.encode(tt[v3])
|
||||||
ip -= 4
|
|
||||||
}
|
}
|
||||||
case !s.zeroBits:
|
case !s.zeroBits:
|
||||||
// We do not need to check if any output is 0 bits.
|
// We do not need to check if any output is 0 bits.
|
||||||
for ip >= 4 {
|
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||||
s.bw.flush32()
|
s.bw.flush32()
|
||||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||||
c2.encode(tt[v0])
|
c2.encode(tt[v0])
|
||||||
c1.encode(tt[v1])
|
c1.encode(tt[v1])
|
||||||
s.bw.flush32()
|
s.bw.flush32()
|
||||||
c2.encode(tt[v2])
|
c2.encode(tt[v2])
|
||||||
c1.encode(tt[v3])
|
c1.encode(tt[v3])
|
||||||
ip -= 4
|
|
||||||
}
|
}
|
||||||
case s.actualTableLog <= 8:
|
case s.actualTableLog <= 8:
|
||||||
// We can encode 4 symbols without requiring a flush
|
// We can encode 4 symbols without requiring a flush
|
||||||
for ip >= 4 {
|
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||||
s.bw.flush32()
|
s.bw.flush32()
|
||||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||||
c2.encodeZero(tt[v0])
|
c2.encodeZero(tt[v0])
|
||||||
c1.encodeZero(tt[v1])
|
c1.encodeZero(tt[v1])
|
||||||
c2.encodeZero(tt[v2])
|
c2.encodeZero(tt[v2])
|
||||||
c1.encodeZero(tt[v3])
|
c1.encodeZero(tt[v3])
|
||||||
ip -= 4
|
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
for ip >= 4 {
|
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||||
s.bw.flush32()
|
s.bw.flush32()
|
||||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||||
c2.encodeZero(tt[v0])
|
c2.encodeZero(tt[v0])
|
||||||
c1.encodeZero(tt[v1])
|
c1.encodeZero(tt[v1])
|
||||||
s.bw.flush32()
|
s.bw.flush32()
|
||||||
c2.encodeZero(tt[v2])
|
c2.encodeZero(tt[v2])
|
||||||
c1.encodeZero(tt[v3])
|
c1.encodeZero(tt[v3])
|
||||||
ip -= 4
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) {
|
|||||||
for _, v := range in {
|
for _, v := range in {
|
||||||
s.count[v]++
|
s.count[v]++
|
||||||
}
|
}
|
||||||
m := uint32(0)
|
m, symlen := uint32(0), s.symbolLen
|
||||||
for i, v := range s.count[:] {
|
for i, v := range s.count[:] {
|
||||||
|
if v == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if v > m {
|
if v > m {
|
||||||
m = v
|
m = v
|
||||||
}
|
}
|
||||||
if v > 0 {
|
symlen = uint16(i) + 1
|
||||||
s.symbolLen = uint16(i) + 1
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
s.symbolLen = symlen
|
||||||
return int(m)
|
return int(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
8
vendor/github.com/klauspost/compress/huff0/bitreader.go
generated
vendored
8
vendor/github.com/klauspost/compress/huff0/bitreader.go
generated
vendored
@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() {
|
|||||||
|
|
||||||
// 2 bounds checks.
|
// 2 bounds checks.
|
||||||
v := b.in[b.off-4 : b.off]
|
v := b.in[b.off-4 : b.off]
|
||||||
v = v[:4]
|
|
||||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||||
b.value |= uint64(low) << (b.bitsRead - 32)
|
b.value |= uint64(low) << (b.bitsRead - 32)
|
||||||
b.bitsRead -= 32
|
b.bitsRead -= 32
|
||||||
@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if b.off > 4 {
|
if b.off > 4 {
|
||||||
v := b.in[b.off-4:]
|
v := b.in[b.off-4 : b.off]
|
||||||
v = v[:4]
|
|
||||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||||
b.value |= uint64(low) << (b.bitsRead - 32)
|
b.value |= uint64(low) << (b.bitsRead - 32)
|
||||||
b.bitsRead -= 32
|
b.bitsRead -= 32
|
||||||
@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() {
|
|||||||
|
|
||||||
// 2 bounds checks.
|
// 2 bounds checks.
|
||||||
v := b.in[b.off-4 : b.off]
|
v := b.in[b.off-4 : b.off]
|
||||||
v = v[:4]
|
|
||||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||||
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
|
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
|
||||||
b.bitsRead -= 32
|
b.bitsRead -= 32
|
||||||
@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if b.off > 4 {
|
if b.off > 4 {
|
||||||
v := b.in[b.off-4:]
|
v := b.in[b.off-4 : b.off]
|
||||||
v = v[:4]
|
|
||||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||||
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
|
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
|
||||||
b.bitsRead -= 32
|
b.bitsRead -= 32
|
||||||
|
104
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
104
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
@ -365,30 +365,30 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
|
|||||||
m := uint32(0)
|
m := uint32(0)
|
||||||
if len(s.prevTable) > 0 {
|
if len(s.prevTable) > 0 {
|
||||||
for i, v := range s.count[:] {
|
for i, v := range s.count[:] {
|
||||||
|
if v == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if v > m {
|
if v > m {
|
||||||
m = v
|
m = v
|
||||||
}
|
}
|
||||||
if v > 0 {
|
|
||||||
s.symbolLen = uint16(i) + 1
|
s.symbolLen = uint16(i) + 1
|
||||||
if i >= len(s.prevTable) {
|
if i >= len(s.prevTable) {
|
||||||
reuse = false
|
reuse = false
|
||||||
} else {
|
} else if s.prevTable[i].nBits == 0 {
|
||||||
if s.prevTable[i].nBits == 0 {
|
|
||||||
reuse = false
|
reuse = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
return int(m), reuse
|
return int(m), reuse
|
||||||
}
|
}
|
||||||
for i, v := range s.count[:] {
|
for i, v := range s.count[:] {
|
||||||
|
if v == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if v > m {
|
if v > m {
|
||||||
m = v
|
m = v
|
||||||
}
|
}
|
||||||
if v > 0 {
|
|
||||||
s.symbolLen = uint16(i) + 1
|
s.symbolLen = uint16(i) + 1
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return int(m), false
|
return int(m), false
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error {
|
|||||||
// Different from reference implementation.
|
// Different from reference implementation.
|
||||||
huffNode0 := s.nodes[0 : huffNodesLen+1]
|
huffNode0 := s.nodes[0 : huffNodesLen+1]
|
||||||
|
|
||||||
for huffNode[nonNullRank].count == 0 {
|
for huffNode[nonNullRank].count() == 0 {
|
||||||
nonNullRank--
|
nonNullRank--
|
||||||
}
|
}
|
||||||
|
|
||||||
lowS := int16(nonNullRank)
|
lowS := int16(nonNullRank)
|
||||||
nodeRoot := nodeNb + lowS - 1
|
nodeRoot := nodeNb + lowS - 1
|
||||||
lowN := nodeNb
|
lowN := nodeNb
|
||||||
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
|
huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
|
||||||
huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
|
huffNode[lowS].setParent(nodeNb)
|
||||||
|
huffNode[lowS-1].setParent(nodeNb)
|
||||||
nodeNb++
|
nodeNb++
|
||||||
lowS -= 2
|
lowS -= 2
|
||||||
for n := nodeNb; n <= nodeRoot; n++ {
|
for n := nodeNb; n <= nodeRoot; n++ {
|
||||||
huffNode[n].count = 1 << 30
|
huffNode[n].setCount(1 << 30)
|
||||||
}
|
}
|
||||||
// fake entry, strong barrier
|
// fake entry, strong barrier
|
||||||
huffNode0[0].count = 1 << 31
|
huffNode0[0].setCount(1 << 31)
|
||||||
|
|
||||||
// create parents
|
// create parents
|
||||||
for nodeNb <= nodeRoot {
|
for nodeNb <= nodeRoot {
|
||||||
var n1, n2 int16
|
var n1, n2 int16
|
||||||
if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
|
if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
|
||||||
n1 = lowS
|
n1 = lowS
|
||||||
lowS--
|
lowS--
|
||||||
} else {
|
} else {
|
||||||
n1 = lowN
|
n1 = lowN
|
||||||
lowN++
|
lowN++
|
||||||
}
|
}
|
||||||
if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
|
if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
|
||||||
n2 = lowS
|
n2 = lowS
|
||||||
lowS--
|
lowS--
|
||||||
} else {
|
} else {
|
||||||
@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error {
|
|||||||
lowN++
|
lowN++
|
||||||
}
|
}
|
||||||
|
|
||||||
huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
|
huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
|
||||||
huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
|
huffNode0[n1+1].setParent(nodeNb)
|
||||||
|
huffNode0[n2+1].setParent(nodeNb)
|
||||||
nodeNb++
|
nodeNb++
|
||||||
}
|
}
|
||||||
|
|
||||||
// distribute weights (unlimited tree height)
|
// distribute weights (unlimited tree height)
|
||||||
huffNode[nodeRoot].nbBits = 0
|
huffNode[nodeRoot].setNbBits(0)
|
||||||
for n := nodeRoot - 1; n >= startNode; n-- {
|
for n := nodeRoot - 1; n >= startNode; n-- {
|
||||||
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
|
huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
|
||||||
}
|
}
|
||||||
for n := uint16(0); n <= nonNullRank; n++ {
|
for n := uint16(0); n <= nonNullRank; n++ {
|
||||||
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
|
huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
|
||||||
}
|
}
|
||||||
s.actualTableLog = s.setMaxHeight(int(nonNullRank))
|
s.actualTableLog = s.setMaxHeight(int(nonNullRank))
|
||||||
maxNbBits := s.actualTableLog
|
maxNbBits := s.actualTableLog
|
||||||
@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error {
|
|||||||
var nbPerRank [tableLogMax + 1]uint16
|
var nbPerRank [tableLogMax + 1]uint16
|
||||||
var valPerRank [16]uint16
|
var valPerRank [16]uint16
|
||||||
for _, v := range huffNode[:nonNullRank+1] {
|
for _, v := range huffNode[:nonNullRank+1] {
|
||||||
nbPerRank[v.nbBits]++
|
nbPerRank[v.nbBits()]++
|
||||||
}
|
}
|
||||||
// determine stating value per rank
|
// determine stating value per rank
|
||||||
{
|
{
|
||||||
@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error {
|
|||||||
|
|
||||||
// push nbBits per symbol, symbol order
|
// push nbBits per symbol, symbol order
|
||||||
for _, v := range huffNode[:nonNullRank+1] {
|
for _, v := range huffNode[:nonNullRank+1] {
|
||||||
s.cTable[v.symbol].nBits = v.nbBits
|
s.cTable[v.symbol()].nBits = v.nbBits()
|
||||||
}
|
}
|
||||||
|
|
||||||
// assign value within rank, symbol order
|
// assign value within rank, symbol order
|
||||||
@ -603,12 +605,12 @@ func (s *Scratch) huffSort() {
|
|||||||
pos := rank[r].current
|
pos := rank[r].current
|
||||||
rank[r].current++
|
rank[r].current++
|
||||||
prev := nodes[(pos-1)&huffNodesMask]
|
prev := nodes[(pos-1)&huffNodesMask]
|
||||||
for pos > rank[r].base && c > prev.count {
|
for pos > rank[r].base && c > prev.count() {
|
||||||
nodes[pos&huffNodesMask] = prev
|
nodes[pos&huffNodesMask] = prev
|
||||||
pos--
|
pos--
|
||||||
prev = nodes[(pos-1)&huffNodesMask]
|
prev = nodes[(pos-1)&huffNodesMask]
|
||||||
}
|
}
|
||||||
nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
|
nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
huffNode := s.nodes[1 : huffNodesLen+1]
|
huffNode := s.nodes[1 : huffNodesLen+1]
|
||||||
//huffNode = huffNode[: huffNodesLen]
|
//huffNode = huffNode[: huffNodesLen]
|
||||||
|
|
||||||
largestBits := huffNode[lastNonNull].nbBits
|
largestBits := huffNode[lastNonNull].nbBits()
|
||||||
|
|
||||||
// early exit : no elt > maxNbBits
|
// early exit : no elt > maxNbBits
|
||||||
if largestBits <= maxNbBits {
|
if largestBits <= maxNbBits {
|
||||||
@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
baseCost := int(1) << (largestBits - maxNbBits)
|
baseCost := int(1) << (largestBits - maxNbBits)
|
||||||
n := uint32(lastNonNull)
|
n := uint32(lastNonNull)
|
||||||
|
|
||||||
for huffNode[n].nbBits > maxNbBits {
|
for huffNode[n].nbBits() > maxNbBits {
|
||||||
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
|
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
|
||||||
huffNode[n].nbBits = maxNbBits
|
huffNode[n].setNbBits(maxNbBits)
|
||||||
n--
|
n--
|
||||||
}
|
}
|
||||||
// n stops at huffNode[n].nbBits <= maxNbBits
|
// n stops at huffNode[n].nbBits <= maxNbBits
|
||||||
|
|
||||||
for huffNode[n].nbBits == maxNbBits {
|
for huffNode[n].nbBits() == maxNbBits {
|
||||||
n--
|
n--
|
||||||
}
|
}
|
||||||
// n end at index of smallest symbol using < maxNbBits
|
// n end at index of smallest symbol using < maxNbBits
|
||||||
@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
{
|
{
|
||||||
currentNbBits := maxNbBits
|
currentNbBits := maxNbBits
|
||||||
for pos := int(n); pos >= 0; pos-- {
|
for pos := int(n); pos >= 0; pos-- {
|
||||||
if huffNode[pos].nbBits >= currentNbBits {
|
if huffNode[pos].nbBits() >= currentNbBits {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
currentNbBits = huffNode[pos].nbBits // < maxNbBits
|
currentNbBits = huffNode[pos].nbBits() // < maxNbBits
|
||||||
rankLast[maxNbBits-currentNbBits] = uint32(pos)
|
rankLast[maxNbBits-currentNbBits] = uint32(pos)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
if lowPos == noSymbol {
|
if lowPos == noSymbol {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
highTotal := huffNode[highPos].count
|
highTotal := huffNode[highPos].count()
|
||||||
lowTotal := 2 * huffNode[lowPos].count
|
lowTotal := 2 * huffNode[lowPos].count()
|
||||||
if highTotal <= lowTotal {
|
if highTotal <= lowTotal {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
// this rank is no longer empty
|
// this rank is no longer empty
|
||||||
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
|
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
|
||||||
}
|
}
|
||||||
huffNode[rankLast[nBitsToDecrease]].nbBits++
|
huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
|
||||||
|
huffNode[rankLast[nBitsToDecrease]].nbBits())
|
||||||
if rankLast[nBitsToDecrease] == 0 {
|
if rankLast[nBitsToDecrease] == 0 {
|
||||||
/* special case, reached largest symbol */
|
/* special case, reached largest symbol */
|
||||||
rankLast[nBitsToDecrease] = noSymbol
|
rankLast[nBitsToDecrease] = noSymbol
|
||||||
} else {
|
} else {
|
||||||
rankLast[nBitsToDecrease]--
|
rankLast[nBitsToDecrease]--
|
||||||
if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
|
if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
|
||||||
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
|
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
|
|
||||||
for totalCost < 0 { /* Sometimes, cost correction overshoot */
|
for totalCost < 0 { /* Sometimes, cost correction overshoot */
|
||||||
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
|
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
|
||||||
for huffNode[n].nbBits == maxNbBits {
|
for huffNode[n].nbBits() == maxNbBits {
|
||||||
n--
|
n--
|
||||||
}
|
}
|
||||||
huffNode[n+1].nbBits--
|
huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
|
||||||
rankLast[1] = n + 1
|
rankLast[1] = n + 1
|
||||||
totalCost++
|
totalCost++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
huffNode[rankLast[1]+1].nbBits--
|
huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
|
||||||
rankLast[1]++
|
rankLast[1]++
|
||||||
totalCost++
|
totalCost++
|
||||||
}
|
}
|
||||||
@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
|||||||
return maxNbBits
|
return maxNbBits
|
||||||
}
|
}
|
||||||
|
|
||||||
type nodeElt struct {
|
// A nodeElt is the fields
|
||||||
count uint32
|
//
|
||||||
parent uint16
|
// count uint32
|
||||||
symbol byte
|
// parent uint16
|
||||||
nbBits uint8
|
// symbol byte
|
||||||
|
// nbBits uint8
|
||||||
|
//
|
||||||
|
// in some order, all squashed into an integer so that the compiler
|
||||||
|
// always loads and stores entire nodeElts instead of separate fields.
|
||||||
|
type nodeElt uint64
|
||||||
|
|
||||||
|
func makeNodeElt(count uint32, symbol byte) nodeElt {
|
||||||
|
return nodeElt(count) | nodeElt(symbol)<<48
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *nodeElt) count() uint32 { return uint32(*e) }
|
||||||
|
func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
|
||||||
|
func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
|
||||||
|
func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }
|
||||||
|
|
||||||
|
func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
|
||||||
|
func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
|
||||||
|
func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
|
||||||
|
38
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
38
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
|
|||||||
b, err := fse.Decompress(in[:iSize], s.fse)
|
b, err := fse.Decompress(in[:iSize], s.fse)
|
||||||
s.fse.Out = nil
|
s.fse.Out = nil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return s, nil, err
|
return s, nil, fmt.Errorf("fse decompress returned: %w", err)
|
||||||
}
|
}
|
||||||
if len(b) > 255 {
|
if len(b) > 255 {
|
||||||
return s, nil, errors.New("corrupt input: output table too large")
|
return s, nil, errors.New("corrupt input: output table too large")
|
||||||
@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
|||||||
d.bufs.Put(buf)
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 1")
|
return nil, errors.New("corruption detected: stream overrun 1")
|
||||||
}
|
}
|
||||||
copy(out, buf[0][:])
|
|
||||||
copy(out[dstEvery:], buf[1][:])
|
|
||||||
copy(out[dstEvery*2:], buf[2][:])
|
|
||||||
copy(out[dstEvery*3:], buf[3][:])
|
|
||||||
out = out[bufoff:]
|
|
||||||
decoded += bufoff * 4
|
|
||||||
// There must at least be 3 buffers left.
|
// There must at least be 3 buffers left.
|
||||||
if len(out) < dstEvery*3 {
|
if len(out)-bufoff < dstEvery*3 {
|
||||||
d.bufs.Put(buf)
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 2")
|
return nil, errors.New("corruption detected: stream overrun 2")
|
||||||
}
|
}
|
||||||
|
//copy(out, buf[0][:])
|
||||||
|
//copy(out[dstEvery:], buf[1][:])
|
||||||
|
//copy(out[dstEvery*2:], buf[2][:])
|
||||||
|
*(*[bufoff]byte)(out) = buf[0]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
|
||||||
|
out = out[bufoff:]
|
||||||
|
decoded += bufoff * 4
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if off > 0 {
|
if off > 0 {
|
||||||
@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
|||||||
d.bufs.Put(buf)
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 1")
|
return nil, errors.New("corruption detected: stream overrun 1")
|
||||||
}
|
}
|
||||||
copy(out, buf[0][:])
|
|
||||||
copy(out[dstEvery:], buf[1][:])
|
|
||||||
copy(out[dstEvery*2:], buf[2][:])
|
|
||||||
copy(out[dstEvery*3:], buf[3][:])
|
|
||||||
out = out[bufoff:]
|
|
||||||
decoded += bufoff * 4
|
|
||||||
// There must at least be 3 buffers left.
|
// There must at least be 3 buffers left.
|
||||||
if len(out) < dstEvery*3 {
|
if len(out)-bufoff < dstEvery*3 {
|
||||||
d.bufs.Put(buf)
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 2")
|
return nil, errors.New("corruption detected: stream overrun 2")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//copy(out, buf[0][:])
|
||||||
|
//copy(out[dstEvery:], buf[1][:])
|
||||||
|
//copy(out[dstEvery*2:], buf[2][:])
|
||||||
|
// copy(out[dstEvery*3:], buf[3][:])
|
||||||
|
*(*[bufoff]byte)(out) = buf[0]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
|
||||||
|
out = out[bufoff:]
|
||||||
|
decoded += bufoff * 4
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if off > 0 {
|
if off > 0 {
|
||||||
|
4
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
4
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
@ -14,12 +14,14 @@ import (
|
|||||||
|
|
||||||
// decompress4x_main_loop_x86 is an x86 assembler implementation
|
// decompress4x_main_loop_x86 is an x86 assembler implementation
|
||||||
// of Decompress4X when tablelog > 8.
|
// of Decompress4X when tablelog > 8.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
||||||
|
|
||||||
// decompress4x_8b_loop_x86 is an x86 assembler implementation
|
// decompress4x_8b_loop_x86 is an x86 assembler implementation
|
||||||
// of Decompress4X when tablelog <= 8 which decodes 4 entries
|
// of Decompress4X when tablelog <= 8 which decodes 4 entries
|
||||||
// per loop.
|
// per loop.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
||||||
|
|
||||||
@ -145,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
|||||||
|
|
||||||
// decompress4x_main_loop_x86 is an x86 assembler implementation
|
// decompress4x_main_loop_x86 is an x86 assembler implementation
|
||||||
// of Decompress1X when tablelog > 8.
|
// of Decompress1X when tablelog > 8.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
|
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
|
||||||
|
|
||||||
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
|
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
|
||||||
// of Decompress1X when tablelog > 8.
|
// of Decompress1X when tablelog > 8.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
|
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
|
||||||
|
|
||||||
|
577
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
577
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
@ -1,364 +1,352 @@
|
|||||||
// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
|
// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
|
||||||
|
|
||||||
//go:build amd64 && !appengine && !noasm && gc
|
//go:build amd64 && !appengine && !noasm && gc
|
||||||
// +build amd64,!appengine,!noasm,gc
|
|
||||||
|
|
||||||
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
||||||
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
|
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
|
||||||
XORQ DX, DX
|
|
||||||
|
|
||||||
// Preload values
|
// Preload values
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVBQZX 8(AX), DI
|
MOVBQZX 8(AX), DI
|
||||||
MOVQ 16(AX), SI
|
MOVQ 16(AX), BX
|
||||||
MOVQ 48(AX), BX
|
MOVQ 48(AX), SI
|
||||||
MOVQ 24(AX), R9
|
MOVQ 24(AX), R8
|
||||||
MOVQ 32(AX), R10
|
MOVQ 32(AX), R9
|
||||||
MOVQ (AX), R11
|
MOVQ (AX), R10
|
||||||
|
|
||||||
// Main loop
|
// Main loop
|
||||||
main_loop:
|
main_loop:
|
||||||
MOVQ SI, R8
|
XORL DX, DX
|
||||||
CMPQ R8, BX
|
CMPQ BX, SI
|
||||||
SETGE DL
|
SETGE DL
|
||||||
|
|
||||||
// br0.fillFast32()
|
// br0.fillFast32()
|
||||||
MOVQ 32(R11), R12
|
MOVQ 32(R10), R11
|
||||||
MOVBQZX 40(R11), R13
|
MOVBQZX 40(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill0
|
JBE skip_fill0
|
||||||
MOVQ 24(R11), AX
|
MOVQ 24(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ (R11), R14
|
MOVQ (R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 24(R11)
|
MOVQ AX, 24(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br0.off < 4)
|
// exhausted += (br0.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill0:
|
skip_fill0:
|
||||||
// val0 := br0.peekTopBits(peekBits)
|
// val0 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v0.entry)
|
// br0.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br0.peekTopBits(peekBits)
|
// val1 := br0.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v1.entry))
|
// br0.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
MOVW AX, (BX)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 32(R11)
|
MOVQ R11, 32(R10)
|
||||||
MOVB R13, 40(R11)
|
MOVB R12, 40(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br1.fillFast32()
|
// br1.fillFast32()
|
||||||
MOVQ 80(R11), R12
|
MOVQ 80(R10), R11
|
||||||
MOVBQZX 88(R11), R13
|
MOVBQZX 88(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill1
|
JBE skip_fill1
|
||||||
MOVQ 72(R11), AX
|
MOVQ 72(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ 48(R11), R14
|
MOVQ 48(R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 72(R11)
|
MOVQ AX, 72(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br1.off < 4)
|
// exhausted += (br1.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill1:
|
skip_fill1:
|
||||||
// val0 := br1.peekTopBits(peekBits)
|
// val0 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v0.entry)
|
// br1.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br1.peekTopBits(peekBits)
|
// val1 := br1.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v1.entry))
|
// br1.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
MOVW AX, (BX)(R8*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 80(R11)
|
MOVQ R11, 80(R10)
|
||||||
MOVB R13, 88(R11)
|
MOVB R12, 88(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br2.fillFast32()
|
// br2.fillFast32()
|
||||||
MOVQ 128(R11), R12
|
MOVQ 128(R10), R11
|
||||||
MOVBQZX 136(R11), R13
|
MOVBQZX 136(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill2
|
JBE skip_fill2
|
||||||
MOVQ 120(R11), AX
|
MOVQ 120(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ 96(R11), R14
|
MOVQ 96(R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 120(R11)
|
MOVQ AX, 120(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br2.off < 4)
|
// exhausted += (br2.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill2:
|
skip_fill2:
|
||||||
// val0 := br2.peekTopBits(peekBits)
|
// val0 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v0.entry)
|
// br2.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br2.peekTopBits(peekBits)
|
// val1 := br2.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v1.entry))
|
// br2.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
MOVW AX, (BX)(R8*2)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 128(R11)
|
MOVQ R11, 128(R10)
|
||||||
MOVB R13, 136(R11)
|
MOVB R12, 136(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br3.fillFast32()
|
// br3.fillFast32()
|
||||||
MOVQ 176(R11), R12
|
MOVQ 176(R10), R11
|
||||||
MOVBQZX 184(R11), R13
|
MOVBQZX 184(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill3
|
JBE skip_fill3
|
||||||
MOVQ 168(R11), AX
|
MOVQ 168(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ 144(R11), R14
|
MOVQ 144(R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 168(R11)
|
MOVQ AX, 168(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br3.off < 4)
|
// exhausted += (br3.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill3:
|
skip_fill3:
|
||||||
// val0 := br3.peekTopBits(peekBits)
|
// val0 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v0.entry)
|
// br3.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br3.peekTopBits(peekBits)
|
// val1 := br3.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v1.entry))
|
// br3.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
LEAQ (R8)(R8*2), CX
|
||||||
|
MOVW AX, (BX)(CX*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 176(R11)
|
MOVQ R11, 176(R10)
|
||||||
MOVB R13, 184(R11)
|
MOVB R12, 184(R10)
|
||||||
ADDQ $0x02, SI
|
ADDQ $0x02, BX
|
||||||
TESTB DL, DL
|
TESTB DL, DL
|
||||||
JZ main_loop
|
JZ main_loop
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
SUBQ 16(AX), SI
|
SUBQ 16(AX), BX
|
||||||
SHLQ $0x02, SI
|
SHLQ $0x02, BX
|
||||||
MOVQ SI, 40(AX)
|
MOVQ BX, 40(AX)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
||||||
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
|
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
|
||||||
XORQ DX, DX
|
|
||||||
|
|
||||||
// Preload values
|
// Preload values
|
||||||
MOVQ ctx+0(FP), CX
|
MOVQ ctx+0(FP), CX
|
||||||
MOVBQZX 8(CX), DI
|
MOVBQZX 8(CX), DI
|
||||||
MOVQ 16(CX), BX
|
MOVQ 16(CX), BX
|
||||||
MOVQ 48(CX), SI
|
MOVQ 48(CX), SI
|
||||||
MOVQ 24(CX), R9
|
MOVQ 24(CX), R8
|
||||||
MOVQ 32(CX), R10
|
MOVQ 32(CX), R9
|
||||||
MOVQ (CX), R11
|
MOVQ (CX), R10
|
||||||
|
|
||||||
// Main loop
|
// Main loop
|
||||||
main_loop:
|
main_loop:
|
||||||
MOVQ BX, R8
|
XORL DX, DX
|
||||||
CMPQ R8, SI
|
CMPQ BX, SI
|
||||||
SETGE DL
|
SETGE DL
|
||||||
|
|
||||||
// br0.fillFast32()
|
// br0.fillFast32()
|
||||||
MOVQ 32(R11), R12
|
MOVQ 32(R10), R11
|
||||||
MOVBQZX 40(R11), R13
|
MOVBQZX 40(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill0
|
JBE skip_fill0
|
||||||
MOVQ 24(R11), R14
|
MOVQ 24(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ (R11), R15
|
MOVQ (R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 24(R11)
|
MOVQ R13, 24(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br0.off < 4)
|
// exhausted += (br0.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill0:
|
skip_fill0:
|
||||||
// val0 := br0.peekTopBits(peekBits)
|
// val0 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v0.entry)
|
// br0.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br0.peekTopBits(peekBits)
|
// val1 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v1.entry)
|
// br0.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br0.peekTopBits(peekBits)
|
// val2 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v2.entry)
|
// br0.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br0.peekTopBits(peekBits)
|
// val3 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v3.entry)
|
// br0.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -366,88 +354,86 @@ skip_fill0:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
MOVL AX, (BX)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 32(R11)
|
MOVQ R11, 32(R10)
|
||||||
MOVB R13, 40(R11)
|
MOVB R12, 40(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br1.fillFast32()
|
// br1.fillFast32()
|
||||||
MOVQ 80(R11), R12
|
MOVQ 80(R10), R11
|
||||||
MOVBQZX 88(R11), R13
|
MOVBQZX 88(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill1
|
JBE skip_fill1
|
||||||
MOVQ 72(R11), R14
|
MOVQ 72(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ 48(R11), R15
|
MOVQ 48(R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 72(R11)
|
MOVQ R13, 72(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br1.off < 4)
|
// exhausted += (br1.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill1:
|
skip_fill1:
|
||||||
// val0 := br1.peekTopBits(peekBits)
|
// val0 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v0.entry)
|
// br1.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br1.peekTopBits(peekBits)
|
// val1 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v1.entry)
|
// br1.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br1.peekTopBits(peekBits)
|
// val2 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v2.entry)
|
// br1.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br1.peekTopBits(peekBits)
|
// val3 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v3.entry)
|
// br1.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -455,88 +441,86 @@ skip_fill1:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
MOVL AX, (BX)(R8*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 80(R11)
|
MOVQ R11, 80(R10)
|
||||||
MOVB R13, 88(R11)
|
MOVB R12, 88(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br2.fillFast32()
|
// br2.fillFast32()
|
||||||
MOVQ 128(R11), R12
|
MOVQ 128(R10), R11
|
||||||
MOVBQZX 136(R11), R13
|
MOVBQZX 136(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill2
|
JBE skip_fill2
|
||||||
MOVQ 120(R11), R14
|
MOVQ 120(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ 96(R11), R15
|
MOVQ 96(R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 120(R11)
|
MOVQ R13, 120(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br2.off < 4)
|
// exhausted += (br2.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill2:
|
skip_fill2:
|
||||||
// val0 := br2.peekTopBits(peekBits)
|
// val0 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v0.entry)
|
// br2.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br2.peekTopBits(peekBits)
|
// val1 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v1.entry)
|
// br2.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br2.peekTopBits(peekBits)
|
// val2 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v2.entry)
|
// br2.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br2.peekTopBits(peekBits)
|
// val3 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v3.entry)
|
// br2.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -544,88 +528,86 @@ skip_fill2:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
MOVL AX, (BX)(R8*2)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 128(R11)
|
MOVQ R11, 128(R10)
|
||||||
MOVB R13, 136(R11)
|
MOVB R12, 136(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br3.fillFast32()
|
// br3.fillFast32()
|
||||||
MOVQ 176(R11), R12
|
MOVQ 176(R10), R11
|
||||||
MOVBQZX 184(R11), R13
|
MOVBQZX 184(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill3
|
JBE skip_fill3
|
||||||
MOVQ 168(R11), R14
|
MOVQ 168(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ 144(R11), R15
|
MOVQ 144(R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 168(R11)
|
MOVQ R13, 168(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br3.off < 4)
|
// exhausted += (br3.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill3:
|
skip_fill3:
|
||||||
// val0 := br3.peekTopBits(peekBits)
|
// val0 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v0.entry)
|
// br3.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br3.peekTopBits(peekBits)
|
// val1 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v1.entry)
|
// br3.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br3.peekTopBits(peekBits)
|
// val2 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v2.entry)
|
// br3.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br3.peekTopBits(peekBits)
|
// val3 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v3.entry)
|
// br3.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -633,11 +615,12 @@ skip_fill3:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
LEAQ (R8)(R8*2), CX
|
||||||
|
MOVL AX, (BX)(CX*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 176(R11)
|
MOVQ R11, 176(R10)
|
||||||
MOVB R13, 184(R11)
|
MOVB R12, 184(R10)
|
||||||
ADDQ $0x04, BX
|
ADDQ $0x04, BX
|
||||||
TESTB DL, DL
|
TESTB DL, DL
|
||||||
JZ main_loop
|
JZ main_loop
|
||||||
@ -653,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8
|
|||||||
MOVQ 16(CX), DX
|
MOVQ 16(CX), DX
|
||||||
MOVQ 24(CX), BX
|
MOVQ 24(CX), BX
|
||||||
CMPQ BX, $0x04
|
CMPQ BX, $0x04
|
||||||
JB error_max_decoded_size_exeeded
|
JB error_max_decoded_size_exceeded
|
||||||
LEAQ (DX)(BX*1), BX
|
LEAQ (DX)(BX*1), BX
|
||||||
MOVQ (CX), SI
|
MOVQ (CX), SI
|
||||||
MOVQ (SI), R8
|
MOVQ (SI), R8
|
||||||
@ -668,7 +651,7 @@ main_loop:
|
|||||||
// Check if we have room for 4 bytes in the output buffer
|
// Check if we have room for 4 bytes in the output buffer
|
||||||
LEAQ 4(DX), CX
|
LEAQ 4(DX), CX
|
||||||
CMPQ CX, BX
|
CMPQ CX, BX
|
||||||
JGE error_max_decoded_size_exeeded
|
JGE error_max_decoded_size_exceeded
|
||||||
|
|
||||||
// Decode 4 values
|
// Decode 4 values
|
||||||
CMPQ R11, $0x20
|
CMPQ R11, $0x20
|
||||||
@ -745,7 +728,7 @@ loop_condition:
|
|||||||
RET
|
RET
|
||||||
|
|
||||||
// Report error
|
// Report error
|
||||||
error_max_decoded_size_exeeded:
|
error_max_decoded_size_exceeded:
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ $-1, CX
|
MOVQ $-1, CX
|
||||||
MOVQ CX, 40(AX)
|
MOVQ CX, 40(AX)
|
||||||
@ -758,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
|
|||||||
MOVQ 16(CX), DX
|
MOVQ 16(CX), DX
|
||||||
MOVQ 24(CX), BX
|
MOVQ 24(CX), BX
|
||||||
CMPQ BX, $0x04
|
CMPQ BX, $0x04
|
||||||
JB error_max_decoded_size_exeeded
|
JB error_max_decoded_size_exceeded
|
||||||
LEAQ (DX)(BX*1), BX
|
LEAQ (DX)(BX*1), BX
|
||||||
MOVQ (CX), SI
|
MOVQ (CX), SI
|
||||||
MOVQ (SI), R8
|
MOVQ (SI), R8
|
||||||
@ -773,7 +756,7 @@ main_loop:
|
|||||||
// Check if we have room for 4 bytes in the output buffer
|
// Check if we have room for 4 bytes in the output buffer
|
||||||
LEAQ 4(DX), CX
|
LEAQ 4(DX), CX
|
||||||
CMPQ CX, BX
|
CMPQ CX, BX
|
||||||
JGE error_max_decoded_size_exeeded
|
JGE error_max_decoded_size_exceeded
|
||||||
|
|
||||||
// Decode 4 values
|
// Decode 4 values
|
||||||
CMPQ R11, $0x20
|
CMPQ R11, $0x20
|
||||||
@ -840,7 +823,7 @@ loop_condition:
|
|||||||
RET
|
RET
|
||||||
|
|
||||||
// Report error
|
// Report error
|
||||||
error_max_decoded_size_exeeded:
|
error_max_decoded_size_exceeded:
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ $-1, CX
|
MOVQ $-1, CX
|
||||||
MOVQ CX, 40(AX)
|
MOVQ CX, 40(AX)
|
||||||
|
18
vendor/github.com/klauspost/compress/huff0/decompress_generic.go
generated
vendored
18
vendor/github.com/klauspost/compress/huff0/decompress_generic.go
generated
vendored
@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
|||||||
d.bufs.Put(buf)
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 1")
|
return nil, errors.New("corruption detected: stream overrun 1")
|
||||||
}
|
}
|
||||||
copy(out, buf[0][:])
|
|
||||||
copy(out[dstEvery:], buf[1][:])
|
|
||||||
copy(out[dstEvery*2:], buf[2][:])
|
|
||||||
copy(out[dstEvery*3:], buf[3][:])
|
|
||||||
out = out[bufoff:]
|
|
||||||
decoded += bufoff * 4
|
|
||||||
// There must at least be 3 buffers left.
|
// There must at least be 3 buffers left.
|
||||||
if len(out) < dstEvery*3 {
|
if len(out)-bufoff < dstEvery*3 {
|
||||||
d.bufs.Put(buf)
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 2")
|
return nil, errors.New("corruption detected: stream overrun 2")
|
||||||
}
|
}
|
||||||
|
//copy(out, buf[0][:])
|
||||||
|
//copy(out[dstEvery:], buf[1][:])
|
||||||
|
//copy(out[dstEvery*2:], buf[2][:])
|
||||||
|
//copy(out[dstEvery*3:], buf[3][:])
|
||||||
|
*(*[bufoff]byte)(out) = buf[0]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
|
||||||
|
*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
|
||||||
|
out = out[bufoff:]
|
||||||
|
decoded += bufoff * 4
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if off > 0 {
|
if off > 0 {
|
||||||
|
26
vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
generated
vendored
26
vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
generated
vendored
@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
|
|||||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 1 <= len(lit) && len(lit) <= 65536
|
// 1 <= len(lit) && len(lit) <= 65536
|
||||||
func emitLiteral(dst, lit []byte) int {
|
func emitLiteral(dst, lit []byte) int {
|
||||||
@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
|
|||||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 1 <= offset && offset <= 65535
|
// 1 <= offset && offset <= 65535
|
||||||
// 4 <= length && length <= 65535
|
// 4 <= length && length <= 65535
|
||||||
@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
|
|||||||
// src[i:i+k-j] and src[j:k] have the same contents.
|
// src[i:i+k-j] and src[j:k] have the same contents.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// 0 <= i && i < j && j <= len(src)
|
// 0 <= i && i < j && j <= len(src)
|
||||||
func extendMatch(src []byte, i, j int) int {
|
func extendMatch(src []byte, i, j int) int {
|
||||||
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
||||||
@ -100,11 +103,34 @@ func hash(u, shift uint32) uint32 {
|
|||||||
return (u * 0x1e35a7bd) >> shift
|
return (u * 0x1e35a7bd) >> shift
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EncodeBlockInto exposes encodeBlock but checks dst size.
|
||||||
|
func EncodeBlockInto(dst, src []byte) (d int) {
|
||||||
|
if MaxEncodedLen(len(src)) > len(dst) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBlock breaks on too big blocks, so split.
|
||||||
|
for len(src) > 0 {
|
||||||
|
p := src
|
||||||
|
src = nil
|
||||||
|
if len(p) > maxBlockSize {
|
||||||
|
p, src = p[:maxBlockSize], p[maxBlockSize:]
|
||||||
|
}
|
||||||
|
if len(p) < minNonLiteralBlockSize {
|
||||||
|
d += emitLiteral(dst[d:], p)
|
||||||
|
} else {
|
||||||
|
d += encodeBlock(dst[d:], p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlock(dst, src []byte) (d int) {
|
func encodeBlock(dst, src []byte) (d int) {
|
||||||
|
405
vendor/github.com/klauspost/compress/s2/README.md
generated
vendored
405
vendor/github.com/klauspost/compress/s2/README.md
generated
vendored
@ -20,11 +20,12 @@ This is important, so you don't have to worry about spending CPU cycles on alrea
|
|||||||
* Concurrent stream compression
|
* Concurrent stream compression
|
||||||
* Faster decompression, even for Snappy compatible content
|
* Faster decompression, even for Snappy compatible content
|
||||||
* Concurrent Snappy/S2 stream decompression
|
* Concurrent Snappy/S2 stream decompression
|
||||||
* Ability to quickly skip forward in compressed stream
|
* Skip forward in compressed stream
|
||||||
* Random seeking with indexes
|
* Random seeking with indexes
|
||||||
* Compatible with reading Snappy compressed content
|
* Compatible with reading Snappy compressed content
|
||||||
* Smaller block size overhead on incompressible blocks
|
* Smaller block size overhead on incompressible blocks
|
||||||
* Block concatenation
|
* Block concatenation
|
||||||
|
* Block Dictionary support
|
||||||
* Uncompressed stream mode
|
* Uncompressed stream mode
|
||||||
* Automatic stream size padding
|
* Automatic stream size padding
|
||||||
* Snappy compatible block compression
|
* Snappy compatible block compression
|
||||||
@ -325,35 +326,35 @@ The content compressed in this mode is fully compatible with the standard decode
|
|||||||
|
|
||||||
Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
|
Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
|
||||||
|
|
||||||
| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
|
| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
|
||||||
|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
|
|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
|
||||||
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% |
|
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
|
||||||
| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - |
|
| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
|
||||||
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% |
|
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
|
||||||
| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - |
|
| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
|
||||||
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% |
|
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
|
||||||
| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - |
|
| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
|
||||||
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% |
|
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
|
||||||
| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - |
|
| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
|
||||||
| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% |
|
| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
|
||||||
| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - |
|
| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
|
||||||
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% |
|
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
|
||||||
| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - |
|
| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
|
||||||
| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% |
|
| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
|
||||||
| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - |
|
| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
|
||||||
| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% |
|
| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
|
||||||
| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - |
|
| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
|
||||||
| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% |
|
| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
|
||||||
| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - |
|
| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
|
||||||
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% |
|
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
|
||||||
| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - |
|
| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
|
||||||
| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% |
|
| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
|
||||||
| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - |
|
| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
|
||||||
|
|
||||||
### Legend
|
### Legend
|
||||||
|
|
||||||
* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
|
* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
|
||||||
* `S2 throughput`: Throughput of S2 in MB/s.
|
* `S2 Throughput`: Throughput of S2 in MB/s.
|
||||||
* `S2 % smaller`: How many percent of the Snappy output size is S2 better.
|
* `S2 % smaller`: How many percent of the Snappy output size is S2 better.
|
||||||
* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
|
* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
|
||||||
* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy.
|
* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy.
|
||||||
@ -361,7 +362,7 @@ Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all th
|
|||||||
|
|
||||||
There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
|
There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
|
||||||
|
|
||||||
Machine generated data gets by far the biggest compression boost, with size being reduced by up to 45% of Snappy size.
|
Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
|
||||||
|
|
||||||
The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
|
The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
|
||||||
|
|
||||||
@ -404,7 +405,7 @@ The "better" compression mode will actively look for shorter matches, which is w
|
|||||||
Without assembly decompression is also very fast; single goroutine decompression speed. No assembly:
|
Without assembly decompression is also very fast; single goroutine decompression speed. No assembly:
|
||||||
|
|
||||||
| File | S2 Throughput | S2 throughput |
|
| File | S2 Throughput | S2 throughput |
|
||||||
|--------------------------------|--------------|---------------|
|
|--------------------------------|---------------|---------------|
|
||||||
| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
|
| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
|
||||||
| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
|
| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
|
||||||
| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
|
| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
|
||||||
@ -451,13 +452,13 @@ For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com
|
|||||||
53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
|
53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
|
||||||
|
|
||||||
| * | Input | Output | Reduction | MB/s |
|
| * | Input | Output | Reduction | MB/s |
|
||||||
|-------------------|------------|------------|-----------|--------|
|
|-------------------|------------|------------|------------|------------|
|
||||||
| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** |
|
| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
|
||||||
| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 |
|
| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
|
||||||
| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 |
|
| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
|
||||||
| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 |
|
| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
|
||||||
| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 |
|
| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
|
||||||
| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 |
|
| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
|
||||||
|
|
||||||
S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
|
S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
|
||||||
"Better" mode provides the same compression speed as LZ4 with better compression ratio.
|
"Better" mode provides the same compression speed as LZ4 with better compression ratio.
|
||||||
@ -489,43 +490,24 @@ AMD64 assembly is use for both S2 and Snappy.
|
|||||||
|
|
||||||
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
|
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
|
||||||
|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
|
|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
|
||||||
| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s |
|
| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
|
||||||
| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s |
|
| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
|
||||||
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s |
|
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
|
||||||
| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s |
|
| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
|
||||||
| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s |
|
| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
|
||||||
| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s |
|
| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
|
||||||
| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s |
|
| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
|
||||||
| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s |
|
| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
|
||||||
| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s |
|
| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
|
||||||
| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s |
|
| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
|
||||||
| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s |
|
| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
|
||||||
| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s |
|
| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
|
||||||
| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s |
|
| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
|
||||||
| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s |
|
| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
|
||||||
| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s |
|
| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
|
||||||
| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s |
|
| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
|
||||||
|
|
||||||
|
|
||||||
| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
|
|
||||||
|-----------------------|-------------|------------------|----------|--------------|
|
|
||||||
| html | 22.31% | 7.58% | 1.07x | 1.20x |
|
|
||||||
| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x |
|
|
||||||
| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x |
|
|
||||||
| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x |
|
|
||||||
| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x |
|
|
||||||
| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x |
|
|
||||||
| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x |
|
|
||||||
| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x |
|
|
||||||
| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x |
|
|
||||||
| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x |
|
|
||||||
| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x |
|
|
||||||
| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x |
|
|
||||||
| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x |
|
|
||||||
| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x |
|
|
||||||
| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x |
|
|
||||||
| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x |
|
|
||||||
|
|
||||||
Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size.
|
Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size.
|
||||||
|
|
||||||
Decompression speed is better than Snappy, except in one case.
|
Decompression speed is better than Snappy, except in one case.
|
||||||
@ -543,43 +525,24 @@ So individual benchmarks should only be seen as a guideline and the overall pict
|
|||||||
|
|
||||||
| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
|
| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
|
||||||
|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
|
|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
|
||||||
| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s |
|
| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
|
||||||
| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s |
|
| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
|
||||||
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s |
|
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
|
||||||
| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s |
|
| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
|
||||||
| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s |
|
| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
|
||||||
| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s |
|
| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
|
||||||
| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s |
|
| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
|
||||||
| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s |
|
| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
|
||||||
| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s |
|
| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
|
||||||
| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s |
|
| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
|
||||||
| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s |
|
| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
|
||||||
| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s |
|
| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
|
||||||
| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s |
|
| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
|
||||||
| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s |
|
| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
|
||||||
| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s |
|
| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
|
||||||
| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s |
|
| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
|
||||||
|
|
||||||
|
|
||||||
| Relative Perf | Snappy size | Better size | Better Speed | Better dec |
|
|
||||||
|-----------------------|-------------|-------------|--------------|------------|
|
|
||||||
| html | 22.31% | 13.18% | 0.48x | 0.98x |
|
|
||||||
| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x |
|
|
||||||
| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x |
|
|
||||||
| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x |
|
|
||||||
| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x |
|
|
||||||
| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x |
|
|
||||||
| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x |
|
|
||||||
| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x |
|
|
||||||
| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x |
|
|
||||||
| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x |
|
|
||||||
| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x |
|
|
||||||
| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x |
|
|
||||||
| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x |
|
|
||||||
| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x |
|
|
||||||
| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x |
|
|
||||||
| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x |
|
|
||||||
|
|
||||||
Except for the mostly incompressible JPEG image compression is better and usually in the
|
Except for the mostly incompressible JPEG image compression is better and usually in the
|
||||||
double digits in terms of percentage reduction over Snappy.
|
double digits in terms of percentage reduction over Snappy.
|
||||||
|
|
||||||
@ -605,33 +568,150 @@ Some examples compared on 16 core CPU, amd64 assembly used:
|
|||||||
|
|
||||||
```
|
```
|
||||||
* enwik10
|
* enwik10
|
||||||
Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
|
Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
|
||||||
Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
|
Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
|
||||||
Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
|
Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
|
||||||
|
|
||||||
* github-june-2days-2019.json
|
* github-june-2days-2019.json
|
||||||
Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
|
Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
|
||||||
Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
|
Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
|
||||||
Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
|
Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
|
||||||
|
|
||||||
* nyc-taxi-data-10M.csv
|
* nyc-taxi-data-10M.csv
|
||||||
Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
|
Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
|
||||||
Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
|
Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
|
||||||
Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
|
Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
|
||||||
|
|
||||||
* 10gb.tar
|
* 10gb.tar
|
||||||
Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
|
Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
|
||||||
Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
|
Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
|
||||||
Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/
|
Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/
|
||||||
|
|
||||||
* consensus.db.10gb
|
* consensus.db.10gb
|
||||||
Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
|
Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
|
||||||
Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
|
Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
|
||||||
Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
|
Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
|
||||||
```
|
```
|
||||||
|
|
||||||
Decompression speed should be around the same as using the 'better' compression mode.
|
Decompression speed should be around the same as using the 'better' compression mode.
|
||||||
|
|
||||||
|
## Dictionaries
|
||||||
|
|
||||||
|
*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
|
||||||
|
either encoding or decoding. Performance improvements can be expected in the future.*
|
||||||
|
|
||||||
|
Adding dictionaries allow providing a custom dictionary that will serve as lookup in the beginning of blocks.
|
||||||
|
|
||||||
|
The same dictionary *must* be used for both encoding and decoding.
|
||||||
|
S2 does not keep track of whether the same dictionary is used,
|
||||||
|
and using the wrong dictionary will most often not result in an error when decompressing.
|
||||||
|
|
||||||
|
Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
|
||||||
|
This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
|
||||||
|
and treat the blocks similarly.
|
||||||
|
|
||||||
|
Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
|
||||||
|
the same usage scenario applies to S2 dictionaries.
|
||||||
|
|
||||||
|
> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
|
||||||
|
|
||||||
|
S2 further limits the dictionary to only be enabled on the first 64KB of a block.
|
||||||
|
This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
|
||||||
|
|
||||||
|
### Compression
|
||||||
|
|
||||||
|
Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
|
||||||
|
and a 64KB dictionary trained with zStandard the following sizes can be achieved.
|
||||||
|
|
||||||
|
| | Default | Better | Best |
|
||||||
|
|--------------------|------------------|------------------|-----------------------|
|
||||||
|
| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
|
||||||
|
| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
|
||||||
|
|
||||||
|
So for highly repetitive content, this case provides an almost 3x reduction in size.
|
||||||
|
|
||||||
|
For less uniform data we will use the Go source code tree.
|
||||||
|
Compressing First 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
|
||||||
|
|
||||||
|
| | Default | Better | Best |
|
||||||
|
|--------------------|-------------------|-------------------|-------------------|
|
||||||
|
| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39% | 19482828 (38.01%) |
|
||||||
|
| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
|
||||||
|
| Saving/file | 362 bytes | 428 bytes | 472 bytes |
|
||||||
|
|
||||||
|
|
||||||
|
### Creating Dictionaries
|
||||||
|
|
||||||
|
There are no tools to create dictionaries in S2.
|
||||||
|
However, there are multiple ways to create a useful dictionary:
|
||||||
|
|
||||||
|
#### Using a Sample File
|
||||||
|
|
||||||
|
If your input is very uniform, you can just use a sample file as the dictionary.
|
||||||
|
|
||||||
|
For example in the `github_users_sample_set` above, the average compression only goes up from
|
||||||
|
10.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary.
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Read a sample
|
||||||
|
sample, err := os.ReadFile("sample.json")
|
||||||
|
|
||||||
|
// Create a dictionary.
|
||||||
|
dict := s2.MakeDict(sample, nil)
|
||||||
|
|
||||||
|
// b := dict.Bytes() will provide a dictionary that can be saved
|
||||||
|
// and reloaded with s2.NewDict(b).
|
||||||
|
|
||||||
|
// To encode:
|
||||||
|
encoded := dict.Encode(nil, file)
|
||||||
|
|
||||||
|
// To decode:
|
||||||
|
decoded, err := dict.Decode(nil, file)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using Zstandard
|
||||||
|
|
||||||
|
Zstandard dictionaries can easily be converted to S2 dictionaries.
|
||||||
|
|
||||||
|
This can be helpful to generate dictionaries for files that don't have a fixed structure.
|
||||||
|
|
||||||
|
|
||||||
|
Example, with training set files placed in `./training-set`:
|
||||||
|
|
||||||
|
`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
|
||||||
|
|
||||||
|
This will create a dictionary of 64KB, that can be converted to a dictionary like this:
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Decode the Zstandard dictionary.
|
||||||
|
insp, err := zstd.InspectDictionary(zdict)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We are only interested in the contents.
|
||||||
|
// Assume that files start with "// Copyright (c) 2023".
|
||||||
|
// Search for the longest match for that.
|
||||||
|
// This may save a few bytes.
|
||||||
|
dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
|
||||||
|
|
||||||
|
// b := dict.Bytes() will provide a dictionary that can be saved
|
||||||
|
// and reloaded with s2.NewDict(b).
|
||||||
|
|
||||||
|
// We can now encode using this dictionary
|
||||||
|
encodedWithDict := dict.Encode(nil, payload)
|
||||||
|
|
||||||
|
// To decode content:
|
||||||
|
decoded, err := dict.Decode(nil, encodedWithDict)
|
||||||
|
```
|
||||||
|
|
||||||
|
It is recommended to save the dictionary returned by ` b:= dict.Bytes()`, since that will contain only the S2 dictionary.
|
||||||
|
|
||||||
|
This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
|
||||||
|
|
||||||
|
Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
|
||||||
|
This can be omitted, at the expense of a few bytes.
|
||||||
|
|
||||||
# Snappy Compatibility
|
# Snappy Compatibility
|
||||||
|
|
||||||
S2 now offers full compatibility with Snappy.
|
S2 now offers full compatibility with Snappy.
|
||||||
@ -649,7 +729,7 @@ Snappy compatible blocks can be generated with the S2 encoder.
|
|||||||
Compression and speed is typically a bit better `MaxEncodedLen` is also smaller for smaller memory usage. Replace
|
Compression and speed is typically a bit better `MaxEncodedLen` is also smaller for smaller memory usage. Replace
|
||||||
|
|
||||||
| Snappy | S2 replacement |
|
| Snappy | S2 replacement |
|
||||||
|----------------------------|-------------------------|
|
|---------------------------|-----------------------|
|
||||||
| snappy.Encode(...) | s2.EncodeSnappy(...) |
|
| snappy.Encode(...) | s2.EncodeSnappy(...) |
|
||||||
| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
|
| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
|
||||||
|
|
||||||
@ -661,7 +741,7 @@ Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/c
|
|||||||
53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
|
53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
|
||||||
|
|
||||||
| Encoder | Size | MB/s | Reduction |
|
| Encoder | Size | MB/s | Reduction |
|
||||||
|-----------------------|------------|------------|------------
|
|-----------------------|------------|------------|------------|
|
||||||
| snappy.Encode | 1128706759 | 725.59 | 71.89% |
|
| snappy.Encode | 1128706759 | 725.59 | 71.89% |
|
||||||
| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
|
| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
|
||||||
| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
|
| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
|
||||||
@ -835,6 +915,13 @@ This is done using the regular "Skip" function:
|
|||||||
|
|
||||||
This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
|
This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
|
||||||
|
|
||||||
|
# Compact storage
|
||||||
|
|
||||||
|
For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
|
||||||
|
a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
|
||||||
|
|
||||||
|
This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors.
|
||||||
|
|
||||||
## Index Format:
|
## Index Format:
|
||||||
|
|
||||||
Each block is structured as a snappy skippable block, with the chunk ID 0x99.
|
Each block is structured as a snappy skippable block, with the chunk ID 0x99.
|
||||||
@ -845,7 +932,7 @@ Numbers are stored as fixed size little endian values or [zigzag encoded](https:
|
|||||||
with un-encoded value length of 64 bits, unless other limits are specified.
|
with un-encoded value length of 64 bits, unless other limits are specified.
|
||||||
|
|
||||||
| Content | Format |
|
| Content | Format |
|
||||||
|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|
|
|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| ID, `[1]byte` | Always 0x99. |
|
| ID, `[1]byte` | Always 0x99. |
|
||||||
| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
|
| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
|
||||||
| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
|
| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
|
||||||
@ -929,6 +1016,7 @@ To decode from any given uncompressed offset `(wantOffset)`:
|
|||||||
|
|
||||||
See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
|
See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
|
||||||
|
|
||||||
|
|
||||||
# Format Extensions
|
# Format Extensions
|
||||||
|
|
||||||
* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
|
* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
|
||||||
@ -951,13 +1039,80 @@ The length is specified by reading the 3-bit length specified in the tag and dec
|
|||||||
| 7 | 65540 + read 3 bytes |
|
| 7 | 65540 + read 3 bytes |
|
||||||
|
|
||||||
This allows any repeat offset + length to be represented by 2 to 5 bytes.
|
This allows any repeat offset + length to be represented by 2 to 5 bytes.
|
||||||
|
It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64-byte copies.
|
||||||
|
|
||||||
Lengths are stored as little endian values.
|
Lengths are stored as little endian values.
|
||||||
|
|
||||||
The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams.
|
The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams.
|
||||||
|
|
||||||
Default streaming block size is 1MB.
|
Default streaming block size is 1MB.
|
||||||
|
|
||||||
|
# Dictionary Encoding
|
||||||
|
|
||||||
|
Adding dictionaries allow providing a custom dictionary that will serve as lookup in the beginning of blocks.
|
||||||
|
|
||||||
|
A dictionary provides an initial repeat value that can be used to point to a common header.
|
||||||
|
|
||||||
|
Other than that the dictionary contains values that can be used as back-references.
|
||||||
|
|
||||||
|
Often used data should be placed at the *end* of the dictionary since offsets < 2048 bytes will be smaller.
|
||||||
|
|
||||||
|
## Format
|
||||||
|
|
||||||
|
Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
|
||||||
|
|
||||||
|
Encoding: `[repeat value (uvarint)][dictionary content...]`
|
||||||
|
|
||||||
|
Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
|
||||||
|
This value is an offset into the dictionary content and not a back-reference offset,
|
||||||
|
so setting this to 0 will make the repeat value point to the first value of the dictionary.
|
||||||
|
|
||||||
|
The value must be less than the dictionary length minus 8.
|
||||||
|
|
||||||
|
## Encoding
|
||||||
|
|
||||||
|
From the decoder point of view the dictionary content is seen as preceding the encoded content.
|
||||||
|
|
||||||
|
`[dictionary content][decoded output]`
|
||||||
|
|
||||||
|
Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
|
||||||
|
|
||||||
|
Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
|
||||||
|
However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed.
|
||||||
|
|
||||||
|
The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
|
||||||
|
|
||||||
|
When 64KB (65536 bytes) has been en/decoded it is no longer allowed to reference the dictionary,
|
||||||
|
neither by a copy nor repeat operations.
|
||||||
|
If the boundary is crossed while copying from the dictionary, the operation should complete,
|
||||||
|
but the next instruction is not allowed to reference the dictionary.
|
||||||
|
|
||||||
|
Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
|
||||||
|
There are no checks whether the supplied dictionary is the correct one for a block.
|
||||||
|
Because of this there is no overhead by using a dictionary.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
This is the dictionary content. Elements are separated by `[]`.
|
||||||
|
|
||||||
|
Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
|
||||||
|
|
||||||
|
Initial repeat offset is set at 10, which is the letter `2`.
|
||||||
|
|
||||||
|
Encoded `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
|
||||||
|
|
||||||
|
Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
|
||||||
|
|
||||||
|
Output: `10 bananas which were brown were added`
|
||||||
|
|
||||||
|
|
||||||
|
## Streams
|
||||||
|
|
||||||
|
For streams each block can use the dictionary.
|
||||||
|
|
||||||
|
The dictionary cannot currently be provided on the stream.
|
||||||
|
|
||||||
|
|
||||||
# LICENSE
|
# LICENSE
|
||||||
|
|
||||||
This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
|
This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
|
||||||
|
498
vendor/github.com/klauspost/compress/s2/decode.go
generated
vendored
498
vendor/github.com/klauspost/compress/s2/decode.go
generated
vendored
@ -11,7 +11,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -719,7 +721,11 @@ func (r *Reader) Skip(n int64) error {
|
|||||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||||
left := int64(r.j - r.i)
|
left := int64(r.j - r.i)
|
||||||
if left >= n {
|
if left >= n {
|
||||||
r.i += int(n)
|
tmp := int64(r.i) + n
|
||||||
|
if tmp > math.MaxInt32 {
|
||||||
|
return errors.New("s2: internal overflow in skip")
|
||||||
|
}
|
||||||
|
r.i = int(tmp)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
n -= int64(r.j - r.i)
|
n -= int64(r.j - r.i)
|
||||||
@ -875,15 +881,20 @@ func (r *Reader) Skip(n int64) error {
|
|||||||
// See Reader.ReadSeeker
|
// See Reader.ReadSeeker
|
||||||
type ReadSeeker struct {
|
type ReadSeeker struct {
|
||||||
*Reader
|
*Reader
|
||||||
|
readAtMu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadSeeker will return an io.ReadSeeker compatible version of the reader.
|
// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
|
||||||
|
// compatible version of the reader.
|
||||||
// If 'random' is specified the returned io.Seeker can be used for
|
// If 'random' is specified the returned io.Seeker can be used for
|
||||||
// random seeking, otherwise only forward seeking is supported.
|
// random seeking, otherwise only forward seeking is supported.
|
||||||
// Enabling random seeking requires the original input to support
|
// Enabling random seeking requires the original input to support
|
||||||
// the io.Seeker interface.
|
// the io.Seeker interface.
|
||||||
// A custom index can be specified which will be used if supplied.
|
// A custom index can be specified which will be used if supplied.
|
||||||
// When using a custom index, it will not be read from the input stream.
|
// When using a custom index, it will not be read from the input stream.
|
||||||
|
// The ReadAt position will affect regular reads and the current position of Seek.
|
||||||
|
// So using Read after ReadAt will continue from where the ReadAt stopped.
|
||||||
|
// No functions should be used concurrently.
|
||||||
// The returned ReadSeeker contains a shallow reference to the existing Reader,
|
// The returned ReadSeeker contains a shallow reference to the existing Reader,
|
||||||
// meaning changes performed to one is reflected in the other.
|
// meaning changes performed to one is reflected in the other.
|
||||||
func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
|
func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
|
||||||
@ -947,44 +958,61 @@ func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
|
|||||||
// Seek allows seeking in compressed data.
|
// Seek allows seeking in compressed data.
|
||||||
func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||||
if r.err != nil {
|
if r.err != nil {
|
||||||
|
if !errors.Is(r.err, io.EOF) {
|
||||||
return 0, r.err
|
return 0, r.err
|
||||||
}
|
}
|
||||||
if offset == 0 && whence == io.SeekCurrent {
|
// Reset on EOF
|
||||||
return r.blockStart + int64(r.i), nil
|
r.err = nil
|
||||||
}
|
}
|
||||||
if !r.readHeader {
|
|
||||||
// Make sure we read the header.
|
|
||||||
_, r.err = r.Read([]byte{})
|
|
||||||
}
|
|
||||||
rs, ok := r.r.(io.ReadSeeker)
|
|
||||||
if r.index == nil || !ok {
|
|
||||||
if whence == io.SeekCurrent && offset >= 0 {
|
|
||||||
err := r.Skip(offset)
|
|
||||||
return r.blockStart + int64(r.i), err
|
|
||||||
}
|
|
||||||
if whence == io.SeekStart && offset >= r.blockStart+int64(r.i) {
|
|
||||||
err := r.Skip(offset - r.blockStart - int64(r.i))
|
|
||||||
return r.blockStart + int64(r.i), err
|
|
||||||
}
|
|
||||||
return 0, ErrUnsupported
|
|
||||||
|
|
||||||
}
|
// Calculate absolute offset.
|
||||||
|
absOffset := offset
|
||||||
|
|
||||||
switch whence {
|
switch whence {
|
||||||
|
case io.SeekStart:
|
||||||
case io.SeekCurrent:
|
case io.SeekCurrent:
|
||||||
offset += r.blockStart + int64(r.i)
|
absOffset = r.blockStart + int64(r.i) + offset
|
||||||
case io.SeekEnd:
|
case io.SeekEnd:
|
||||||
if offset > 0 {
|
if r.index == nil {
|
||||||
return 0, errors.New("seek after end of file")
|
return 0, ErrUnsupported
|
||||||
}
|
}
|
||||||
offset = r.index.TotalUncompressed + offset
|
absOffset = r.index.TotalUncompressed + offset
|
||||||
|
default:
|
||||||
|
r.err = ErrUnsupported
|
||||||
|
return 0, r.err
|
||||||
}
|
}
|
||||||
|
|
||||||
if offset < 0 {
|
if absOffset < 0 {
|
||||||
return 0, errors.New("seek before start of file")
|
return 0, errors.New("seek before start of file")
|
||||||
}
|
}
|
||||||
|
|
||||||
c, u, err := r.index.Find(offset)
|
if !r.readHeader {
|
||||||
|
// Make sure we read the header.
|
||||||
|
_, r.err = r.Read([]byte{})
|
||||||
|
if r.err != nil {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we are inside current block no need to seek.
|
||||||
|
// This includes no offset changes.
|
||||||
|
if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
|
||||||
|
r.i = int(absOffset - r.blockStart)
|
||||||
|
return r.blockStart + int64(r.i), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rs, ok := r.r.(io.ReadSeeker)
|
||||||
|
if r.index == nil || !ok {
|
||||||
|
currOffset := r.blockStart + int64(r.i)
|
||||||
|
if absOffset >= currOffset {
|
||||||
|
err := r.Skip(absOffset - currOffset)
|
||||||
|
return r.blockStart + int64(r.i), err
|
||||||
|
}
|
||||||
|
return 0, ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can seek and we have an index.
|
||||||
|
c, u, err := r.index.Find(absOffset)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return r.blockStart + int64(r.i), err
|
return r.blockStart + int64(r.i), err
|
||||||
}
|
}
|
||||||
@ -996,11 +1024,56 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
r.i = r.j // Remove rest of current block.
|
r.i = r.j // Remove rest of current block.
|
||||||
if u < offset {
|
r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
|
||||||
|
if u < absOffset {
|
||||||
// Forward inside block
|
// Forward inside block
|
||||||
return offset, r.Skip(offset - u)
|
return absOffset, r.Skip(absOffset - u)
|
||||||
}
|
}
|
||||||
return offset, nil
|
if u > absOffset {
|
||||||
|
return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
|
||||||
|
}
|
||||||
|
return absOffset, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAt reads len(p) bytes into p starting at offset off in the
|
||||||
|
// underlying input source. It returns the number of bytes
|
||||||
|
// read (0 <= n <= len(p)) and any error encountered.
|
||||||
|
//
|
||||||
|
// When ReadAt returns n < len(p), it returns a non-nil error
|
||||||
|
// explaining why more bytes were not returned. In this respect,
|
||||||
|
// ReadAt is stricter than Read.
|
||||||
|
//
|
||||||
|
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
||||||
|
// space during the call. If some data is available but not len(p) bytes,
|
||||||
|
// ReadAt blocks until either all the data is available or an error occurs.
|
||||||
|
// In this respect ReadAt is different from Read.
|
||||||
|
//
|
||||||
|
// If the n = len(p) bytes returned by ReadAt are at the end of the
|
||||||
|
// input source, ReadAt may return either err == EOF or err == nil.
|
||||||
|
//
|
||||||
|
// If ReadAt is reading from an input source with a seek offset,
|
||||||
|
// ReadAt should not affect nor be affected by the underlying
|
||||||
|
// seek offset.
|
||||||
|
//
|
||||||
|
// Clients of ReadAt can execute parallel ReadAt calls on the
|
||||||
|
// same input source. This is however not recommended.
|
||||||
|
func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
|
||||||
|
r.readAtMu.Lock()
|
||||||
|
defer r.readAtMu.Unlock()
|
||||||
|
_, err := r.Seek(offset, io.SeekStart)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
n := 0
|
||||||
|
for n < len(p) {
|
||||||
|
n2, err := r.Read(p[n:])
|
||||||
|
if err != nil {
|
||||||
|
// This will include io.EOF
|
||||||
|
return n + n2, err
|
||||||
|
}
|
||||||
|
n += n2
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadByte satisfies the io.ByteReader interface.
|
// ReadByte satisfies the io.ByteReader interface.
|
||||||
@ -1039,3 +1112,370 @@ func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
|
|||||||
r.skippableCB[id] = fn
|
r.skippableCB[id] = fn
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
|
||||||
|
// length of the decompressed bytes has already been read, and that len(dst)
|
||||||
|
// equals that length.
|
||||||
|
//
|
||||||
|
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
|
||||||
|
func s2DecodeDict(dst, src []byte, dict *Dict) int {
|
||||||
|
if dict == nil {
|
||||||
|
return s2Decode(dst, src)
|
||||||
|
}
|
||||||
|
const debug = false
|
||||||
|
const debugErrs = debug
|
||||||
|
|
||||||
|
if debug {
|
||||||
|
fmt.Println("Starting decode, dst len:", len(dst))
|
||||||
|
}
|
||||||
|
var d, s, length int
|
||||||
|
offset := len(dict.dict) - dict.repeat
|
||||||
|
|
||||||
|
// As long as we can read at least 5 bytes...
|
||||||
|
for s < len(src)-5 {
|
||||||
|
// Removing bounds checks is SLOWER, when if doing
|
||||||
|
// in := src[s:s+5]
|
||||||
|
// Checked on Go 1.18
|
||||||
|
switch src[s] & 0x03 {
|
||||||
|
case tagLiteral:
|
||||||
|
x := uint32(src[s] >> 2)
|
||||||
|
switch {
|
||||||
|
case x < 60:
|
||||||
|
s++
|
||||||
|
case x == 60:
|
||||||
|
s += 2
|
||||||
|
x = uint32(src[s-1])
|
||||||
|
case x == 61:
|
||||||
|
in := src[s : s+3]
|
||||||
|
x = uint32(in[1]) | uint32(in[2])<<8
|
||||||
|
s += 3
|
||||||
|
case x == 62:
|
||||||
|
in := src[s : s+4]
|
||||||
|
// Load as 32 bit and shift down.
|
||||||
|
x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
|
||||||
|
x >>= 8
|
||||||
|
s += 4
|
||||||
|
case x == 63:
|
||||||
|
in := src[s : s+5]
|
||||||
|
x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
|
||||||
|
s += 5
|
||||||
|
}
|
||||||
|
length = int(x) + 1
|
||||||
|
if debug {
|
||||||
|
fmt.Println("literals, length:", length, "d-after:", d+length)
|
||||||
|
}
|
||||||
|
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(dst[d:], src[s:s+length])
|
||||||
|
d += length
|
||||||
|
s += length
|
||||||
|
continue
|
||||||
|
|
||||||
|
case tagCopy1:
|
||||||
|
s += 2
|
||||||
|
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||||
|
length = int(src[s-2]) >> 2 & 0x7
|
||||||
|
if toffset == 0 {
|
||||||
|
if debug {
|
||||||
|
fmt.Print("(repeat) ")
|
||||||
|
}
|
||||||
|
// keep last offset
|
||||||
|
switch length {
|
||||||
|
case 5:
|
||||||
|
length = int(src[s]) + 4
|
||||||
|
s += 1
|
||||||
|
case 6:
|
||||||
|
in := src[s : s+2]
|
||||||
|
length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
|
||||||
|
s += 2
|
||||||
|
case 7:
|
||||||
|
in := src[s : s+3]
|
||||||
|
length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
|
||||||
|
s += 3
|
||||||
|
default: // 0-> 4
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
offset = toffset
|
||||||
|
}
|
||||||
|
length += 4
|
||||||
|
case tagCopy2:
|
||||||
|
in := src[s : s+3]
|
||||||
|
offset = int(uint32(in[1]) | uint32(in[2])<<8)
|
||||||
|
length = 1 + int(in[0])>>2
|
||||||
|
s += 3
|
||||||
|
|
||||||
|
case tagCopy4:
|
||||||
|
in := src[s : s+5]
|
||||||
|
offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
|
||||||
|
length = 1 + int(in[0])>>2
|
||||||
|
s += 5
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset <= 0 || length > len(dst)-d {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy from dict
|
||||||
|
if d < offset {
|
||||||
|
if d > MaxDictSrcOffset {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
startOff := len(dict.dict) - offset + d
|
||||||
|
if startOff < 0 || startOff+length > len(dict.dict) {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
|
||||||
|
}
|
||||||
|
copy(dst[d:d+length], dict.dict[startOff:])
|
||||||
|
d += length
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if debug {
|
||||||
|
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy from an earlier sub-slice of dst to a later sub-slice.
|
||||||
|
// If no overlap, use the built-in copy:
|
||||||
|
if offset > length {
|
||||||
|
copy(dst[d:d+length], dst[d-offset:])
|
||||||
|
d += length
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlike the built-in copy function, this byte-by-byte copy always runs
|
||||||
|
// forwards, even if the slices overlap. Conceptually, this is:
|
||||||
|
//
|
||||||
|
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||||
|
//
|
||||||
|
// We align the slices into a and b and show the compiler they are the same size.
|
||||||
|
// This allows the loop to run without bounds checks.
|
||||||
|
a := dst[d : d+length]
|
||||||
|
b := dst[d-offset:]
|
||||||
|
b = b[:len(a)]
|
||||||
|
for i := range a {
|
||||||
|
a[i] = b[i]
|
||||||
|
}
|
||||||
|
d += length
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remaining with extra checks...
|
||||||
|
for s < len(src) {
|
||||||
|
switch src[s] & 0x03 {
|
||||||
|
case tagLiteral:
|
||||||
|
x := uint32(src[s] >> 2)
|
||||||
|
switch {
|
||||||
|
case x < 60:
|
||||||
|
s++
|
||||||
|
case x == 60:
|
||||||
|
s += 2
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-1])
|
||||||
|
case x == 61:
|
||||||
|
s += 3
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||||
|
case x == 62:
|
||||||
|
s += 4
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||||
|
case x == 63:
|
||||||
|
s += 5
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||||
|
}
|
||||||
|
length = int(x) + 1
|
||||||
|
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("literals, length:", length, "d-after:", d+length)
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(dst[d:], src[s:s+length])
|
||||||
|
d += length
|
||||||
|
s += length
|
||||||
|
continue
|
||||||
|
|
||||||
|
case tagCopy1:
|
||||||
|
s += 2
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = int(src[s-2]) >> 2 & 0x7
|
||||||
|
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||||
|
if toffset == 0 {
|
||||||
|
if debug {
|
||||||
|
fmt.Print("(repeat) ")
|
||||||
|
}
|
||||||
|
// keep last offset
|
||||||
|
switch length {
|
||||||
|
case 5:
|
||||||
|
s += 1
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = int(uint32(src[s-1])) + 4
|
||||||
|
case 6:
|
||||||
|
s += 2
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
|
||||||
|
case 7:
|
||||||
|
s += 3
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
|
||||||
|
default: // 0-> 4
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
offset = toffset
|
||||||
|
}
|
||||||
|
length += 4
|
||||||
|
case tagCopy2:
|
||||||
|
s += 3
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = 1 + int(src[s-3])>>2
|
||||||
|
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||||
|
|
||||||
|
case tagCopy4:
|
||||||
|
s += 5
|
||||||
|
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("src went oob")
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
length = 1 + int(src[s-5])>>2
|
||||||
|
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset <= 0 || length > len(dst)-d {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy from dict
|
||||||
|
if d < offset {
|
||||||
|
if d > MaxDictSrcOffset {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
rOff := len(dict.dict) - (offset - d)
|
||||||
|
if debug {
|
||||||
|
fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
|
||||||
|
}
|
||||||
|
if rOff+length > len(dict.dict) {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
if rOff < 0 {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
copy(dst[d:d+length], dict.dict[rOff:])
|
||||||
|
d += length
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if debug {
|
||||||
|
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy from an earlier sub-slice of dst to a later sub-slice.
|
||||||
|
// If no overlap, use the built-in copy:
|
||||||
|
if offset > length {
|
||||||
|
copy(dst[d:d+length], dst[d-offset:])
|
||||||
|
d += length
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlike the built-in copy function, this byte-by-byte copy always runs
|
||||||
|
// forwards, even if the slices overlap. Conceptually, this is:
|
||||||
|
//
|
||||||
|
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||||
|
//
|
||||||
|
// We align the slices into a and b and show the compiler they are the same size.
|
||||||
|
// This allows the loop to run without bounds checks.
|
||||||
|
a := dst[d : d+length]
|
||||||
|
b := dst[d-offset:]
|
||||||
|
b = b[:len(a)]
|
||||||
|
for i := range a {
|
||||||
|
a[i] = b[i]
|
||||||
|
}
|
||||||
|
d += length
|
||||||
|
}
|
||||||
|
|
||||||
|
if d != len(dst) {
|
||||||
|
if debugErrs {
|
||||||
|
fmt.Println("wanted length", len(dst), "got", d)
|
||||||
|
}
|
||||||
|
return decodeErrCodeCorrupt
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
47
vendor/github.com/klauspost/compress/s2/decode_other.go
generated
vendored
47
vendor/github.com/klauspost/compress/s2/decode_other.go
generated
vendored
@ -28,6 +28,9 @@ func s2Decode(dst, src []byte) int {
|
|||||||
|
|
||||||
// As long as we can read at least 5 bytes...
|
// As long as we can read at least 5 bytes...
|
||||||
for s < len(src)-5 {
|
for s < len(src)-5 {
|
||||||
|
// Removing bounds checks is SLOWER, when if doing
|
||||||
|
// in := src[s:s+5]
|
||||||
|
// Checked on Go 1.18
|
||||||
switch src[s] & 0x03 {
|
switch src[s] & 0x03 {
|
||||||
case tagLiteral:
|
case tagLiteral:
|
||||||
x := uint32(src[s] >> 2)
|
x := uint32(src[s] >> 2)
|
||||||
@ -38,17 +41,25 @@ func s2Decode(dst, src []byte) int {
|
|||||||
s += 2
|
s += 2
|
||||||
x = uint32(src[s-1])
|
x = uint32(src[s-1])
|
||||||
case x == 61:
|
case x == 61:
|
||||||
|
in := src[s : s+3]
|
||||||
|
x = uint32(in[1]) | uint32(in[2])<<8
|
||||||
s += 3
|
s += 3
|
||||||
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
|
||||||
case x == 62:
|
case x == 62:
|
||||||
|
in := src[s : s+4]
|
||||||
|
// Load as 32 bit and shift down.
|
||||||
|
x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
|
||||||
|
x >>= 8
|
||||||
s += 4
|
s += 4
|
||||||
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
|
||||||
case x == 63:
|
case x == 63:
|
||||||
|
in := src[s : s+5]
|
||||||
|
x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
|
||||||
s += 5
|
s += 5
|
||||||
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
|
||||||
}
|
}
|
||||||
length = int(x) + 1
|
length = int(x) + 1
|
||||||
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("corrupt: lit size", length)
|
||||||
|
}
|
||||||
return decodeErrCodeCorrupt
|
return decodeErrCodeCorrupt
|
||||||
}
|
}
|
||||||
if debug {
|
if debug {
|
||||||
@ -62,8 +73,8 @@ func s2Decode(dst, src []byte) int {
|
|||||||
|
|
||||||
case tagCopy1:
|
case tagCopy1:
|
||||||
s += 2
|
s += 2
|
||||||
length = int(src[s-2]) >> 2 & 0x7
|
|
||||||
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||||
|
length = int(src[s-2]) >> 2 & 0x7
|
||||||
if toffset == 0 {
|
if toffset == 0 {
|
||||||
if debug {
|
if debug {
|
||||||
fmt.Print("(repeat) ")
|
fmt.Print("(repeat) ")
|
||||||
@ -71,14 +82,16 @@ func s2Decode(dst, src []byte) int {
|
|||||||
// keep last offset
|
// keep last offset
|
||||||
switch length {
|
switch length {
|
||||||
case 5:
|
case 5:
|
||||||
|
length = int(src[s]) + 4
|
||||||
s += 1
|
s += 1
|
||||||
length = int(uint32(src[s-1])) + 4
|
|
||||||
case 6:
|
case 6:
|
||||||
|
in := src[s : s+2]
|
||||||
|
length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
|
||||||
s += 2
|
s += 2
|
||||||
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
|
|
||||||
case 7:
|
case 7:
|
||||||
|
in := src[s : s+3]
|
||||||
|
length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
|
||||||
s += 3
|
s += 3
|
||||||
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
|
|
||||||
default: // 0-> 4
|
default: // 0-> 4
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -86,17 +99,23 @@ func s2Decode(dst, src []byte) int {
|
|||||||
}
|
}
|
||||||
length += 4
|
length += 4
|
||||||
case tagCopy2:
|
case tagCopy2:
|
||||||
|
in := src[s : s+3]
|
||||||
|
offset = int(uint32(in[1]) | uint32(in[2])<<8)
|
||||||
|
length = 1 + int(in[0])>>2
|
||||||
s += 3
|
s += 3
|
||||||
length = 1 + int(src[s-3])>>2
|
|
||||||
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
|
||||||
|
|
||||||
case tagCopy4:
|
case tagCopy4:
|
||||||
|
in := src[s : s+5]
|
||||||
|
offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
|
||||||
|
length = 1 + int(in[0])>>2
|
||||||
s += 5
|
s += 5
|
||||||
length = 1 + int(src[s-5])>>2
|
|
||||||
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if offset <= 0 || d < offset || length > len(dst)-d {
|
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
|
||||||
|
}
|
||||||
|
|
||||||
return decodeErrCodeCorrupt
|
return decodeErrCodeCorrupt
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,6 +182,9 @@ func s2Decode(dst, src []byte) int {
|
|||||||
}
|
}
|
||||||
length = int(x) + 1
|
length = int(x) + 1
|
||||||
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("corrupt: lit size", length)
|
||||||
|
}
|
||||||
return decodeErrCodeCorrupt
|
return decodeErrCodeCorrupt
|
||||||
}
|
}
|
||||||
if debug {
|
if debug {
|
||||||
@ -229,6 +251,9 @@ func s2Decode(dst, src []byte) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if offset <= 0 || d < offset || length > len(dst)-d {
|
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
|
||||||
|
}
|
||||||
return decodeErrCodeCorrupt
|
return decodeErrCodeCorrupt
|
||||||
}
|
}
|
||||||
|
|
||||||
|
331
vendor/github.com/klauspost/compress/s2/dict.go
generated
vendored
Normal file
331
vendor/github.com/klauspost/compress/s2/dict.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
|||||||
|
// Copyright (c) 2022+ Klaus Post. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package s2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MinDictSize is the minimum dictionary size when repeat has been read.
|
||||||
|
MinDictSize = 16
|
||||||
|
|
||||||
|
// MaxDictSize is the maximum dictionary size when repeat has been read.
|
||||||
|
MaxDictSize = 65536
|
||||||
|
|
||||||
|
// MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
|
||||||
|
MaxDictSrcOffset = 65535
|
||||||
|
)
|
||||||
|
|
||||||
|
// Dict contains a dictionary that can be used for encoding and decoding s2
|
||||||
|
type Dict struct {
|
||||||
|
dict []byte
|
||||||
|
repeat int // Repeat as index of dict
|
||||||
|
|
||||||
|
fast, better, best sync.Once
|
||||||
|
fastTable *[1 << 14]uint16
|
||||||
|
|
||||||
|
betterTableShort *[1 << 14]uint16
|
||||||
|
betterTableLong *[1 << 17]uint16
|
||||||
|
|
||||||
|
bestTableShort *[1 << 16]uint32
|
||||||
|
bestTableLong *[1 << 19]uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDict will read a dictionary.
|
||||||
|
// It will return nil if the dictionary is invalid.
|
||||||
|
func NewDict(dict []byte) *Dict {
|
||||||
|
if len(dict) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var d Dict
|
||||||
|
// Repeat is the first value of the dict
|
||||||
|
r, n := binary.Uvarint(dict)
|
||||||
|
if n <= 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dict = dict[n:]
|
||||||
|
d.dict = dict
|
||||||
|
if cap(d.dict) < len(d.dict)+16 {
|
||||||
|
d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
|
||||||
|
}
|
||||||
|
if len(dict) < MinDictSize || len(dict) > MaxDictSize {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
d.repeat = int(r)
|
||||||
|
if d.repeat > len(dict) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes will return a serialized version of the dictionary.
|
||||||
|
// The output can be sent to NewDict.
|
||||||
|
func (d *Dict) Bytes() []byte {
|
||||||
|
dst := make([]byte, binary.MaxVarintLen16+len(d.dict))
|
||||||
|
return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeDict will create a dictionary.
|
||||||
|
// 'data' must be at least MinDictSize.
|
||||||
|
// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
|
||||||
|
// If searchStart is set the start repeat value will be set to the last
|
||||||
|
// match of this content.
|
||||||
|
// If no matches are found, it will attempt to find shorter matches.
|
||||||
|
// This content should match the typical start of a block.
|
||||||
|
// If at least 4 bytes cannot be matched, repeat is set to start of block.
|
||||||
|
func MakeDict(data []byte, searchStart []byte) *Dict {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(data) > MaxDictSize {
|
||||||
|
data = data[len(data)-MaxDictSize:]
|
||||||
|
}
|
||||||
|
var d Dict
|
||||||
|
dict := data
|
||||||
|
d.dict = dict
|
||||||
|
if cap(d.dict) < len(d.dict)+16 {
|
||||||
|
d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
|
||||||
|
}
|
||||||
|
if len(dict) < MinDictSize {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the longest match possible, last entry if multiple.
|
||||||
|
for s := len(searchStart); s > 4; s-- {
|
||||||
|
if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 {
|
||||||
|
d.repeat = idx
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
//
|
||||||
|
// The blocks will require the same amount of memory to decode as encoding,
|
||||||
|
// and does not make for concurrent decoding.
|
||||||
|
// Also note that blocks do not contain CRC information, so corruption may be undetected.
|
||||||
|
//
|
||||||
|
// If you need to encode larger amounts of data, consider using
|
||||||
|
// the streaming interface which gives all of these features.
|
||||||
|
func (d *Dict) Encode(dst, src []byte) []byte {
|
||||||
|
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||||
|
panic(ErrTooLarge)
|
||||||
|
} else if cap(dst) < n {
|
||||||
|
dst = make([]byte, n)
|
||||||
|
} else {
|
||||||
|
dst = dst[:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||||
|
dstP := binary.PutUvarint(dst, uint64(len(src)))
|
||||||
|
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
if len(src) < minNonLiteralBlockSize {
|
||||||
|
dstP += emitLiteral(dst[dstP:], src)
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
n := encodeBlockDictGo(dst[dstP:], src, d)
|
||||||
|
if n > 0 {
|
||||||
|
dstP += n
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
// Not compressible
|
||||||
|
dstP += emitLiteral(dst[dstP:], src)
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// EncodeBetter compresses better than Encode but typically with a
|
||||||
|
// 10-40% speed decrease on both compression and decompression.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
//
|
||||||
|
// The blocks will require the same amount of memory to decode as encoding,
|
||||||
|
// and does not make for concurrent decoding.
|
||||||
|
// Also note that blocks do not contain CRC information, so corruption may be undetected.
|
||||||
|
//
|
||||||
|
// If you need to encode larger amounts of data, consider using
|
||||||
|
// the streaming interface which gives all of these features.
|
||||||
|
func (d *Dict) EncodeBetter(dst, src []byte) []byte {
|
||||||
|
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||||
|
panic(ErrTooLarge)
|
||||||
|
} else if len(dst) < n {
|
||||||
|
dst = make([]byte, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||||
|
dstP := binary.PutUvarint(dst, uint64(len(src)))
|
||||||
|
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
if len(src) < minNonLiteralBlockSize {
|
||||||
|
dstP += emitLiteral(dst[dstP:], src)
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
n := encodeBlockBetterDict(dst[dstP:], src, d)
|
||||||
|
if n > 0 {
|
||||||
|
dstP += n
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
// Not compressible
|
||||||
|
dstP += emitLiteral(dst[dstP:], src)
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeBest returns the encoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// EncodeBest compresses as good as reasonably possible but with a
|
||||||
|
// big speed decrease.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
//
|
||||||
|
// The blocks will require the same amount of memory to decode as encoding,
|
||||||
|
// and does not make for concurrent decoding.
|
||||||
|
// Also note that blocks do not contain CRC information, so corruption may be undetected.
|
||||||
|
//
|
||||||
|
// If you need to encode larger amounts of data, consider using
|
||||||
|
// the streaming interface which gives all of these features.
|
||||||
|
func (d *Dict) EncodeBest(dst, src []byte) []byte {
|
||||||
|
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||||
|
panic(ErrTooLarge)
|
||||||
|
} else if len(dst) < n {
|
||||||
|
dst = make([]byte, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||||
|
dstP := binary.PutUvarint(dst, uint64(len(src)))
|
||||||
|
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
if len(src) < minNonLiteralBlockSize {
|
||||||
|
dstP += emitLiteral(dst[dstP:], src)
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
n := encodeBlockBest(dst[dstP:], src, d)
|
||||||
|
if n > 0 {
|
||||||
|
dstP += n
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
// Not compressible
|
||||||
|
dstP += emitLiteral(dst[dstP:], src)
|
||||||
|
return dst[:dstP]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||||
|
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
|
//
|
||||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||||
|
func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
|
||||||
|
dLen, s, err := decodedLen(src)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if dLen <= cap(dst) {
|
||||||
|
dst = dst[:dLen]
|
||||||
|
} else {
|
||||||
|
dst = make([]byte, dLen)
|
||||||
|
}
|
||||||
|
if s2DecodeDict(dst, src[s:], d) != 0 {
|
||||||
|
return nil, ErrCorrupt
|
||||||
|
}
|
||||||
|
return dst, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dict) initFast() {
|
||||||
|
d.fast.Do(func() {
|
||||||
|
const (
|
||||||
|
tableBits = 14
|
||||||
|
maxTableSize = 1 << tableBits
|
||||||
|
)
|
||||||
|
|
||||||
|
var table [maxTableSize]uint16
|
||||||
|
// We stop so any entry of length 8 can always be read.
|
||||||
|
for i := 0; i < len(d.dict)-8-2; i += 3 {
|
||||||
|
x0 := load64(d.dict, i)
|
||||||
|
h0 := hash6(x0, tableBits)
|
||||||
|
h1 := hash6(x0>>8, tableBits)
|
||||||
|
h2 := hash6(x0>>16, tableBits)
|
||||||
|
table[h0] = uint16(i)
|
||||||
|
table[h1] = uint16(i + 1)
|
||||||
|
table[h2] = uint16(i + 2)
|
||||||
|
}
|
||||||
|
d.fastTable = &table
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dict) initBetter() {
|
||||||
|
d.better.Do(func() {
|
||||||
|
const (
|
||||||
|
// Long hash matches.
|
||||||
|
lTableBits = 17
|
||||||
|
maxLTableSize = 1 << lTableBits
|
||||||
|
|
||||||
|
// Short hash matches.
|
||||||
|
sTableBits = 14
|
||||||
|
maxSTableSize = 1 << sTableBits
|
||||||
|
)
|
||||||
|
|
||||||
|
var lTable [maxLTableSize]uint16
|
||||||
|
var sTable [maxSTableSize]uint16
|
||||||
|
|
||||||
|
// We stop so any entry of length 8 can always be read.
|
||||||
|
for i := 0; i < len(d.dict)-8; i++ {
|
||||||
|
cv := load64(d.dict, i)
|
||||||
|
lTable[hash7(cv, lTableBits)] = uint16(i)
|
||||||
|
sTable[hash4(cv, sTableBits)] = uint16(i)
|
||||||
|
}
|
||||||
|
d.betterTableShort = &sTable
|
||||||
|
d.betterTableLong = &lTable
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dict) initBest() {
|
||||||
|
d.best.Do(func() {
|
||||||
|
const (
|
||||||
|
// Long hash matches.
|
||||||
|
lTableBits = 19
|
||||||
|
maxLTableSize = 1 << lTableBits
|
||||||
|
|
||||||
|
// Short hash matches.
|
||||||
|
sTableBits = 16
|
||||||
|
maxSTableSize = 1 << sTableBits
|
||||||
|
)
|
||||||
|
|
||||||
|
var lTable [maxLTableSize]uint32
|
||||||
|
var sTable [maxSTableSize]uint32
|
||||||
|
|
||||||
|
// We stop so any entry of length 8 can always be read.
|
||||||
|
for i := 0; i < len(d.dict)-8; i++ {
|
||||||
|
cv := load64(d.dict, i)
|
||||||
|
hashL := hash8(cv, lTableBits)
|
||||||
|
hashS := hash4(cv, sTableBits)
|
||||||
|
candidateL := lTable[hashL]
|
||||||
|
candidateS := sTable[hashS]
|
||||||
|
lTable[hashL] = uint32(i) | candidateL<<16
|
||||||
|
sTable[hashS] = uint32(i) | candidateS<<16
|
||||||
|
}
|
||||||
|
d.bestTableShort = &sTable
|
||||||
|
d.bestTableLong = &lTable
|
||||||
|
})
|
||||||
|
}
|
46
vendor/github.com/klauspost/compress/s2/encode.go
generated
vendored
46
vendor/github.com/klauspost/compress/s2/encode.go
generated
vendored
@ -58,6 +58,32 @@ func Encode(dst, src []byte) []byte {
|
|||||||
return dst[:d]
|
return dst[:d]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EstimateBlockSize will perform a very fast compression
|
||||||
|
// without outputting the result and return the compressed output size.
|
||||||
|
// The function returns -1 if no improvement could be achieved.
|
||||||
|
// Using actual compression will most often produce better compression than the estimate.
|
||||||
|
func EstimateBlockSize(src []byte) (d int) {
|
||||||
|
if len(src) < 6 || int64(len(src)) > 0xffffffff {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if len(src) <= 1024 {
|
||||||
|
d = calcBlockSizeSmall(src)
|
||||||
|
} else {
|
||||||
|
d = calcBlockSize(src)
|
||||||
|
}
|
||||||
|
|
||||||
|
if d == 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
// Size of the varint encoded block size.
|
||||||
|
d += (bits.Len64(uint64(len(src))) + 7) / 7
|
||||||
|
|
||||||
|
if d >= len(src) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
|
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
|
||||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||||
// Otherwise, a newly allocated slice will be returned.
|
// Otherwise, a newly allocated slice will be returned.
|
||||||
@ -132,7 +158,7 @@ func EncodeBest(dst, src []byte) []byte {
|
|||||||
d += emitLiteral(dst[d:], src)
|
d += emitLiteral(dst[d:], src)
|
||||||
return dst[:d]
|
return dst[:d]
|
||||||
}
|
}
|
||||||
n := encodeBlockBest(dst[d:], src)
|
n := encodeBlockBest(dst[d:], src, nil)
|
||||||
if n > 0 {
|
if n > 0 {
|
||||||
d += n
|
d += n
|
||||||
return dst[:d]
|
return dst[:d]
|
||||||
@ -408,6 +434,7 @@ type Writer struct {
|
|||||||
randSrc io.Reader
|
randSrc io.Reader
|
||||||
writerWg sync.WaitGroup
|
writerWg sync.WaitGroup
|
||||||
index Index
|
index Index
|
||||||
|
customEnc func(dst, src []byte) int
|
||||||
|
|
||||||
// wroteStreamHeader is whether we have written the stream header.
|
// wroteStreamHeader is whether we have written the stream header.
|
||||||
wroteStreamHeader bool
|
wroteStreamHeader bool
|
||||||
@ -773,6 +800,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
|
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
|
||||||
|
if w.customEnc != nil {
|
||||||
|
return w.customEnc(obuf, uncompressed)
|
||||||
|
}
|
||||||
if w.snappy {
|
if w.snappy {
|
||||||
switch w.level {
|
switch w.level {
|
||||||
case levelFast:
|
case levelFast:
|
||||||
@ -790,7 +820,7 @@ func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
|
|||||||
case levelBetter:
|
case levelBetter:
|
||||||
return encodeBlockBetter(obuf, uncompressed)
|
return encodeBlockBetter(obuf, uncompressed)
|
||||||
case levelBest:
|
case levelBest:
|
||||||
return encodeBlockBest(obuf, uncompressed)
|
return encodeBlockBest(obuf, uncompressed, nil)
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
@ -1339,3 +1369,15 @@ func WriterFlushOnWrite() WriterOption {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WriterCustomEncoder allows to override the encoder for blocks on the stream.
|
||||||
|
// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
|
||||||
|
// Block size (initial varint) should not be added by the encoder.
|
||||||
|
// Returning value 0 indicates the block could not be compressed.
|
||||||
|
// The function should expect to be called concurrently.
|
||||||
|
func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
|
||||||
|
return func(w *Writer) error {
|
||||||
|
w.customEnc = fn
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
593
vendor/github.com/klauspost/compress/s2/encode_all.go
generated
vendored
593
vendor/github.com/klauspost/compress/s2/encode_all.go
generated
vendored
@ -8,6 +8,7 @@ package s2
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"math/bits"
|
"math/bits"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -58,6 +59,7 @@ func encodeGo(dst, src []byte) []byte {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockGo(dst, src []byte) (d int) {
|
func encodeBlockGo(dst, src []byte) (d int) {
|
||||||
@ -454,3 +456,594 @@ emitRemainder:
|
|||||||
}
|
}
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
|
// been written.
|
||||||
|
//
|
||||||
|
// It also assumes that:
|
||||||
|
//
|
||||||
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
|
func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
|
||||||
|
// Initialize the hash table.
|
||||||
|
const (
|
||||||
|
tableBits = 14
|
||||||
|
maxTableSize = 1 << tableBits
|
||||||
|
maxAhead = 8 // maximum bytes ahead without checking sLimit
|
||||||
|
|
||||||
|
debug = false
|
||||||
|
)
|
||||||
|
dict.initFast()
|
||||||
|
|
||||||
|
var table [maxTableSize]uint32
|
||||||
|
|
||||||
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
|
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||||
|
// looking for copies.
|
||||||
|
sLimit := len(src) - inputMargin
|
||||||
|
if sLimit > MaxDictSrcOffset-maxAhead {
|
||||||
|
sLimit = MaxDictSrcOffset - maxAhead
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we can't compress to at least this.
|
||||||
|
dstLimit := len(src) - len(src)>>5 - 5
|
||||||
|
|
||||||
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
|
nextEmit := 0
|
||||||
|
|
||||||
|
// The encoded form can start with a dict entry (copy or repeat).
|
||||||
|
s := 0
|
||||||
|
|
||||||
|
// Convert dict repeat to offset
|
||||||
|
repeat := len(dict.dict) - dict.repeat
|
||||||
|
cv := load64(src, 0)
|
||||||
|
|
||||||
|
// While in dict
|
||||||
|
searchDict:
|
||||||
|
for {
|
||||||
|
// Next src position to check
|
||||||
|
nextS := s + (s-nextEmit)>>6 + 4
|
||||||
|
hash0 := hash6(cv, tableBits)
|
||||||
|
hash1 := hash6(cv>>8, tableBits)
|
||||||
|
if nextS > sLimit {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("slimit reached", s, nextS)
|
||||||
|
}
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
candidateDict := int(dict.fastTable[hash0])
|
||||||
|
candidateDict2 := int(dict.fastTable[hash1])
|
||||||
|
candidate2 := int(table[hash1])
|
||||||
|
candidate := int(table[hash0])
|
||||||
|
table[hash0] = uint32(s)
|
||||||
|
table[hash1] = uint32(s + 1)
|
||||||
|
hash2 := hash6(cv>>16, tableBits)
|
||||||
|
|
||||||
|
// Check repeat at offset checkRep.
|
||||||
|
const checkRep = 1
|
||||||
|
|
||||||
|
if repeat > s {
|
||||||
|
candidate := len(dict.dict) - repeat + s
|
||||||
|
if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
|
||||||
|
// Extend back
|
||||||
|
base := s
|
||||||
|
for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
if debug && nextEmit != base {
|
||||||
|
fmt.Println("emitted ", base-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
s += 4
|
||||||
|
candidate += 4
|
||||||
|
for candidate < len(dict.dict)-8 && s <= len(src)-8 {
|
||||||
|
if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
d += emitRepeat(dst[d:], repeat, s-base)
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
|
||||||
|
}
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||||
|
base := s + checkRep
|
||||||
|
// Extend back
|
||||||
|
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
if debug && nextEmit != base {
|
||||||
|
fmt.Println("emitted ", base-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend forward
|
||||||
|
candidate := s - repeat + 4 + checkRep
|
||||||
|
s += 4 + checkRep
|
||||||
|
for s <= sLimit {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
// Validate match.
|
||||||
|
if s <= candidate {
|
||||||
|
panic("s <= candidate")
|
||||||
|
}
|
||||||
|
a := src[base:s]
|
||||||
|
b := src[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if nextEmit > 0 {
|
||||||
|
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||||
|
d += emitRepeat(dst[d:], repeat, s-base)
|
||||||
|
} else {
|
||||||
|
// First match, cannot be repeat.
|
||||||
|
d += emitCopy(dst[d:], repeat, s-base)
|
||||||
|
}
|
||||||
|
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted reg repeat", s-base, "s:", s)
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue searchDict
|
||||||
|
}
|
||||||
|
if s == 0 {
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
continue searchDict
|
||||||
|
}
|
||||||
|
// Start with table. These matches will always be closer.
|
||||||
|
if uint32(cv) == load32(src, candidate) {
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
candidate = int(table[hash2])
|
||||||
|
if uint32(cv>>8) == load32(src, candidate2) {
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
candidate = candidate2
|
||||||
|
s++
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check dict. Dicts have longer offsets, so we want longer matches.
|
||||||
|
if cv == load64(dict.dict, candidateDict) {
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
goto emitDict
|
||||||
|
}
|
||||||
|
|
||||||
|
candidateDict = int(dict.fastTable[hash2])
|
||||||
|
// Check if upper 7 bytes match
|
||||||
|
if candidateDict2 >= 1 {
|
||||||
|
if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
candidateDict = candidateDict2
|
||||||
|
s++
|
||||||
|
goto emitDict
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
if uint32(cv>>16) == load32(src, candidate) {
|
||||||
|
s += 2
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
if candidateDict >= 2 {
|
||||||
|
// Check if upper 6 bytes match
|
||||||
|
if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
|
||||||
|
s += 2
|
||||||
|
goto emitDict
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
continue searchDict
|
||||||
|
|
||||||
|
emitDict:
|
||||||
|
{
|
||||||
|
if debug {
|
||||||
|
if load32(dict.dict, candidateDict) != load32(src, s) {
|
||||||
|
panic("dict emit mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Extend backwards.
|
||||||
|
// The top bytes will be rechecked to get the full match.
|
||||||
|
for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
|
||||||
|
candidateDict--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", s-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
repeat = s + (len(dict.dict)) - candidateDict
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidateDict += 4
|
||||||
|
for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
|
||||||
|
if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidateDict += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Matches longer than 64 are split.
|
||||||
|
if s <= sLimit || s-base < 8 {
|
||||||
|
d += emitCopy(dst[d:], repeat, s-base)
|
||||||
|
} else {
|
||||||
|
// Split to ensure we don't start a copy within next block
|
||||||
|
d += emitCopy(dst[d:], repeat, 4)
|
||||||
|
d += emitRepeat(dst[d:], repeat, s-base-4)
|
||||||
|
}
|
||||||
|
if false {
|
||||||
|
// Validate match.
|
||||||
|
if s <= candidate {
|
||||||
|
panic("s <= candidate")
|
||||||
|
}
|
||||||
|
a := src[base:s]
|
||||||
|
b := dict.dict[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
|
||||||
|
}
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index and continue loop to try new candidate.
|
||||||
|
x := load64(src, s-2)
|
||||||
|
m2Hash := hash6(x, tableBits)
|
||||||
|
currHash := hash6(x>>8, tableBits)
|
||||||
|
candidate = int(table[currHash])
|
||||||
|
table[m2Hash] = uint32(s - 2)
|
||||||
|
table[currHash] = uint32(s - 1)
|
||||||
|
cv = load64(src, s)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
emitMatch:
|
||||||
|
|
||||||
|
// Extend backwards.
|
||||||
|
// The top bytes will be rechecked to get the full match.
|
||||||
|
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
|
||||||
|
candidate--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", s-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
// Call emitCopy, and then see if another emitCopy could be our next
|
||||||
|
// move. Repeat until we find no match for the input immediately after
|
||||||
|
// what was consumed by the last emitCopy call.
|
||||||
|
//
|
||||||
|
// If we exit this loop normally then we need to call emitLiteral next,
|
||||||
|
// though we don't yet know how big the literal will be. We handle that
|
||||||
|
// by proceeding to the next iteration of the main loop. We also can
|
||||||
|
// exit this loop via goto if we get close to exhausting the input.
|
||||||
|
for {
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
repeat = base - candidate
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidate += 4
|
||||||
|
for s <= len(src)-8 {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopy(dst[d:], repeat, s-base)
|
||||||
|
if debug {
|
||||||
|
// Validate match.
|
||||||
|
if s <= candidate {
|
||||||
|
panic("s <= candidate")
|
||||||
|
}
|
||||||
|
a := src[base:s]
|
||||||
|
b := src[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
|
||||||
|
}
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
// Check for an immediate match, otherwise start search at s+1
|
||||||
|
x := load64(src, s-2)
|
||||||
|
m2Hash := hash6(x, tableBits)
|
||||||
|
currHash := hash6(x>>16, tableBits)
|
||||||
|
candidate = int(table[currHash])
|
||||||
|
table[m2Hash] = uint32(s - 2)
|
||||||
|
table[currHash] = uint32(s)
|
||||||
|
if debug && s == candidate {
|
||||||
|
panic("s == candidate")
|
||||||
|
}
|
||||||
|
if uint32(x>>16) != load32(src, candidate) {
|
||||||
|
cv = load64(src, s+1)
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search without dict:
|
||||||
|
if repeat > s {
|
||||||
|
repeat = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// No more dict
|
||||||
|
sLimit = len(src) - inputMargin
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("non-dict matching at", s, "repeat:", repeat)
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
if debug {
|
||||||
|
fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
candidate := 0
|
||||||
|
for {
|
||||||
|
// Next src position to check
|
||||||
|
nextS := s + (s-nextEmit)>>6 + 4
|
||||||
|
if nextS > sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
hash0 := hash6(cv, tableBits)
|
||||||
|
hash1 := hash6(cv>>8, tableBits)
|
||||||
|
candidate = int(table[hash0])
|
||||||
|
candidate2 := int(table[hash1])
|
||||||
|
table[hash0] = uint32(s)
|
||||||
|
table[hash1] = uint32(s + 1)
|
||||||
|
hash2 := hash6(cv>>16, tableBits)
|
||||||
|
|
||||||
|
// Check repeat at offset checkRep.
|
||||||
|
const checkRep = 1
|
||||||
|
if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||||
|
base := s + checkRep
|
||||||
|
// Extend back
|
||||||
|
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
if debug && nextEmit != base {
|
||||||
|
fmt.Println("emitted ", base-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
// Extend forward
|
||||||
|
candidate := s - repeat + 4 + checkRep
|
||||||
|
s += 4 + checkRep
|
||||||
|
for s <= sLimit {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
// Validate match.
|
||||||
|
if s <= candidate {
|
||||||
|
panic("s <= candidate")
|
||||||
|
}
|
||||||
|
a := src[base:s]
|
||||||
|
b := src[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if nextEmit > 0 {
|
||||||
|
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||||
|
d += emitRepeat(dst[d:], repeat, s-base)
|
||||||
|
} else {
|
||||||
|
// First match, cannot be repeat.
|
||||||
|
d += emitCopy(dst[d:], repeat, s-base)
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
|
||||||
|
}
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint32(cv) == load32(src, candidate) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
candidate = int(table[hash2])
|
||||||
|
if uint32(cv>>8) == load32(src, candidate2) {
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
candidate = candidate2
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
if uint32(cv>>16) == load32(src, candidate) {
|
||||||
|
s += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend backwards.
|
||||||
|
// The top bytes will be rechecked to get the full match.
|
||||||
|
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
|
||||||
|
candidate--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", s-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
// Call emitCopy, and then see if another emitCopy could be our next
|
||||||
|
// move. Repeat until we find no match for the input immediately after
|
||||||
|
// what was consumed by the last emitCopy call.
|
||||||
|
//
|
||||||
|
// If we exit this loop normally then we need to call emitLiteral next,
|
||||||
|
// though we don't yet know how big the literal will be. We handle that
|
||||||
|
// by proceeding to the next iteration of the main loop. We also can
|
||||||
|
// exit this loop via goto if we get close to exhausting the input.
|
||||||
|
for {
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
repeat = base - candidate
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidate += 4
|
||||||
|
for s <= len(src)-8 {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopy(dst[d:], repeat, s-base)
|
||||||
|
if debug {
|
||||||
|
// Validate match.
|
||||||
|
if s <= candidate {
|
||||||
|
panic("s <= candidate")
|
||||||
|
}
|
||||||
|
a := src[base:s]
|
||||||
|
b := src[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
|
||||||
|
}
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
// Check for an immediate match, otherwise start search at s+1
|
||||||
|
x := load64(src, s-2)
|
||||||
|
m2Hash := hash6(x, tableBits)
|
||||||
|
currHash := hash6(x>>16, tableBits)
|
||||||
|
candidate = int(table[currHash])
|
||||||
|
table[m2Hash] = uint32(s - 2)
|
||||||
|
table[currHash] = uint32(s)
|
||||||
|
if debug && s == candidate {
|
||||||
|
panic("s == candidate")
|
||||||
|
}
|
||||||
|
if uint32(x>>16) != load32(src, candidate) {
|
||||||
|
cv = load64(src, s+1)
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
if nextEmit < len(src) {
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+len(src)-nextEmit > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", len(src)-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
6
vendor/github.com/klauspost/compress/s2/encode_amd64.go
generated
vendored
6
vendor/github.com/klauspost/compress/s2/encode_amd64.go
generated
vendored
@ -3,11 +3,14 @@
|
|||||||
|
|
||||||
package s2
|
package s2
|
||||||
|
|
||||||
|
const hasAmd64Asm = true
|
||||||
|
|
||||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlock(dst, src []byte) (d int) {
|
func encodeBlock(dst, src []byte) (d int) {
|
||||||
@ -43,6 +46,7 @@ func encodeBlock(dst, src []byte) (d int) {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockBetter(dst, src []byte) (d int) {
|
func encodeBlockBetter(dst, src []byte) (d int) {
|
||||||
@ -78,6 +82,7 @@ func encodeBlockBetter(dst, src []byte) (d int) {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockSnappy(dst, src []byte) (d int) {
|
func encodeBlockSnappy(dst, src []byte) (d int) {
|
||||||
@ -112,6 +117,7 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
||||||
|
205
vendor/github.com/klauspost/compress/s2/encode_best.go
generated
vendored
205
vendor/github.com/klauspost/compress/s2/encode_best.go
generated
vendored
@ -7,6 +7,7 @@ package s2
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"math/bits"
|
"math/bits"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -15,9 +16,10 @@ import (
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockBest(dst, src []byte) (d int) {
|
func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
|
||||||
// Initialize the hash tables.
|
// Initialize the hash tables.
|
||||||
const (
|
const (
|
||||||
// Long hash matches.
|
// Long hash matches.
|
||||||
@ -29,6 +31,8 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
maxSTableSize = 1 << sTableBits
|
maxSTableSize = 1 << sTableBits
|
||||||
|
|
||||||
inputMargin = 8 + 2
|
inputMargin = 8 + 2
|
||||||
|
|
||||||
|
debug = false
|
||||||
)
|
)
|
||||||
|
|
||||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
@ -38,6 +42,10 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
if len(src) < minNonLiteralBlockSize {
|
if len(src) < minNonLiteralBlockSize {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
sLimitDict := len(src) - inputMargin
|
||||||
|
if sLimitDict > MaxDictSrcOffset-inputMargin {
|
||||||
|
sLimitDict = MaxDictSrcOffset - inputMargin
|
||||||
|
}
|
||||||
|
|
||||||
var lTable [maxLTableSize]uint64
|
var lTable [maxLTableSize]uint64
|
||||||
var sTable [maxSTableSize]uint64
|
var sTable [maxSTableSize]uint64
|
||||||
@ -51,10 +59,15 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
// The encoded form must start with a literal, as there are no previous
|
// The encoded form must start with a literal, as there are no previous
|
||||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||||
s := 1
|
s := 1
|
||||||
|
repeat := 1
|
||||||
|
if dict != nil {
|
||||||
|
dict.initBest()
|
||||||
|
s = 0
|
||||||
|
repeat = len(dict.dict) - dict.repeat
|
||||||
|
}
|
||||||
cv := load64(src, s)
|
cv := load64(src, s)
|
||||||
|
|
||||||
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||||
repeat := 1
|
|
||||||
const lowbitMask = 0xffffffff
|
const lowbitMask = 0xffffffff
|
||||||
getCur := func(x uint64) int {
|
getCur := func(x uint64) int {
|
||||||
return int(x & lowbitMask)
|
return int(x & lowbitMask)
|
||||||
@ -70,7 +83,7 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
s int
|
s int
|
||||||
length int
|
length int
|
||||||
score int
|
score int
|
||||||
rep bool
|
rep, dict bool
|
||||||
}
|
}
|
||||||
var best match
|
var best match
|
||||||
for {
|
for {
|
||||||
@ -84,6 +97,12 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
if nextS > sLimit {
|
if nextS > sLimit {
|
||||||
goto emitRemainder
|
goto emitRemainder
|
||||||
}
|
}
|
||||||
|
if dict != nil && s >= MaxDictSrcOffset {
|
||||||
|
dict = nil
|
||||||
|
if repeat > s {
|
||||||
|
repeat = math.MinInt32
|
||||||
|
}
|
||||||
|
}
|
||||||
hashL := hash8(cv, lTableBits)
|
hashL := hash8(cv, lTableBits)
|
||||||
hashS := hash4(cv, sTableBits)
|
hashS := hash4(cv, sTableBits)
|
||||||
candidateL := lTable[hashL]
|
candidateL := lTable[hashL]
|
||||||
@ -113,7 +132,15 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
}
|
}
|
||||||
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
|
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
|
||||||
s += 4
|
s += 4
|
||||||
for s <= sLimit {
|
for s < len(src) {
|
||||||
|
if len(src)-s < 8 {
|
||||||
|
if src[s] == src[m.length] {
|
||||||
|
m.length++
|
||||||
|
s++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
|
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
|
||||||
m.length += bits.TrailingZeros64(diff) >> 3
|
m.length += bits.TrailingZeros64(diff) >> 3
|
||||||
break
|
break
|
||||||
@ -129,6 +156,62 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
}
|
}
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
matchDict := func(candidate, s int, first uint32, rep bool) match {
|
||||||
|
// Calculate offset as if in continuous array with s
|
||||||
|
offset := -len(dict.dict) + candidate
|
||||||
|
if best.length != 0 && best.s-best.offset == s-offset && !rep {
|
||||||
|
// Don't retest if we have the same offset.
|
||||||
|
return match{offset: offset, s: s}
|
||||||
|
}
|
||||||
|
|
||||||
|
if load32(dict.dict, candidate) != first {
|
||||||
|
return match{offset: offset, s: s}
|
||||||
|
}
|
||||||
|
m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
|
||||||
|
s += 4
|
||||||
|
if !rep {
|
||||||
|
for s < sLimitDict && m.length < len(dict.dict) {
|
||||||
|
if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
|
||||||
|
if src[s] == dict.dict[m.length] {
|
||||||
|
m.length++
|
||||||
|
s++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
|
||||||
|
m.length += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
m.length += 8
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for s < len(src) && m.length < len(dict.dict) {
|
||||||
|
if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
|
||||||
|
if src[s] == dict.dict[m.length] {
|
||||||
|
m.length++
|
||||||
|
s++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
|
||||||
|
m.length += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
m.length += 8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.length -= candidate
|
||||||
|
m.score = score(m)
|
||||||
|
if m.score <= -m.s {
|
||||||
|
// Eliminate if no savings, we might find a better one.
|
||||||
|
m.length = 0
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
bestOf := func(a, b match) match {
|
bestOf := func(a, b match) match {
|
||||||
if b.length == 0 {
|
if b.length == 0 {
|
||||||
@ -145,45 +228,99 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s > 0 {
|
||||||
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
|
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
|
||||||
|
}
|
||||||
|
if dict != nil {
|
||||||
|
candidateL := dict.bestTableLong[hashL]
|
||||||
|
candidateS := dict.bestTableShort[hashS]
|
||||||
|
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
|
||||||
|
best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
|
||||||
|
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
|
||||||
|
best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
|
||||||
|
}
|
||||||
{
|
{
|
||||||
|
if (dict == nil || repeat <= s) && repeat > 0 {
|
||||||
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
|
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
|
||||||
|
} else if s-repeat < -4 && dict != nil {
|
||||||
|
candidate := len(dict.dict) - (repeat - s)
|
||||||
|
best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
|
||||||
|
candidate++
|
||||||
|
best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
|
||||||
|
}
|
||||||
|
|
||||||
if best.length > 0 {
|
if best.length > 0 {
|
||||||
|
hashS := hash4(cv>>8, sTableBits)
|
||||||
// s+1
|
// s+1
|
||||||
nextShort := sTable[hash4(cv>>8, sTableBits)]
|
nextShort := sTable[hashS]
|
||||||
s := s + 1
|
s := s + 1
|
||||||
cv := load64(src, s)
|
cv := load64(src, s)
|
||||||
nextLong := lTable[hash8(cv, lTableBits)]
|
hashL := hash8(cv, lTableBits)
|
||||||
|
nextLong := lTable[hashL]
|
||||||
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
|
||||||
// Repeat at + 2
|
|
||||||
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
|
// Dict at + 1
|
||||||
|
if dict != nil {
|
||||||
|
candidateL := dict.bestTableLong[hashL]
|
||||||
|
candidateS := dict.bestTableShort[hashS]
|
||||||
|
|
||||||
|
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
|
||||||
|
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
|
||||||
|
}
|
||||||
|
|
||||||
// s+2
|
// s+2
|
||||||
if true {
|
if true {
|
||||||
nextShort = sTable[hash4(cv>>8, sTableBits)]
|
hashS := hash4(cv>>8, sTableBits)
|
||||||
|
|
||||||
|
nextShort = sTable[hashS]
|
||||||
s++
|
s++
|
||||||
cv = load64(src, s)
|
cv = load64(src, s)
|
||||||
nextLong = lTable[hash8(cv, lTableBits)]
|
hashL := hash8(cv, lTableBits)
|
||||||
|
nextLong = lTable[hashL]
|
||||||
|
|
||||||
|
if (dict == nil || repeat <= s) && repeat > 0 {
|
||||||
|
// Repeat at + 2
|
||||||
|
best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
|
||||||
|
} else if repeat-s > 4 && dict != nil {
|
||||||
|
candidate := len(dict.dict) - (repeat - s)
|
||||||
|
best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
|
||||||
|
}
|
||||||
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
|
||||||
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
|
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
|
||||||
|
|
||||||
|
// Dict at +2
|
||||||
|
// Very small gain
|
||||||
|
if dict != nil {
|
||||||
|
candidateL := dict.bestTableLong[hashL]
|
||||||
|
candidateS := dict.bestTableShort[hashS]
|
||||||
|
|
||||||
|
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
|
||||||
|
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Search for a match at best match end, see if that is better.
|
// Search for a match at best match end, see if that is better.
|
||||||
if sAt := best.s + best.length; sAt < sLimit {
|
// Allow some bytes at the beginning to mismatch.
|
||||||
sBack := best.s
|
// Sweet spot is around 1-2 bytes, but depends on input.
|
||||||
backL := best.length
|
// The skipped bytes are tested in Extend backwards,
|
||||||
|
// and still picked up as part of the match if they do.
|
||||||
|
const skipBeginning = 2
|
||||||
|
const skipEnd = 1
|
||||||
|
if sAt := best.s + best.length - skipEnd; sAt < sLimit {
|
||||||
|
|
||||||
|
sBack := best.s + skipBeginning - skipEnd
|
||||||
|
backL := best.length - skipBeginning
|
||||||
// Load initial values
|
// Load initial values
|
||||||
cv = load64(src, sBack)
|
cv = load64(src, sBack)
|
||||||
// Search for mismatch
|
|
||||||
|
// Grab candidates...
|
||||||
next := lTable[hash8(load64(src, sAt), lTableBits)]
|
next := lTable[hash8(load64(src, sAt), lTableBits)]
|
||||||
//next := sTable[hash4(load64(src, sAt), sTableBits)]
|
|
||||||
|
|
||||||
if checkAt := getCur(next) - backL; checkAt > 0 {
|
if checkAt := getCur(next) - backL; checkAt > 0 {
|
||||||
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
||||||
@ -191,6 +328,16 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
if checkAt := getPrev(next) - backL; checkAt > 0 {
|
if checkAt := getPrev(next) - backL; checkAt > 0 {
|
||||||
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
||||||
}
|
}
|
||||||
|
// Disabled: Extremely small gain
|
||||||
|
if false {
|
||||||
|
next = sTable[hash4(load64(src, sAt), sTableBits)]
|
||||||
|
if checkAt := getCur(next) - backL; checkAt > 0 {
|
||||||
|
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
||||||
|
}
|
||||||
|
if checkAt := getPrev(next) - backL; checkAt > 0 {
|
||||||
|
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -209,7 +356,7 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
|
|
||||||
// Extend backwards, not needed for repeats...
|
// Extend backwards, not needed for repeats...
|
||||||
s = best.s
|
s = best.s
|
||||||
if !best.rep {
|
if !best.rep && !best.dict {
|
||||||
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
|
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
|
||||||
best.offset--
|
best.offset--
|
||||||
best.length++
|
best.length++
|
||||||
@ -226,7 +373,6 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
|
|
||||||
base := s
|
base := s
|
||||||
offset := s - best.offset
|
offset := s - best.offset
|
||||||
|
|
||||||
s += best.length
|
s += best.length
|
||||||
|
|
||||||
if offset > 65535 && s-base <= 5 && !best.rep {
|
if offset > 65535 && s-base <= 5 && !best.rep {
|
||||||
@ -238,16 +384,28 @@ func encodeBlockBest(dst, src []byte) (d int) {
|
|||||||
cv = load64(src, s)
|
cv = load64(src, s)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if debug && nextEmit != base {
|
||||||
|
fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
|
||||||
|
}
|
||||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
if best.rep {
|
if best.rep {
|
||||||
if nextEmit > 0 {
|
if nextEmit > 0 || best.dict {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
|
||||||
|
}
|
||||||
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||||
d += emitRepeat(dst[d:], offset, best.length)
|
d += emitRepeat(dst[d:], offset, best.length)
|
||||||
} else {
|
} else {
|
||||||
// First match, cannot be repeat.
|
// First match without dict cannot be a repeat.
|
||||||
|
if debug {
|
||||||
|
fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
|
||||||
|
}
|
||||||
d += emitCopy(dst[d:], offset, best.length)
|
d += emitCopy(dst[d:], offset, best.length)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
|
||||||
|
}
|
||||||
d += emitCopy(dst[d:], offset, best.length)
|
d += emitCopy(dst[d:], offset, best.length)
|
||||||
}
|
}
|
||||||
repeat = offset
|
repeat = offset
|
||||||
@ -278,6 +436,9 @@ emitRemainder:
|
|||||||
if d+len(src)-nextEmit > dstLimit {
|
if d+len(src)-nextEmit > dstLimit {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", len(src)-nextEmit, "literals")
|
||||||
|
}
|
||||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
}
|
}
|
||||||
return d
|
return d
|
||||||
@ -288,6 +449,7 @@ emitRemainder:
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockBestSnappy(dst, src []byte) (d int) {
|
func encodeBlockBestSnappy(dst, src []byte) (d int) {
|
||||||
@ -546,6 +708,7 @@ emitRemainder:
|
|||||||
// emitCopySize returns the size to encode the offset+length
|
// emitCopySize returns the size to encode the offset+length
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// 1 <= offset && offset <= math.MaxUint32
|
// 1 <= offset && offset <= math.MaxUint32
|
||||||
// 4 <= length && length <= 1 << 24
|
// 4 <= length && length <= 1 << 24
|
||||||
func emitCopySize(offset, length int) int {
|
func emitCopySize(offset, length int) int {
|
||||||
@ -584,6 +747,7 @@ func emitCopySize(offset, length int) int {
|
|||||||
// emitCopyNoRepeatSize returns the size to encode the offset+length
|
// emitCopyNoRepeatSize returns the size to encode the offset+length
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// 1 <= offset && offset <= math.MaxUint32
|
// 1 <= offset && offset <= math.MaxUint32
|
||||||
// 4 <= length && length <= 1 << 24
|
// 4 <= length && length <= 1 << 24
|
||||||
func emitCopyNoRepeatSize(offset, length int) int {
|
func emitCopyNoRepeatSize(offset, length int) int {
|
||||||
@ -621,7 +785,6 @@ func emitRepeatSize(offset, length int) int {
|
|||||||
left := 0
|
left := 0
|
||||||
if length > maxRepeat {
|
if length > maxRepeat {
|
||||||
left = length - maxRepeat + 4
|
left = length - maxRepeat + 4
|
||||||
length = maxRepeat - 4
|
|
||||||
}
|
}
|
||||||
if left > 0 {
|
if left > 0 {
|
||||||
return 5 + emitRepeatSize(offset, left)
|
return 5 + emitRepeatSize(offset, left)
|
||||||
|
719
vendor/github.com/klauspost/compress/s2/encode_better.go
generated
vendored
719
vendor/github.com/klauspost/compress/s2/encode_better.go
generated
vendored
@ -6,6 +6,8 @@
|
|||||||
package s2
|
package s2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
"math/bits"
|
"math/bits"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -42,6 +44,7 @@ func hash8(u uint64, h uint8) uint32 {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockBetterGo(dst, src []byte) (d int) {
|
func encodeBlockBetterGo(dst, src []byte) (d int) {
|
||||||
@ -56,7 +59,7 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
|
|||||||
// Initialize the hash tables.
|
// Initialize the hash tables.
|
||||||
const (
|
const (
|
||||||
// Long hash matches.
|
// Long hash matches.
|
||||||
lTableBits = 16
|
lTableBits = 17
|
||||||
maxLTableSize = 1 << lTableBits
|
maxLTableSize = 1 << lTableBits
|
||||||
|
|
||||||
// Short hash matches.
|
// Short hash matches.
|
||||||
@ -97,9 +100,26 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
|
|||||||
lTable[hashL] = uint32(s)
|
lTable[hashL] = uint32(s)
|
||||||
sTable[hashS] = uint32(s)
|
sTable[hashS] = uint32(s)
|
||||||
|
|
||||||
|
valLong := load64(src, candidateL)
|
||||||
|
valShort := load64(src, candidateS)
|
||||||
|
|
||||||
|
// If long matches at least 8 bytes, use that.
|
||||||
|
if cv == valLong {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cv == valShort {
|
||||||
|
candidateL = candidateS
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
// Check repeat at offset checkRep.
|
// Check repeat at offset checkRep.
|
||||||
const checkRep = 1
|
const checkRep = 1
|
||||||
if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
// Minimum length of a repeat. Tested with various values.
|
||||||
|
// While 4-5 offers improvements in some, 6 reduces
|
||||||
|
// regressions significantly.
|
||||||
|
const wantRepeatBytes = 6
|
||||||
|
const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
|
||||||
|
if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
|
||||||
base := s + checkRep
|
base := s + checkRep
|
||||||
// Extend back
|
// Extend back
|
||||||
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||||
@ -109,8 +129,8 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
|
|||||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
|
||||||
// Extend forward
|
// Extend forward
|
||||||
candidate := s - repeat + 4 + checkRep
|
candidate := s - repeat + wantRepeatBytes + checkRep
|
||||||
s += 4 + checkRep
|
s += wantRepeatBytes + checkRep
|
||||||
for s < len(src) {
|
for s < len(src) {
|
||||||
if len(src)-s < 8 {
|
if len(src)-s < 8 {
|
||||||
if src[s] == src[candidate] {
|
if src[s] == src[candidate] {
|
||||||
@ -127,28 +147,40 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
|
|||||||
s += 8
|
s += 8
|
||||||
candidate += 8
|
candidate += 8
|
||||||
}
|
}
|
||||||
if nextEmit > 0 {
|
|
||||||
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||||
d += emitRepeat(dst[d:], repeat, s-base)
|
d += emitRepeat(dst[d:], repeat, s-base)
|
||||||
} else {
|
|
||||||
// First match, cannot be repeat.
|
|
||||||
d += emitCopy(dst[d:], repeat, s-base)
|
|
||||||
}
|
|
||||||
nextEmit = s
|
nextEmit = s
|
||||||
if s >= sLimit {
|
if s >= sLimit {
|
||||||
goto emitRemainder
|
goto emitRemainder
|
||||||
}
|
}
|
||||||
|
// Index in-between
|
||||||
|
index0 := base + 1
|
||||||
|
index1 := s - 2
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
for index0 < index1 {
|
||||||
|
cv0 := load64(src, index0)
|
||||||
|
cv1 := load64(src, index1)
|
||||||
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
|
|
||||||
cv = load64(src, s)
|
cv = load64(src, s)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if uint32(cv) == load32(src, candidateL) {
|
// Long likely matches 7, so take that.
|
||||||
|
if uint32(cv) == uint32(valLong) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check our short candidate
|
// Check our short candidate
|
||||||
if uint32(cv) == load32(src, candidateS) {
|
if uint32(cv) == uint32(valShort) {
|
||||||
// Try a long candidate at s+1
|
// Try a long candidate at s+1
|
||||||
hashL = hash7(cv>>8, lTableBits)
|
hashL = hash7(cv>>8, lTableBits)
|
||||||
candidateL = int(lTable[hashL])
|
candidateL = int(lTable[hashL])
|
||||||
@ -227,21 +259,29 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
|
|||||||
// Do we have space for more, if not bail.
|
// Do we have space for more, if not bail.
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
// Index match start+1 (long) and start+2 (short)
|
|
||||||
|
// Index short & long
|
||||||
index0 := base + 1
|
index0 := base + 1
|
||||||
// Index match end-2 (long) and end-1 (short)
|
|
||||||
index1 := s - 2
|
index1 := s - 2
|
||||||
|
|
||||||
cv0 := load64(src, index0)
|
cv0 := load64(src, index0)
|
||||||
cv1 := load64(src, index1)
|
cv1 := load64(src, index1)
|
||||||
cv = load64(src, s)
|
|
||||||
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
|
|
||||||
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
|
||||||
lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
|
|
||||||
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 1
|
||||||
|
index1 -= 1
|
||||||
|
cv = load64(src, s)
|
||||||
|
|
||||||
|
// index every second long in between.
|
||||||
|
for index0 < index1 {
|
||||||
|
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
|
||||||
|
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
emitRemainder:
|
emitRemainder:
|
||||||
@ -260,6 +300,7 @@ emitRemainder:
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
|
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
|
||||||
@ -402,21 +443,649 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
|
|||||||
// Do we have space for more, if not bail.
|
// Do we have space for more, if not bail.
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
// Index match start+1 (long) and start+2 (short)
|
|
||||||
|
// Index short & long
|
||||||
index0 := base + 1
|
index0 := base + 1
|
||||||
// Index match end-2 (long) and end-1 (short)
|
|
||||||
index1 := s - 2
|
index1 := s - 2
|
||||||
|
|
||||||
cv0 := load64(src, index0)
|
cv0 := load64(src, index0)
|
||||||
cv1 := load64(src, index1)
|
cv1 := load64(src, index1)
|
||||||
cv = load64(src, s)
|
|
||||||
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
|
|
||||||
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
|
||||||
lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
|
|
||||||
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 1
|
||||||
|
index1 -= 1
|
||||||
|
cv = load64(src, s)
|
||||||
|
|
||||||
|
// index every second long in between.
|
||||||
|
for index0 < index1 {
|
||||||
|
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
|
||||||
|
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
if nextEmit < len(src) {
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+len(src)-nextEmit > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
|
// been written.
|
||||||
|
//
|
||||||
|
// It also assumes that:
|
||||||
|
//
|
||||||
|
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||||
|
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||||
|
func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
|
||||||
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
|
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||||
|
// looking for copies.
|
||||||
|
// Initialize the hash tables.
|
||||||
|
const (
|
||||||
|
// Long hash matches.
|
||||||
|
lTableBits = 17
|
||||||
|
maxLTableSize = 1 << lTableBits
|
||||||
|
|
||||||
|
// Short hash matches.
|
||||||
|
sTableBits = 14
|
||||||
|
maxSTableSize = 1 << sTableBits
|
||||||
|
|
||||||
|
maxAhead = 8 // maximum bytes ahead without checking sLimit
|
||||||
|
|
||||||
|
debug = false
|
||||||
|
)
|
||||||
|
|
||||||
|
sLimit := len(src) - inputMargin
|
||||||
|
if sLimit > MaxDictSrcOffset-maxAhead {
|
||||||
|
sLimit = MaxDictSrcOffset - maxAhead
|
||||||
|
}
|
||||||
|
if len(src) < minNonLiteralBlockSize {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
dict.initBetter()
|
||||||
|
|
||||||
|
var lTable [maxLTableSize]uint32
|
||||||
|
var sTable [maxSTableSize]uint32
|
||||||
|
|
||||||
|
// Bail if we can't compress to at least this.
|
||||||
|
dstLimit := len(src) - len(src)>>5 - 6
|
||||||
|
|
||||||
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
|
nextEmit := 0
|
||||||
|
|
||||||
|
// The encoded form must start with a literal, as there are no previous
|
||||||
|
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||||
|
s := 0
|
||||||
|
cv := load64(src, s)
|
||||||
|
|
||||||
|
// We initialize repeat to 0, so we never match on first attempt
|
||||||
|
repeat := len(dict.dict) - dict.repeat
|
||||||
|
|
||||||
|
// While in dict
|
||||||
|
searchDict:
|
||||||
|
for {
|
||||||
|
candidateL := 0
|
||||||
|
nextS := 0
|
||||||
|
for {
|
||||||
|
// Next src position to check
|
||||||
|
nextS = s + (s-nextEmit)>>7 + 1
|
||||||
|
if nextS > sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
hashL := hash7(cv, lTableBits)
|
||||||
|
hashS := hash4(cv, sTableBits)
|
||||||
|
candidateL = int(lTable[hashL])
|
||||||
|
candidateS := int(sTable[hashS])
|
||||||
|
dictL := int(dict.betterTableLong[hashL])
|
||||||
|
dictS := int(dict.betterTableShort[hashS])
|
||||||
|
lTable[hashL] = uint32(s)
|
||||||
|
sTable[hashS] = uint32(s)
|
||||||
|
|
||||||
|
valLong := load64(src, candidateL)
|
||||||
|
valShort := load64(src, candidateS)
|
||||||
|
|
||||||
|
// If long matches at least 8 bytes, use that.
|
||||||
|
if s != 0 {
|
||||||
|
if cv == valLong {
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
if cv == valShort {
|
||||||
|
candidateL = candidateS
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check dict repeat.
|
||||||
|
if repeat >= s+4 {
|
||||||
|
candidate := len(dict.dict) - repeat + s
|
||||||
|
if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
|
||||||
|
// Extend back
|
||||||
|
base := s
|
||||||
|
for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
if debug && nextEmit != base {
|
||||||
|
fmt.Println("emitted ", base-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
s += 4
|
||||||
|
candidate += 4
|
||||||
|
for candidate < len(dict.dict)-8 && s <= len(src)-8 {
|
||||||
|
if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
d += emitRepeat(dst[d:], repeat, s-base)
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
|
||||||
|
}
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
// Index in-between
|
||||||
|
index0 := base + 1
|
||||||
|
index1 := s - 2
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
for index0 < index1 {
|
||||||
|
cv0 := load64(src, index0)
|
||||||
|
cv1 := load64(src, index1)
|
||||||
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Don't try to find match at s==0
|
||||||
|
if s == 0 {
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Long likely matches 7, so take that.
|
||||||
|
if uint32(cv) == uint32(valLong) {
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Long dict...
|
||||||
|
if uint32(cv) == load32(dict.dict, dictL) {
|
||||||
|
candidateL = dictL
|
||||||
|
goto emitDict
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check our short candidate
|
||||||
|
if uint32(cv) == uint32(valShort) {
|
||||||
|
// Try a long candidate at s+1
|
||||||
|
hashL = hash7(cv>>8, lTableBits)
|
||||||
|
candidateL = int(lTable[hashL])
|
||||||
|
lTable[hashL] = uint32(s + 1)
|
||||||
|
if uint32(cv>>8) == load32(src, candidateL) {
|
||||||
|
s++
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
// Use our short candidate.
|
||||||
|
candidateL = candidateS
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
if uint32(cv) == load32(dict.dict, dictS) {
|
||||||
|
// Try a long candidate at s+1
|
||||||
|
hashL = hash7(cv>>8, lTableBits)
|
||||||
|
candidateL = int(lTable[hashL])
|
||||||
|
lTable[hashL] = uint32(s + 1)
|
||||||
|
if uint32(cv>>8) == load32(src, candidateL) {
|
||||||
|
s++
|
||||||
|
goto emitMatch
|
||||||
|
}
|
||||||
|
candidateL = dictS
|
||||||
|
goto emitDict
|
||||||
|
}
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
}
|
||||||
|
emitDict:
|
||||||
|
{
|
||||||
|
if debug {
|
||||||
|
if load32(dict.dict, candidateL) != load32(src, s) {
|
||||||
|
panic("dict emit mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Extend backwards.
|
||||||
|
// The top bytes will be rechecked to get the full match.
|
||||||
|
for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
|
||||||
|
candidateL--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", s-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
offset := s + (len(dict.dict)) - candidateL
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidateL += 4
|
||||||
|
for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
|
||||||
|
if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidateL += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
if repeat == offset {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
|
||||||
|
}
|
||||||
|
d += emitRepeat(dst[d:], offset, s-base)
|
||||||
|
} else {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
|
||||||
|
}
|
||||||
|
// Matches longer than 64 are split.
|
||||||
|
if s <= sLimit || s-base < 8 {
|
||||||
|
d += emitCopy(dst[d:], offset, s-base)
|
||||||
|
} else {
|
||||||
|
// Split to ensure we don't start a copy within next block.
|
||||||
|
d += emitCopy(dst[d:], offset, 4)
|
||||||
|
d += emitRepeat(dst[d:], offset, s-base-4)
|
||||||
|
}
|
||||||
|
repeat = offset
|
||||||
|
}
|
||||||
|
if false {
|
||||||
|
// Validate match.
|
||||||
|
if s <= candidateL {
|
||||||
|
panic("s <= candidate")
|
||||||
|
}
|
||||||
|
a := src[base:s]
|
||||||
|
b := dict.dict[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
break searchDict
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index short & long
|
||||||
|
index0 := base + 1
|
||||||
|
index1 := s - 2
|
||||||
|
|
||||||
|
cv0 := load64(src, index0)
|
||||||
|
cv1 := load64(src, index1)
|
||||||
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 1
|
||||||
|
index1 -= 1
|
||||||
|
cv = load64(src, s)
|
||||||
|
|
||||||
|
// index every second long in between.
|
||||||
|
for index0 < index1 {
|
||||||
|
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
|
||||||
|
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
emitMatch:
|
||||||
|
|
||||||
|
// Extend backwards
|
||||||
|
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
|
||||||
|
candidateL--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
base := s
|
||||||
|
offset := base - candidateL
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidateL += 4
|
||||||
|
for s < len(src) {
|
||||||
|
if len(src)-s < 8 {
|
||||||
|
if src[s] == src[candidateL] {
|
||||||
|
s++
|
||||||
|
candidateL++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidateL += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset > 65535 && s-base <= 5 && repeat != offset {
|
||||||
|
// Bail if the match is equal or worse to the encoding.
|
||||||
|
s = nextS + 1
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
if debug && nextEmit != s {
|
||||||
|
fmt.Println("emitted ", s-nextEmit, "literals")
|
||||||
|
}
|
||||||
|
if repeat == offset {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
|
||||||
|
}
|
||||||
|
d += emitRepeat(dst[d:], offset, s-base)
|
||||||
|
} else {
|
||||||
|
if debug {
|
||||||
|
fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
|
||||||
|
}
|
||||||
|
d += emitCopy(dst[d:], offset, s-base)
|
||||||
|
repeat = offset
|
||||||
|
}
|
||||||
|
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index short & long
|
||||||
|
index0 := base + 1
|
||||||
|
index1 := s - 2
|
||||||
|
|
||||||
|
cv0 := load64(src, index0)
|
||||||
|
cv1 := load64(src, index1)
|
||||||
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 1
|
||||||
|
index1 -= 1
|
||||||
|
cv = load64(src, s)
|
||||||
|
|
||||||
|
// index every second long in between.
|
||||||
|
for index0 < index1 {
|
||||||
|
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
|
||||||
|
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search without dict:
|
||||||
|
if repeat > s {
|
||||||
|
repeat = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// No more dict
|
||||||
|
sLimit = len(src) - inputMargin
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
if debug {
|
||||||
|
fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
candidateL := 0
|
||||||
|
nextS := 0
|
||||||
|
for {
|
||||||
|
// Next src position to check
|
||||||
|
nextS = s + (s-nextEmit)>>7 + 1
|
||||||
|
if nextS > sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
hashL := hash7(cv, lTableBits)
|
||||||
|
hashS := hash4(cv, sTableBits)
|
||||||
|
candidateL = int(lTable[hashL])
|
||||||
|
candidateS := int(sTable[hashS])
|
||||||
|
lTable[hashL] = uint32(s)
|
||||||
|
sTable[hashS] = uint32(s)
|
||||||
|
|
||||||
|
valLong := load64(src, candidateL)
|
||||||
|
valShort := load64(src, candidateS)
|
||||||
|
|
||||||
|
// If long matches at least 8 bytes, use that.
|
||||||
|
if cv == valLong {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cv == valShort {
|
||||||
|
candidateL = candidateS
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check repeat at offset checkRep.
|
||||||
|
const checkRep = 1
|
||||||
|
// Minimum length of a repeat. Tested with various values.
|
||||||
|
// While 4-5 offers improvements in some, 6 reduces
|
||||||
|
// regressions significantly.
|
||||||
|
const wantRepeatBytes = 6
|
||||||
|
const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
|
||||||
|
if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
|
||||||
|
base := s + checkRep
|
||||||
|
// Extend back
|
||||||
|
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
|
||||||
|
// Extend forward
|
||||||
|
candidate := s - repeat + wantRepeatBytes + checkRep
|
||||||
|
s += wantRepeatBytes + checkRep
|
||||||
|
for s < len(src) {
|
||||||
|
if len(src)-s < 8 {
|
||||||
|
if src[s] == src[candidate] {
|
||||||
|
s++
|
||||||
|
candidate++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||||
|
d += emitRepeat(dst[d:], repeat, s-base)
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
// Index in-between
|
||||||
|
index0 := base + 1
|
||||||
|
index1 := s - 2
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
for index0 < index1 {
|
||||||
|
cv0 := load64(src, index0)
|
||||||
|
cv1 := load64(src, index1)
|
||||||
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Long likely matches 7, so take that.
|
||||||
|
if uint32(cv) == uint32(valLong) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check our short candidate
|
||||||
|
if uint32(cv) == uint32(valShort) {
|
||||||
|
// Try a long candidate at s+1
|
||||||
|
hashL = hash7(cv>>8, lTableBits)
|
||||||
|
candidateL = int(lTable[hashL])
|
||||||
|
lTable[hashL] = uint32(s + 1)
|
||||||
|
if uint32(cv>>8) == load32(src, candidateL) {
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Use our short candidate.
|
||||||
|
candidateL = candidateS
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend backwards
|
||||||
|
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
|
||||||
|
candidateL--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
base := s
|
||||||
|
offset := base - candidateL
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidateL += 4
|
||||||
|
for s < len(src) {
|
||||||
|
if len(src)-s < 8 {
|
||||||
|
if src[s] == src[candidateL] {
|
||||||
|
s++
|
||||||
|
candidateL++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidateL += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset > 65535 && s-base <= 5 && repeat != offset {
|
||||||
|
// Bail if the match is equal or worse to the encoding.
|
||||||
|
s = nextS + 1
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||||
|
if repeat == offset {
|
||||||
|
d += emitRepeat(dst[d:], offset, s-base)
|
||||||
|
} else {
|
||||||
|
d += emitCopy(dst[d:], offset, s-base)
|
||||||
|
repeat = offset
|
||||||
|
}
|
||||||
|
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Index short & long
|
||||||
|
index0 := base + 1
|
||||||
|
index1 := s - 2
|
||||||
|
|
||||||
|
cv0 := load64(src, index0)
|
||||||
|
cv1 := load64(src, index1)
|
||||||
|
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||||
|
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||||
|
|
||||||
|
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||||
|
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||||
|
index0 += 1
|
||||||
|
index1 -= 1
|
||||||
|
cv = load64(src, s)
|
||||||
|
|
||||||
|
// index every second long in between.
|
||||||
|
for index0 < index1 {
|
||||||
|
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
|
||||||
|
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
|
||||||
|
index0 += 2
|
||||||
|
index1 -= 2
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
emitRemainder:
|
emitRemainder:
|
||||||
|
414
vendor/github.com/klauspost/compress/s2/encode_go.go
generated
vendored
414
vendor/github.com/klauspost/compress/s2/encode_go.go
generated
vendored
@ -4,14 +4,18 @@
|
|||||||
package s2
|
package s2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"math/bits"
|
"math/bits"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const hasAmd64Asm = false
|
||||||
|
|
||||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src))
|
// len(dst) >= MaxEncodedLen(len(src))
|
||||||
func encodeBlock(dst, src []byte) (d int) {
|
func encodeBlock(dst, src []byte) (d int) {
|
||||||
if len(src) < minNonLiteralBlockSize {
|
if len(src) < minNonLiteralBlockSize {
|
||||||
@ -25,6 +29,7 @@ func encodeBlock(dst, src []byte) (d int) {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src))
|
// len(dst) >= MaxEncodedLen(len(src))
|
||||||
func encodeBlockBetter(dst, src []byte) (d int) {
|
func encodeBlockBetter(dst, src []byte) (d int) {
|
||||||
return encodeBlockBetterGo(dst, src)
|
return encodeBlockBetterGo(dst, src)
|
||||||
@ -35,6 +40,7 @@ func encodeBlockBetter(dst, src []byte) (d int) {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src))
|
// len(dst) >= MaxEncodedLen(len(src))
|
||||||
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
||||||
return encodeBlockBetterSnappyGo(dst, src)
|
return encodeBlockBetterSnappyGo(dst, src)
|
||||||
@ -45,6 +51,7 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
|||||||
// been written.
|
// been written.
|
||||||
//
|
//
|
||||||
// It also assumes that:
|
// It also assumes that:
|
||||||
|
//
|
||||||
// len(dst) >= MaxEncodedLen(len(src))
|
// len(dst) >= MaxEncodedLen(len(src))
|
||||||
func encodeBlockSnappy(dst, src []byte) (d int) {
|
func encodeBlockSnappy(dst, src []byte) (d int) {
|
||||||
if len(src) < minNonLiteralBlockSize {
|
if len(src) < minNonLiteralBlockSize {
|
||||||
@ -56,6 +63,7 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
|
|||||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
||||||
func emitLiteral(dst, lit []byte) int {
|
func emitLiteral(dst, lit []byte) int {
|
||||||
@ -146,6 +154,7 @@ func emitRepeat(dst []byte, offset, length int) int {
|
|||||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 1 <= offset && offset <= math.MaxUint32
|
// 1 <= offset && offset <= math.MaxUint32
|
||||||
// 4 <= length && length <= 1 << 24
|
// 4 <= length && length <= 1 << 24
|
||||||
@ -214,6 +223,7 @@ func emitCopy(dst []byte, offset, length int) int {
|
|||||||
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
|
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 1 <= offset && offset <= math.MaxUint32
|
// 1 <= offset && offset <= math.MaxUint32
|
||||||
// 4 <= length && length <= 1 << 24
|
// 4 <= length && length <= 1 << 24
|
||||||
@ -273,8 +283,8 @@ func emitCopyNoRepeat(dst []byte, offset, length int) int {
|
|||||||
// matchLen returns how many bytes match in a and b
|
// matchLen returns how many bytes match in a and b
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
// len(a) <= len(b)
|
|
||||||
//
|
//
|
||||||
|
// len(a) <= len(b)
|
||||||
func matchLen(a []byte, b []byte) int {
|
func matchLen(a []byte, b []byte) int {
|
||||||
b = b[:len(a)]
|
b = b[:len(a)]
|
||||||
var checked int
|
var checked int
|
||||||
@ -305,3 +315,405 @@ func matchLen(a []byte, b []byte) int {
|
|||||||
}
|
}
|
||||||
return len(a) + checked
|
return len(a) + checked
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func calcBlockSize(src []byte) (d int) {
|
||||||
|
// Initialize the hash table.
|
||||||
|
const (
|
||||||
|
tableBits = 13
|
||||||
|
maxTableSize = 1 << tableBits
|
||||||
|
)
|
||||||
|
|
||||||
|
var table [maxTableSize]uint32
|
||||||
|
|
||||||
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
|
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||||
|
// looking for copies.
|
||||||
|
sLimit := len(src) - inputMargin
|
||||||
|
|
||||||
|
// Bail if we can't compress to at least this.
|
||||||
|
dstLimit := len(src) - len(src)>>5 - 5
|
||||||
|
|
||||||
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
|
nextEmit := 0
|
||||||
|
|
||||||
|
// The encoded form must start with a literal, as there are no previous
|
||||||
|
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||||
|
s := 1
|
||||||
|
cv := load64(src, s)
|
||||||
|
|
||||||
|
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||||
|
repeat := 1
|
||||||
|
|
||||||
|
for {
|
||||||
|
candidate := 0
|
||||||
|
for {
|
||||||
|
// Next src position to check
|
||||||
|
nextS := s + (s-nextEmit)>>6 + 4
|
||||||
|
if nextS > sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
hash0 := hash6(cv, tableBits)
|
||||||
|
hash1 := hash6(cv>>8, tableBits)
|
||||||
|
candidate = int(table[hash0])
|
||||||
|
candidate2 := int(table[hash1])
|
||||||
|
table[hash0] = uint32(s)
|
||||||
|
table[hash1] = uint32(s + 1)
|
||||||
|
hash2 := hash6(cv>>16, tableBits)
|
||||||
|
|
||||||
|
// Check repeat at offset checkRep.
|
||||||
|
const checkRep = 1
|
||||||
|
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||||
|
base := s + checkRep
|
||||||
|
// Extend back
|
||||||
|
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteralSize(src[nextEmit:base])
|
||||||
|
|
||||||
|
// Extend forward
|
||||||
|
candidate := s - repeat + 4 + checkRep
|
||||||
|
s += 4 + checkRep
|
||||||
|
for s <= sLimit {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopyNoRepeatSize(repeat, s-base)
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint32(cv) == load32(src, candidate) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
candidate = int(table[hash2])
|
||||||
|
if uint32(cv>>8) == load32(src, candidate2) {
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
candidate = candidate2
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
if uint32(cv>>16) == load32(src, candidate) {
|
||||||
|
s += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend backwards
|
||||||
|
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
|
||||||
|
candidate--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
|
||||||
|
d += emitLiteralSize(src[nextEmit:s])
|
||||||
|
|
||||||
|
// Call emitCopy, and then see if another emitCopy could be our next
|
||||||
|
// move. Repeat until we find no match for the input immediately after
|
||||||
|
// what was consumed by the last emitCopy call.
|
||||||
|
//
|
||||||
|
// If we exit this loop normally then we need to call emitLiteral next,
|
||||||
|
// though we don't yet know how big the literal will be. We handle that
|
||||||
|
// by proceeding to the next iteration of the main loop. We also can
|
||||||
|
// exit this loop via goto if we get close to exhausting the input.
|
||||||
|
for {
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
repeat = base - candidate
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidate += 4
|
||||||
|
for s <= len(src)-8 {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopyNoRepeatSize(repeat, s-base)
|
||||||
|
if false {
|
||||||
|
// Validate match.
|
||||||
|
a := src[base:s]
|
||||||
|
b := src[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
// Check for an immediate match, otherwise start search at s+1
|
||||||
|
x := load64(src, s-2)
|
||||||
|
m2Hash := hash6(x, tableBits)
|
||||||
|
currHash := hash6(x>>16, tableBits)
|
||||||
|
candidate = int(table[currHash])
|
||||||
|
table[m2Hash] = uint32(s - 2)
|
||||||
|
table[currHash] = uint32(s)
|
||||||
|
if uint32(x>>16) != load32(src, candidate) {
|
||||||
|
cv = load64(src, s+1)
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
if nextEmit < len(src) {
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+len(src)-nextEmit > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
d += emitLiteralSize(src[nextEmit:])
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func calcBlockSizeSmall(src []byte) (d int) {
|
||||||
|
// Initialize the hash table.
|
||||||
|
const (
|
||||||
|
tableBits = 9
|
||||||
|
maxTableSize = 1 << tableBits
|
||||||
|
)
|
||||||
|
|
||||||
|
var table [maxTableSize]uint32
|
||||||
|
|
||||||
|
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||||
|
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||||
|
// looking for copies.
|
||||||
|
sLimit := len(src) - inputMargin
|
||||||
|
|
||||||
|
// Bail if we can't compress to at least this.
|
||||||
|
dstLimit := len(src) - len(src)>>5 - 5
|
||||||
|
|
||||||
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
|
nextEmit := 0
|
||||||
|
|
||||||
|
// The encoded form must start with a literal, as there are no previous
|
||||||
|
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||||
|
s := 1
|
||||||
|
cv := load64(src, s)
|
||||||
|
|
||||||
|
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||||
|
repeat := 1
|
||||||
|
|
||||||
|
for {
|
||||||
|
candidate := 0
|
||||||
|
for {
|
||||||
|
// Next src position to check
|
||||||
|
nextS := s + (s-nextEmit)>>6 + 4
|
||||||
|
if nextS > sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
hash0 := hash6(cv, tableBits)
|
||||||
|
hash1 := hash6(cv>>8, tableBits)
|
||||||
|
candidate = int(table[hash0])
|
||||||
|
candidate2 := int(table[hash1])
|
||||||
|
table[hash0] = uint32(s)
|
||||||
|
table[hash1] = uint32(s + 1)
|
||||||
|
hash2 := hash6(cv>>16, tableBits)
|
||||||
|
|
||||||
|
// Check repeat at offset checkRep.
|
||||||
|
const checkRep = 1
|
||||||
|
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||||
|
base := s + checkRep
|
||||||
|
// Extend back
|
||||||
|
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||||
|
i--
|
||||||
|
base--
|
||||||
|
}
|
||||||
|
d += emitLiteralSize(src[nextEmit:base])
|
||||||
|
|
||||||
|
// Extend forward
|
||||||
|
candidate := s - repeat + 4 + checkRep
|
||||||
|
s += 4 + checkRep
|
||||||
|
for s <= sLimit {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopyNoRepeatSize(repeat, s-base)
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint32(cv) == load32(src, candidate) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
candidate = int(table[hash2])
|
||||||
|
if uint32(cv>>8) == load32(src, candidate2) {
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
candidate = candidate2
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
table[hash2] = uint32(s + 2)
|
||||||
|
if uint32(cv>>16) == load32(src, candidate) {
|
||||||
|
s += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
cv = load64(src, nextS)
|
||||||
|
s = nextS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend backwards
|
||||||
|
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
|
||||||
|
candidate--
|
||||||
|
s--
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+(s-nextEmit) > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||||
|
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||||
|
// them as literal bytes.
|
||||||
|
|
||||||
|
d += emitLiteralSize(src[nextEmit:s])
|
||||||
|
|
||||||
|
// Call emitCopy, and then see if another emitCopy could be our next
|
||||||
|
// move. Repeat until we find no match for the input immediately after
|
||||||
|
// what was consumed by the last emitCopy call.
|
||||||
|
//
|
||||||
|
// If we exit this loop normally then we need to call emitLiteral next,
|
||||||
|
// though we don't yet know how big the literal will be. We handle that
|
||||||
|
// by proceeding to the next iteration of the main loop. We also can
|
||||||
|
// exit this loop via goto if we get close to exhausting the input.
|
||||||
|
for {
|
||||||
|
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||||
|
// literal bytes prior to s.
|
||||||
|
base := s
|
||||||
|
repeat = base - candidate
|
||||||
|
|
||||||
|
// Extend the 4-byte match as long as possible.
|
||||||
|
s += 4
|
||||||
|
candidate += 4
|
||||||
|
for s <= len(src)-8 {
|
||||||
|
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||||
|
s += bits.TrailingZeros64(diff) >> 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s += 8
|
||||||
|
candidate += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
d += emitCopyNoRepeatSize(repeat, s-base)
|
||||||
|
if false {
|
||||||
|
// Validate match.
|
||||||
|
a := src[base:s]
|
||||||
|
b := src[base-repeat : base-repeat+(s-base)]
|
||||||
|
if !bytes.Equal(a, b) {
|
||||||
|
panic("mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nextEmit = s
|
||||||
|
if s >= sLimit {
|
||||||
|
goto emitRemainder
|
||||||
|
}
|
||||||
|
|
||||||
|
if d > dstLimit {
|
||||||
|
// Do we have space for more, if not bail.
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
// Check for an immediate match, otherwise start search at s+1
|
||||||
|
x := load64(src, s-2)
|
||||||
|
m2Hash := hash6(x, tableBits)
|
||||||
|
currHash := hash6(x>>16, tableBits)
|
||||||
|
candidate = int(table[currHash])
|
||||||
|
table[m2Hash] = uint32(s - 2)
|
||||||
|
table[currHash] = uint32(s)
|
||||||
|
if uint32(x>>16) != load32(src, candidate) {
|
||||||
|
cv = load64(src, s+1)
|
||||||
|
s++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
emitRemainder:
|
||||||
|
if nextEmit < len(src) {
|
||||||
|
// Bail if we exceed the maximum size.
|
||||||
|
if d+len(src)-nextEmit > dstLimit {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
d += emitLiteralSize(src[nextEmit:])
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
//
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
||||||
|
func emitLiteralSize(lit []byte) int {
|
||||||
|
if len(lit) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case len(lit) <= 60:
|
||||||
|
return len(lit) + 1
|
||||||
|
case len(lit) <= 1<<8:
|
||||||
|
return len(lit) + 2
|
||||||
|
case len(lit) <= 1<<16:
|
||||||
|
return len(lit) + 3
|
||||||
|
case len(lit) <= 1<<24:
|
||||||
|
return len(lit) + 4
|
||||||
|
default:
|
||||||
|
return len(lit) + 5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
|
||||||
|
panic("cvtLZ4BlockAsm should be unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
|
||||||
|
panic("cvtLZ4BlockSnappyAsm should be unreachable")
|
||||||
|
}
|
||||||
|
29
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
generated
vendored
29
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
|
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
|
||||||
|
|
||||||
//go:build !appengine && !noasm && gc && !noasm
|
//go:build !appengine && !noasm && gc && !noasm
|
||||||
// +build !appengine,!noasm,gc,!noasm
|
|
||||||
|
|
||||||
package s2
|
package s2
|
||||||
|
|
||||||
@ -147,9 +146,24 @@ func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
|
|||||||
//go:noescape
|
//go:noescape
|
||||||
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
|
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
|
||||||
|
|
||||||
|
// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
|
// Maximum input 4294967295 bytes.
|
||||||
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
|
//
|
||||||
|
//go:noescape
|
||||||
|
func calcBlockSize(src []byte) int
|
||||||
|
|
||||||
|
// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst.
|
||||||
|
// Maximum input 1024 bytes.
|
||||||
|
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||||
|
//
|
||||||
|
//go:noescape
|
||||||
|
func calcBlockSizeSmall(src []byte) int
|
||||||
|
|
||||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes with margin of 0 bytes
|
// dst is long enough to hold the encoded bytes with margin of 0 bytes
|
||||||
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
||||||
//
|
//
|
||||||
@ -165,6 +179,7 @@ func emitRepeat(dst []byte, offset int, length int) int
|
|||||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 1 <= offset && offset <= math.MaxUint32
|
// 1 <= offset && offset <= math.MaxUint32
|
||||||
// 4 <= length && length <= 1 << 24
|
// 4 <= length && length <= 1 << 24
|
||||||
@ -175,6 +190,7 @@ func emitCopy(dst []byte, offset int, length int) int
|
|||||||
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
|
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// dst is long enough to hold the encoded bytes
|
// dst is long enough to hold the encoded bytes
|
||||||
// 1 <= offset && offset <= math.MaxUint32
|
// 1 <= offset && offset <= math.MaxUint32
|
||||||
// 4 <= length && length <= 1 << 24
|
// 4 <= length && length <= 1 << 24
|
||||||
@ -185,7 +201,18 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int
|
|||||||
// matchLen returns how many bytes match in a and b
|
// matchLen returns how many bytes match in a and b
|
||||||
//
|
//
|
||||||
// It assumes that:
|
// It assumes that:
|
||||||
|
//
|
||||||
// len(a) <= len(b)
|
// len(a) <= len(b)
|
||||||
//
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func matchLen(a []byte, b []byte) int
|
func matchLen(a []byte, b []byte) int
|
||||||
|
|
||||||
|
// cvtLZ4Block converts an LZ4 block to S2
|
||||||
|
//
|
||||||
|
//go:noescape
|
||||||
|
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
|
||||||
|
|
||||||
|
// cvtLZ4Block converts an LZ4 block to S2
|
||||||
|
//
|
||||||
|
//go:noescape
|
||||||
|
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
|
||||||
|
18171
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
generated
vendored
18171
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
generated
vendored
File diff suppressed because it is too large
Load Diff
585
vendor/github.com/klauspost/compress/s2/lz4convert.go
generated
vendored
Normal file
585
vendor/github.com/klauspost/compress/s2/lz4convert.go
generated
vendored
Normal file
@ -0,0 +1,585 @@
|
|||||||
|
// Copyright (c) 2022 Klaus Post. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package s2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LZ4Converter provides conversion from LZ4 blocks as defined here:
|
||||||
|
// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
|
||||||
|
type LZ4Converter struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrDstTooSmall is returned when provided destination is too small.
|
||||||
|
var ErrDstTooSmall = errors.New("s2: destination too small")
|
||||||
|
|
||||||
|
// ConvertBlock will convert an LZ4 block and append it as an S2
|
||||||
|
// block without block length to dst.
|
||||||
|
// The uncompressed size is returned as well.
|
||||||
|
// dst must have capacity to contain the entire compressed block.
|
||||||
|
func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst, 0, nil
|
||||||
|
}
|
||||||
|
const debug = false
|
||||||
|
const inline = true
|
||||||
|
const lz4MinMatch = 4
|
||||||
|
|
||||||
|
s, d := 0, len(dst)
|
||||||
|
dst = dst[:cap(dst)]
|
||||||
|
if !debug && hasAmd64Asm {
|
||||||
|
res, sz := cvtLZ4BlockAsm(dst[d:], src)
|
||||||
|
if res < 0 {
|
||||||
|
const (
|
||||||
|
errCorrupt = -1
|
||||||
|
errDstTooSmall = -2
|
||||||
|
)
|
||||||
|
switch res {
|
||||||
|
case errCorrupt:
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
case errDstTooSmall:
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
default:
|
||||||
|
return nil, 0, fmt.Errorf("unexpected result: %d", res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d+sz > len(dst) {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
return dst[:d+sz], res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dLimit := len(dst) - 10
|
||||||
|
var lastOffset uint16
|
||||||
|
var uncompressed int
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
return dst[:d], 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
// Read literal info
|
||||||
|
token := src[s]
|
||||||
|
ll := int(token >> 4)
|
||||||
|
ml := int(lz4MinMatch + (token & 0xf))
|
||||||
|
|
||||||
|
// If upper nibble is 15, literal length is extended
|
||||||
|
if token >= 0xf0 {
|
||||||
|
for {
|
||||||
|
s++
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return dst[:d], 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
ll += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Skip past token
|
||||||
|
if s+ll >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
s++
|
||||||
|
if ll > 0 {
|
||||||
|
if d+ll > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit %d literals\n", ll)
|
||||||
|
}
|
||||||
|
d += emitLiteralGo(dst[d:], src[s:s+ll])
|
||||||
|
s += ll
|
||||||
|
uncompressed += ll
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we are done...
|
||||||
|
if s == len(src) && ml == lz4MinMatch {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// 2 byte offset
|
||||||
|
if s >= len(src)-2 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
offset := binary.LittleEndian.Uint16(src[s:])
|
||||||
|
s += 2
|
||||||
|
if offset == 0 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
if int(offset) > uncompressed {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml == lz4MinMatch+15 {
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
s++
|
||||||
|
ml += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if offset == lastOffset {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
|
||||||
|
}
|
||||||
|
if !inline {
|
||||||
|
d += emitRepeat16(dst[d:], offset, ml)
|
||||||
|
} else {
|
||||||
|
length := ml
|
||||||
|
dst := dst[d:]
|
||||||
|
for len(dst) > 5 {
|
||||||
|
// Repeat offset, make length cheaper
|
||||||
|
length -= 4
|
||||||
|
if length <= 4 {
|
||||||
|
dst[0] = uint8(length)<<2 | tagCopy1
|
||||||
|
dst[1] = 0
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length < 8 && offset < 2048 {
|
||||||
|
// Encode WITH offset
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length < (1<<8)+4 {
|
||||||
|
length -= 4
|
||||||
|
dst[2] = uint8(length)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 5<<2 | tagCopy1
|
||||||
|
d += 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length < (1<<16)+(1<<8) {
|
||||||
|
length -= 1 << 8
|
||||||
|
dst[3] = uint8(length >> 8)
|
||||||
|
dst[2] = uint8(length >> 0)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 6<<2 | tagCopy1
|
||||||
|
d += 4
|
||||||
|
break
|
||||||
|
}
|
||||||
|
const maxRepeat = (1 << 24) - 1
|
||||||
|
length -= 1 << 16
|
||||||
|
left := 0
|
||||||
|
if length > maxRepeat {
|
||||||
|
left = length - maxRepeat + 4
|
||||||
|
length = maxRepeat - 4
|
||||||
|
}
|
||||||
|
dst[4] = uint8(length >> 16)
|
||||||
|
dst[3] = uint8(length >> 8)
|
||||||
|
dst[2] = uint8(length >> 0)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 7<<2 | tagCopy1
|
||||||
|
if left > 0 {
|
||||||
|
d += 5 + emitRepeat16(dst[5:], offset, left)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d += 5
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
|
||||||
|
}
|
||||||
|
if !inline {
|
||||||
|
d += emitCopy16(dst[d:], offset, ml)
|
||||||
|
} else {
|
||||||
|
length := ml
|
||||||
|
dst := dst[d:]
|
||||||
|
for len(dst) > 5 {
|
||||||
|
// Offset no more than 2 bytes.
|
||||||
|
if length > 64 {
|
||||||
|
off := 3
|
||||||
|
if offset < 2048 {
|
||||||
|
// emit 8 bytes as tagCopy1, rest as repeats.
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
|
||||||
|
length -= 8
|
||||||
|
off = 2
|
||||||
|
} else {
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
// Emit remaining as repeat value (minimum 4 bytes).
|
||||||
|
dst[2] = uint8(offset >> 8)
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = 59<<2 | tagCopy2
|
||||||
|
length -= 60
|
||||||
|
}
|
||||||
|
// Emit remaining as repeats, at least 4 bytes remain.
|
||||||
|
d += off + emitRepeat16(dst[off:], offset, length)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[2] = uint8(offset >> 8)
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
d += 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastOffset = offset
|
||||||
|
}
|
||||||
|
uncompressed += ml
|
||||||
|
if d > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst[:d], uncompressed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertBlockSnappy will convert an LZ4 block and append it
|
||||||
|
// as a Snappy block without block length to dst.
|
||||||
|
// The uncompressed size is returned as well.
|
||||||
|
// dst must have capacity to contain the entire compressed block.
|
||||||
|
func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst, 0, nil
|
||||||
|
}
|
||||||
|
const debug = false
|
||||||
|
const lz4MinMatch = 4
|
||||||
|
|
||||||
|
s, d := 0, len(dst)
|
||||||
|
dst = dst[:cap(dst)]
|
||||||
|
// Use assembly when possible
|
||||||
|
if !debug && hasAmd64Asm {
|
||||||
|
res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
|
||||||
|
if res < 0 {
|
||||||
|
const (
|
||||||
|
errCorrupt = -1
|
||||||
|
errDstTooSmall = -2
|
||||||
|
)
|
||||||
|
switch res {
|
||||||
|
case errCorrupt:
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
case errDstTooSmall:
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
default:
|
||||||
|
return nil, 0, fmt.Errorf("unexpected result: %d", res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d+sz > len(dst) {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
return dst[:d+sz], res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dLimit := len(dst) - 10
|
||||||
|
var uncompressed int
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
// Read literal info
|
||||||
|
token := src[s]
|
||||||
|
ll := int(token >> 4)
|
||||||
|
ml := int(lz4MinMatch + (token & 0xf))
|
||||||
|
|
||||||
|
// If upper nibble is 15, literal length is extended
|
||||||
|
if token >= 0xf0 {
|
||||||
|
for {
|
||||||
|
s++
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
ll += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Skip past token
|
||||||
|
if s+ll >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
s++
|
||||||
|
if ll > 0 {
|
||||||
|
if d+ll > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit %d literals\n", ll)
|
||||||
|
}
|
||||||
|
d += emitLiteralGo(dst[d:], src[s:s+ll])
|
||||||
|
s += ll
|
||||||
|
uncompressed += ll
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we are done...
|
||||||
|
if s == len(src) && ml == lz4MinMatch {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// 2 byte offset
|
||||||
|
if s >= len(src)-2 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
offset := binary.LittleEndian.Uint16(src[s:])
|
||||||
|
s += 2
|
||||||
|
if offset == 0 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
if int(offset) > uncompressed {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml == lz4MinMatch+15 {
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
s++
|
||||||
|
ml += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
|
||||||
|
}
|
||||||
|
length := ml
|
||||||
|
// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
|
||||||
|
for length > 0 {
|
||||||
|
if d >= dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset no more than 2 bytes.
|
||||||
|
if length > 64 {
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
dst[d+2] = uint8(offset >> 8)
|
||||||
|
dst[d+1] = uint8(offset)
|
||||||
|
dst[d+0] = 63<<2 | tagCopy2
|
||||||
|
length -= 64
|
||||||
|
d += 3
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 || length < 4 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[d+2] = uint8(offset >> 8)
|
||||||
|
dst[d+1] = uint8(offset)
|
||||||
|
dst[d+0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
d += 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[d+1] = uint8(offset)
|
||||||
|
dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
uncompressed += ml
|
||||||
|
if d > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst[:d], uncompressed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitRepeat writes a repeat chunk and returns the number of bytes written.
|
||||||
|
// Length must be at least 4 and < 1<<24
|
||||||
|
func emitRepeat16(dst []byte, offset uint16, length int) int {
|
||||||
|
// Repeat offset, make length cheaper
|
||||||
|
length -= 4
|
||||||
|
if length <= 4 {
|
||||||
|
dst[0] = uint8(length)<<2 | tagCopy1
|
||||||
|
dst[1] = 0
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
if length < 8 && offset < 2048 {
|
||||||
|
// Encode WITH offset
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
if length < (1<<8)+4 {
|
||||||
|
length -= 4
|
||||||
|
dst[2] = uint8(length)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 5<<2 | tagCopy1
|
||||||
|
return 3
|
||||||
|
}
|
||||||
|
if length < (1<<16)+(1<<8) {
|
||||||
|
length -= 1 << 8
|
||||||
|
dst[3] = uint8(length >> 8)
|
||||||
|
dst[2] = uint8(length >> 0)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 6<<2 | tagCopy1
|
||||||
|
return 4
|
||||||
|
}
|
||||||
|
const maxRepeat = (1 << 24) - 1
|
||||||
|
length -= 1 << 16
|
||||||
|
left := 0
|
||||||
|
if length > maxRepeat {
|
||||||
|
left = length - maxRepeat + 4
|
||||||
|
length = maxRepeat - 4
|
||||||
|
}
|
||||||
|
dst[4] = uint8(length >> 16)
|
||||||
|
dst[3] = uint8(length >> 8)
|
||||||
|
dst[2] = uint8(length >> 0)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 7<<2 | tagCopy1
|
||||||
|
if left > 0 {
|
||||||
|
return 5 + emitRepeat16(dst[5:], offset, left)
|
||||||
|
}
|
||||||
|
return 5
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
//
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 1 <= offset && offset <= math.MaxUint16
|
||||||
|
// 4 <= length && length <= math.MaxUint32
|
||||||
|
func emitCopy16(dst []byte, offset uint16, length int) int {
|
||||||
|
// Offset no more than 2 bytes.
|
||||||
|
if length > 64 {
|
||||||
|
off := 3
|
||||||
|
if offset < 2048 {
|
||||||
|
// emit 8 bytes as tagCopy1, rest as repeats.
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
|
||||||
|
length -= 8
|
||||||
|
off = 2
|
||||||
|
} else {
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
// Emit remaining as repeat value (minimum 4 bytes).
|
||||||
|
dst[2] = uint8(offset >> 8)
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = 59<<2 | tagCopy2
|
||||||
|
length -= 60
|
||||||
|
}
|
||||||
|
// Emit remaining as repeats, at least 4 bytes remain.
|
||||||
|
return off + emitRepeat16(dst[off:], offset, length)
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[2] = uint8(offset >> 8)
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
return 3
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||||
|
//
|
||||||
|
// It assumes that:
|
||||||
|
//
|
||||||
|
// dst is long enough to hold the encoded bytes
|
||||||
|
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
||||||
|
func emitLiteralGo(dst, lit []byte) int {
|
||||||
|
if len(lit) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
i, n := 0, uint(len(lit)-1)
|
||||||
|
switch {
|
||||||
|
case n < 60:
|
||||||
|
dst[0] = uint8(n)<<2 | tagLiteral
|
||||||
|
i = 1
|
||||||
|
case n < 1<<8:
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
dst[0] = 60<<2 | tagLiteral
|
||||||
|
i = 2
|
||||||
|
case n < 1<<16:
|
||||||
|
dst[2] = uint8(n >> 8)
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
dst[0] = 61<<2 | tagLiteral
|
||||||
|
i = 3
|
||||||
|
case n < 1<<24:
|
||||||
|
dst[3] = uint8(n >> 16)
|
||||||
|
dst[2] = uint8(n >> 8)
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
dst[0] = 62<<2 | tagLiteral
|
||||||
|
i = 4
|
||||||
|
default:
|
||||||
|
dst[4] = uint8(n >> 24)
|
||||||
|
dst[3] = uint8(n >> 16)
|
||||||
|
dst[2] = uint8(n >> 8)
|
||||||
|
dst[1] = uint8(n)
|
||||||
|
dst[0] = 63<<2 | tagLiteral
|
||||||
|
i = 5
|
||||||
|
}
|
||||||
|
return i + copy(dst[i:], lit)
|
||||||
|
}
|
2
vendor/github.com/klauspost/compress/zstd/README.md
generated
vendored
2
vendor/github.com/klauspost/compress/zstd/README.md
generated
vendored
@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
|
|||||||
|
|
||||||
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
|
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
|
||||||
|
|
||||||
|
For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
|
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
|
||||||
|
19
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
19
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
@ -10,7 +10,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
@ -83,8 +82,9 @@ type blockDec struct {
|
|||||||
|
|
||||||
err error
|
err error
|
||||||
|
|
||||||
// Check against this crc
|
// Check against this crc, if hasCRC is true.
|
||||||
checkCRC []byte
|
checkCRC uint32
|
||||||
|
hasCRC bool
|
||||||
|
|
||||||
// Frame to use for singlethreaded decoding.
|
// Frame to use for singlethreaded decoding.
|
||||||
// Should not be used by the decoder itself since parent may be another frame.
|
// Should not be used by the decoder itself since parent may be another frame.
|
||||||
@ -192,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Read block data.
|
// Read block data.
|
||||||
if cap(b.dataStorage) < cSize {
|
if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
|
||||||
|
// byteBuf doesn't need a destination buffer.
|
||||||
if b.lowMem || cSize > maxCompressedBlockSize {
|
if b.lowMem || cSize > maxCompressedBlockSize {
|
||||||
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
|
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
|
||||||
} else {
|
} else {
|
||||||
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
|
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if cap(b.dst) <= maxSize {
|
|
||||||
b.dst = make([]byte, 0, maxSize+1)
|
|
||||||
}
|
|
||||||
b.data, err = br.readBig(cSize, b.dataStorage)
|
b.data, err = br.readBig(cSize, b.dataStorage)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
@ -210,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if cap(b.dst) <= maxSize {
|
||||||
|
b.dst = make([]byte, 0, maxSize+1)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -233,7 +234,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
|
|||||||
if b.lowMem {
|
if b.lowMem {
|
||||||
b.dst = make([]byte, b.RLESize)
|
b.dst = make([]byte, b.RLESize)
|
||||||
} else {
|
} else {
|
||||||
b.dst = make([]byte, maxBlockSize)
|
b.dst = make([]byte, maxCompressedBlockSize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.dst = b.dst[:b.RLESize]
|
b.dst = b.dst[:b.RLESize]
|
||||||
@ -651,7 +652,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
|||||||
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
|
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
|
||||||
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
|
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
|
||||||
buf.Write(in)
|
buf.Write(in)
|
||||||
ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
|
os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
16
vendor/github.com/klauspost/compress/zstd/bytebuf.go
generated
vendored
16
vendor/github.com/klauspost/compress/zstd/bytebuf.go
generated
vendored
@ -7,7 +7,6 @@ package zstd
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type byteBuffer interface {
|
type byteBuffer interface {
|
||||||
@ -23,7 +22,7 @@ type byteBuffer interface {
|
|||||||
readByte() (byte, error)
|
readByte() (byte, error)
|
||||||
|
|
||||||
// Skip n bytes.
|
// Skip n bytes.
|
||||||
skipN(n int) error
|
skipN(n int64) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// in-memory buffer
|
// in-memory buffer
|
||||||
@ -62,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) {
|
|||||||
return r, nil
|
return r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *byteBuf) skipN(n int) error {
|
func (b *byteBuf) skipN(n int64) error {
|
||||||
bb := *b
|
bb := *b
|
||||||
if len(bb) < n {
|
if n < 0 {
|
||||||
|
return fmt.Errorf("negative skip (%d) requested", n)
|
||||||
|
}
|
||||||
|
if int64(len(bb)) < n {
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
*b = bb[n:]
|
*b = bb[n:]
|
||||||
@ -120,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) {
|
|||||||
return r.tmp[0], nil
|
return r.tmp[0], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *readerWrapper) skipN(n int) error {
|
func (r *readerWrapper) skipN(n int64) error {
|
||||||
n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
|
n2, err := io.CopyN(io.Discard, r.r, n)
|
||||||
if n2 != int64(n) {
|
if n2 != n {
|
||||||
err = io.ErrUnexpectedEOF
|
err = io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
9
vendor/github.com/klauspost/compress/zstd/decodeheader.go
generated
vendored
9
vendor/github.com/klauspost/compress/zstd/decodeheader.go
generated
vendored
@ -4,7 +4,6 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
|
|||||||
}
|
}
|
||||||
h.HeaderSize += 4
|
h.HeaderSize += 4
|
||||||
b, in := in[:4], in[4:]
|
b, in := in[:4], in[4:]
|
||||||
if !bytes.Equal(b, frameMagic) {
|
if string(b) != frameMagic {
|
||||||
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
|
if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
|
||||||
return ErrMagicMismatch
|
return ErrMagicMismatch
|
||||||
}
|
}
|
||||||
if len(in) < 4 {
|
if len(in) < 4 {
|
||||||
@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
|
|||||||
}
|
}
|
||||||
b, in = in[:size], in[size:]
|
b, in = in[:size], in[size:]
|
||||||
h.HeaderSize += int(size)
|
h.HeaderSize += int(size)
|
||||||
switch size {
|
switch len(b) {
|
||||||
case 1:
|
case 1:
|
||||||
h.DictionaryID = uint32(b[0])
|
h.DictionaryID = uint32(b[0])
|
||||||
case 2:
|
case 2:
|
||||||
@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
|
|||||||
}
|
}
|
||||||
b, in = in[:fcsSize], in[fcsSize:]
|
b, in = in[:fcsSize], in[fcsSize:]
|
||||||
h.HeaderSize += int(fcsSize)
|
h.HeaderSize += int(fcsSize)
|
||||||
switch fcsSize {
|
switch len(b) {
|
||||||
case 1:
|
case 1:
|
||||||
h.FrameContentSize = uint64(b[0])
|
h.FrameContentSize = uint64(b[0])
|
||||||
case 2:
|
case 2:
|
||||||
|
138
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
138
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"io"
|
"io"
|
||||||
@ -35,13 +34,13 @@ type Decoder struct {
|
|||||||
br readerWrapper
|
br readerWrapper
|
||||||
enabled bool
|
enabled bool
|
||||||
inFrame bool
|
inFrame bool
|
||||||
|
dstBuf []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
frame *frameDec
|
frame *frameDec
|
||||||
|
|
||||||
// Custom dictionaries.
|
// Custom dictionaries.
|
||||||
// Always uses copies.
|
dicts map[uint32]*dict
|
||||||
dicts map[uint32]dict
|
|
||||||
|
|
||||||
// streamWg is the waitgroup for all streams
|
// streamWg is the waitgroup for all streams
|
||||||
streamWg sync.WaitGroup
|
streamWg sync.WaitGroup
|
||||||
@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Transfer option dicts.
|
// Transfer option dicts.
|
||||||
d.dicts = make(map[uint32]dict, len(d.o.dicts))
|
d.dicts = make(map[uint32]*dict, len(d.o.dicts))
|
||||||
for _, dc := range d.o.dicts {
|
for _, dc := range d.o.dicts {
|
||||||
d.dicts[dc.id] = dc
|
d.dicts[dc.id] = dc
|
||||||
}
|
}
|
||||||
@ -187,21 +186,23 @@ func (d *Decoder) Reset(r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If bytes buffer and < 5MB, do sync decoding anyway.
|
// If bytes buffer and < 5MB, do sync decoding anyway.
|
||||||
if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
|
if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
|
||||||
bb2 := bb
|
bb2 := bb
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
|
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
|
||||||
}
|
}
|
||||||
b := bb2.Bytes()
|
b := bb2.Bytes()
|
||||||
var dst []byte
|
var dst []byte
|
||||||
if cap(d.current.b) > 0 {
|
if cap(d.syncStream.dstBuf) > 0 {
|
||||||
dst = d.current.b
|
dst = d.syncStream.dstBuf[:0]
|
||||||
}
|
}
|
||||||
|
|
||||||
dst, err := d.DecodeAll(b, dst[:0])
|
dst, err := d.DecodeAll(b, dst)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = io.EOF
|
err = io.EOF
|
||||||
}
|
}
|
||||||
|
// Save output buffer
|
||||||
|
d.syncStream.dstBuf = dst
|
||||||
d.current.b = dst
|
d.current.b = dst
|
||||||
d.current.err = err
|
d.current.err = err
|
||||||
d.current.flushed = true
|
d.current.flushed = true
|
||||||
@ -216,6 +217,7 @@ func (d *Decoder) Reset(r io.Reader) error {
|
|||||||
d.current.err = nil
|
d.current.err = nil
|
||||||
d.current.flushed = false
|
d.current.flushed = false
|
||||||
d.current.d = nil
|
d.current.d = nil
|
||||||
|
d.syncStream.dstBuf = nil
|
||||||
|
|
||||||
// Ensure no-one else is still running...
|
// Ensure no-one else is still running...
|
||||||
d.streamWg.Wait()
|
d.streamWg.Wait()
|
||||||
@ -312,6 +314,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
|||||||
// Grab a block decoder and frame decoder.
|
// Grab a block decoder and frame decoder.
|
||||||
block := <-d.decoders
|
block := <-d.decoders
|
||||||
frame := block.localFrame
|
frame := block.localFrame
|
||||||
|
initialSize := len(dst)
|
||||||
defer func() {
|
defer func() {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
printf("re-adding decoder: %p", block)
|
printf("re-adding decoder: %p", block)
|
||||||
@ -337,21 +340,26 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return dst, err
|
return dst, err
|
||||||
}
|
}
|
||||||
if frame.DictionaryID != nil {
|
if err = d.setDict(frame); err != nil {
|
||||||
dict, ok := d.dicts[*frame.DictionaryID]
|
return nil, err
|
||||||
if !ok {
|
|
||||||
return nil, ErrUnknownDictionary
|
|
||||||
}
|
|
||||||
if debugDecoder {
|
|
||||||
println("setting dict", frame.DictionaryID)
|
|
||||||
}
|
|
||||||
frame.history.setDict(&dict)
|
|
||||||
}
|
}
|
||||||
if frame.WindowSize > d.o.maxWindowSize {
|
if frame.WindowSize > d.o.maxWindowSize {
|
||||||
|
if debugDecoder {
|
||||||
|
println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
|
||||||
|
}
|
||||||
return dst, ErrWindowSizeExceeded
|
return dst, ErrWindowSizeExceeded
|
||||||
}
|
}
|
||||||
if frame.FrameContentSize != fcsUnknown {
|
if frame.FrameContentSize != fcsUnknown {
|
||||||
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
|
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
|
||||||
|
if debugDecoder {
|
||||||
|
println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
|
||||||
|
}
|
||||||
|
return dst, ErrDecoderSizeExceeded
|
||||||
|
}
|
||||||
|
if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
|
||||||
|
if debugDecoder {
|
||||||
|
println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
|
||||||
|
}
|
||||||
return dst, ErrDecoderSizeExceeded
|
return dst, ErrDecoderSizeExceeded
|
||||||
}
|
}
|
||||||
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
|
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
|
||||||
@ -361,7 +369,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if cap(dst) == 0 {
|
if cap(dst) == 0 && !d.o.limitToCap {
|
||||||
// Allocate len(input) * 2 by default if nothing is provided
|
// Allocate len(input) * 2 by default if nothing is provided
|
||||||
// and we didn't get frame content size.
|
// and we didn't get frame content size.
|
||||||
size := len(input) * 2
|
size := len(input) * 2
|
||||||
@ -379,6 +387,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, err
|
return dst, err
|
||||||
}
|
}
|
||||||
|
if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
|
||||||
|
return dst, ErrDecoderSizeExceeded
|
||||||
|
}
|
||||||
if len(frame.bBuf) == 0 {
|
if len(frame.bBuf) == 0 {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("frame dbuf empty")
|
println("frame dbuf empty")
|
||||||
@ -439,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
|||||||
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
|
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !d.o.ignoreChecksum && len(next.b) > 0 {
|
if d.o.ignoreChecksum {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(next.b) > 0 {
|
||||||
n, err := d.current.crc.Write(next.b)
|
n, err := d.current.crc.Write(next.b)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if n != len(next.b) {
|
if n != len(next.b) {
|
||||||
@ -447,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
|
if next.err == nil && next.d != nil && next.d.hasCRC {
|
||||||
got := d.current.crc.Sum64()
|
got := uint32(d.current.crc.Sum64())
|
||||||
var tmp [4]byte
|
if got != next.d.checkCRC {
|
||||||
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
|
|
||||||
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
|
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
|
printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
|
||||||
}
|
}
|
||||||
d.current.err = ErrCRCMismatch
|
d.current.err = ErrCRCMismatch
|
||||||
} else {
|
} else {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC ok", tmp[:])
|
printf("CRC ok %08x\n", got)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -474,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) {
|
|||||||
if !d.syncStream.inFrame {
|
if !d.syncStream.inFrame {
|
||||||
d.frame.history.reset()
|
d.frame.history.reset()
|
||||||
d.current.err = d.frame.reset(&d.syncStream.br)
|
d.current.err = d.frame.reset(&d.syncStream.br)
|
||||||
|
if d.current.err == nil {
|
||||||
|
d.current.err = d.setDict(d.frame)
|
||||||
|
}
|
||||||
if d.current.err != nil {
|
if d.current.err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if d.frame.DictionaryID != nil {
|
|
||||||
dict, ok := d.dicts[*d.frame.DictionaryID]
|
|
||||||
if !ok {
|
|
||||||
d.current.err = ErrUnknownDictionary
|
|
||||||
return false
|
|
||||||
} else {
|
|
||||||
d.frame.history.setDict(&dict)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
|
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
|
||||||
d.current.err = ErrDecoderSizeExceeded
|
d.current.err = ErrDecoderSizeExceeded
|
||||||
return false
|
return false
|
||||||
@ -664,6 +671,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
|||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
|
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
|
||||||
}
|
}
|
||||||
|
hist.reset()
|
||||||
hist.decoders = block.async.newHist.decoders
|
hist.decoders = block.async.newHist.decoders
|
||||||
hist.recentOffsets = block.async.newHist.recentOffsets
|
hist.recentOffsets = block.async.newHist.recentOffsets
|
||||||
hist.windowSize = block.async.newHist.windowSize
|
hist.windowSize = block.async.newHist.windowSize
|
||||||
@ -695,6 +703,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
|||||||
seqExecute <- block
|
seqExecute <- block
|
||||||
}
|
}
|
||||||
close(seqExecute)
|
close(seqExecute)
|
||||||
|
hist.reset()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@ -718,6 +727,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
|||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Async 2: new history")
|
println("Async 2: new history")
|
||||||
}
|
}
|
||||||
|
hist.reset()
|
||||||
hist.windowSize = block.async.newHist.windowSize
|
hist.windowSize = block.async.newHist.windowSize
|
||||||
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
|
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
|
||||||
if block.async.newHist.dict != nil {
|
if block.async.newHist.dict != nil {
|
||||||
@ -747,7 +757,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
|||||||
if block.lowMem {
|
if block.lowMem {
|
||||||
block.dst = make([]byte, block.RLESize)
|
block.dst = make([]byte, block.RLESize)
|
||||||
} else {
|
} else {
|
||||||
block.dst = make([]byte, maxBlockSize)
|
block.dst = make([]byte, maxCompressedBlockSize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
block.dst = block.dst[:block.RLESize]
|
block.dst = block.dst[:block.RLESize]
|
||||||
@ -799,13 +809,14 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
|||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("decoder goroutines finished")
|
println("decoder goroutines finished")
|
||||||
}
|
}
|
||||||
|
hist.reset()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
var hist history
|
||||||
decodeStream:
|
decodeStream:
|
||||||
for {
|
for {
|
||||||
var hist history
|
|
||||||
var hasErr bool
|
var hasErr bool
|
||||||
|
hist.reset()
|
||||||
decodeBlock := func(block *blockDec) {
|
decodeBlock := func(block *blockDec) {
|
||||||
if hasErr {
|
if hasErr {
|
||||||
if block != nil {
|
if block != nil {
|
||||||
@ -840,15 +851,14 @@ decodeStream:
|
|||||||
if debugDecoder && err != nil {
|
if debugDecoder && err != nil {
|
||||||
println("Frame decoder returned", err)
|
println("Frame decoder returned", err)
|
||||||
}
|
}
|
||||||
if err == nil && frame.DictionaryID != nil {
|
if err == nil {
|
||||||
dict, ok := d.dicts[*frame.DictionaryID]
|
err = d.setDict(frame)
|
||||||
if !ok {
|
|
||||||
err = ErrUnknownDictionary
|
|
||||||
} else {
|
|
||||||
frame.history.setDict(&dict)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
|
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
|
||||||
|
if debugDecoder {
|
||||||
|
println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
|
||||||
|
}
|
||||||
|
|
||||||
err = ErrDecoderSizeExceeded
|
err = ErrDecoderSizeExceeded
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -890,18 +900,22 @@ decodeStream:
|
|||||||
println("next block returned error:", err)
|
println("next block returned error:", err)
|
||||||
}
|
}
|
||||||
dec.err = err
|
dec.err = err
|
||||||
dec.checkCRC = nil
|
dec.hasCRC = false
|
||||||
if dec.Last && frame.HasCheckSum && err == nil {
|
if dec.Last && frame.HasCheckSum && err == nil {
|
||||||
crc, err := frame.rawInput.readSmall(4)
|
crc, err := frame.rawInput.readSmall(4)
|
||||||
if err != nil {
|
if len(crc) < 4 {
|
||||||
|
if err == nil {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
|
||||||
|
}
|
||||||
println("CRC missing?", err)
|
println("CRC missing?", err)
|
||||||
dec.err = err
|
dec.err = err
|
||||||
}
|
} else {
|
||||||
var tmp [4]byte
|
dec.checkCRC = binary.LittleEndian.Uint32(crc)
|
||||||
copy(tmp[:], crc)
|
dec.hasCRC = true
|
||||||
dec.checkCRC = tmp[:]
|
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("found crc to check:", dec.checkCRC)
|
printf("found crc to check: %08x\n", dec.checkCRC)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err = dec.err
|
err = dec.err
|
||||||
@ -917,5 +931,23 @@ decodeStream:
|
|||||||
}
|
}
|
||||||
close(seqDecode)
|
close(seqDecode)
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
hist.reset()
|
||||||
d.frame.history.b = frameHistCache
|
d.frame.history.b = frameHistCache
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) setDict(frame *frameDec) (err error) {
|
||||||
|
dict, ok := d.dicts[frame.DictionaryID]
|
||||||
|
if ok {
|
||||||
|
if debugDecoder {
|
||||||
|
println("setting dict", frame.DictionaryID)
|
||||||
|
}
|
||||||
|
frame.history.setDict(dict)
|
||||||
|
} else if frame.DictionaryID != 0 {
|
||||||
|
// A zero or missing dictionary id is ambiguous:
|
||||||
|
// either dictionary zero, or no dictionary. In particular,
|
||||||
|
// zstd --patch-from uses this id for the source file,
|
||||||
|
// so only return an error if the dictionary id is not zero.
|
||||||
|
err = ErrUnknownDictionary
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
52
vendor/github.com/klauspost/compress/zstd/decoder_options.go
generated
vendored
52
vendor/github.com/klauspost/compress/zstd/decoder_options.go
generated
vendored
@ -6,6 +6,8 @@ package zstd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/bits"
|
||||||
"runtime"
|
"runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -18,8 +20,10 @@ type decoderOptions struct {
|
|||||||
concurrent int
|
concurrent int
|
||||||
maxDecodedSize uint64
|
maxDecodedSize uint64
|
||||||
maxWindowSize uint64
|
maxWindowSize uint64
|
||||||
dicts []dict
|
dicts []*dict
|
||||||
ignoreChecksum bool
|
ignoreChecksum bool
|
||||||
|
limitToCap bool
|
||||||
|
decodeBufsBelow int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *decoderOptions) setDefault() {
|
func (o *decoderOptions) setDefault() {
|
||||||
@ -28,6 +32,7 @@ func (o *decoderOptions) setDefault() {
|
|||||||
lowMem: true,
|
lowMem: true,
|
||||||
concurrent: runtime.GOMAXPROCS(0),
|
concurrent: runtime.GOMAXPROCS(0),
|
||||||
maxWindowSize: MaxWindowSize,
|
maxWindowSize: MaxWindowSize,
|
||||||
|
decodeBufsBelow: 128 << 10,
|
||||||
}
|
}
|
||||||
if o.concurrent > 4 {
|
if o.concurrent > 4 {
|
||||||
o.concurrent = 4
|
o.concurrent = 4
|
||||||
@ -82,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
|
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
|
||||||
// If several dictionaries with the same ID is provided the last one will be used.
|
//
|
||||||
|
// Each slice in dict must be in the [dictionary format] produced by
|
||||||
|
// "zstd --train" from the Zstandard reference implementation.
|
||||||
|
//
|
||||||
|
// If several dictionaries with the same ID are provided, the last one will be used.
|
||||||
|
//
|
||||||
|
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
||||||
func WithDecoderDicts(dicts ...[]byte) DOption {
|
func WithDecoderDicts(dicts ...[]byte) DOption {
|
||||||
return func(o *decoderOptions) error {
|
return func(o *decoderOptions) error {
|
||||||
for _, b := range dicts {
|
for _, b := range dicts {
|
||||||
@ -90,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
o.dicts = append(o.dicts, *d)
|
o.dicts = append(o.dicts, d)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
|
||||||
|
// The slice content can be arbitrary data.
|
||||||
|
func WithDecoderDictRaw(id uint32, content []byte) DOption {
|
||||||
|
return func(o *decoderOptions) error {
|
||||||
|
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
|
||||||
|
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
|
||||||
|
}
|
||||||
|
o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
|
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
|
||||||
// This allows rejecting packets that will cause big memory usage.
|
// This allows rejecting packets that will cause big memory usage.
|
||||||
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
|
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
|
||||||
@ -114,6 +137,29 @@ func WithDecoderMaxWindow(size uint64) DOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
|
||||||
|
// or any size set in WithDecoderMaxMemory.
|
||||||
|
// This can be used to limit decoding to a specific maximum output size.
|
||||||
|
// Disabled by default.
|
||||||
|
func WithDecodeAllCapLimit(b bool) DOption {
|
||||||
|
return func(o *decoderOptions) error {
|
||||||
|
o.limitToCap = b
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDecodeBuffersBelow will fully decode readers that have a
|
||||||
|
// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
|
||||||
|
// This typically uses less allocations but will have the full decompressed object in memory.
|
||||||
|
// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
|
||||||
|
// Default is 128KiB.
|
||||||
|
func WithDecodeBuffersBelow(size int) DOption {
|
||||||
|
return func(o *decoderOptions) error {
|
||||||
|
o.decodeBufsBelow = size
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// IgnoreChecksum allows to forcibly ignore checksum checking.
|
// IgnoreChecksum allows to forcibly ignore checksum checking.
|
||||||
func IgnoreChecksum(b bool) DOption {
|
func IgnoreChecksum(b bool) DOption {
|
||||||
return func(o *decoderOptions) error {
|
return func(o *decoderOptions) error {
|
||||||
|
51
vendor/github.com/klauspost/compress/zstd/dict.go
generated
vendored
51
vendor/github.com/klauspost/compress/zstd/dict.go
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -20,7 +19,10 @@ type dict struct {
|
|||||||
content []byte
|
content []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
|
const dictMagic = "\x37\xa4\x30\xec"
|
||||||
|
|
||||||
|
// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
|
||||||
|
const dictMaxLength = 1 << 31
|
||||||
|
|
||||||
// ID returns the dictionary id or 0 if d is nil.
|
// ID returns the dictionary id or 0 if d is nil.
|
||||||
func (d *dict) ID() uint32 {
|
func (d *dict) ID() uint32 {
|
||||||
@ -30,14 +32,38 @@ func (d *dict) ID() uint32 {
|
|||||||
return d.id
|
return d.id
|
||||||
}
|
}
|
||||||
|
|
||||||
// DictContentSize returns the dictionary content size or 0 if d is nil.
|
// ContentSize returns the dictionary content size or 0 if d is nil.
|
||||||
func (d *dict) DictContentSize() int {
|
func (d *dict) ContentSize() int {
|
||||||
if d == nil {
|
if d == nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
return len(d.content)
|
return len(d.content)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Content returns the dictionary content.
|
||||||
|
func (d *dict) Content() []byte {
|
||||||
|
if d == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return d.content
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offsets returns the initial offsets.
|
||||||
|
func (d *dict) Offsets() [3]int {
|
||||||
|
if d == nil {
|
||||||
|
return [3]int{}
|
||||||
|
}
|
||||||
|
return d.offsets
|
||||||
|
}
|
||||||
|
|
||||||
|
// LitEncoder returns the literal encoder.
|
||||||
|
func (d *dict) LitEncoder() *huff0.Scratch {
|
||||||
|
if d == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return d.litEnc
|
||||||
|
}
|
||||||
|
|
||||||
// Load a dictionary as described in
|
// Load a dictionary as described in
|
||||||
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
|
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
|
||||||
func loadDict(b []byte) (*dict, error) {
|
func loadDict(b []byte) (*dict, error) {
|
||||||
@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) {
|
|||||||
ofDec: sequenceDec{fse: &fseDecoder{}},
|
ofDec: sequenceDec{fse: &fseDecoder{}},
|
||||||
mlDec: sequenceDec{fse: &fseDecoder{}},
|
mlDec: sequenceDec{fse: &fseDecoder{}},
|
||||||
}
|
}
|
||||||
if !bytes.Equal(b[:4], dictMagic[:]) {
|
if string(b[:4]) != dictMagic {
|
||||||
return nil, ErrMagicMismatch
|
return nil, ErrMagicMismatch
|
||||||
}
|
}
|
||||||
d.id = binary.LittleEndian.Uint32(b[4:8])
|
d.id = binary.LittleEndian.Uint32(b[4:8])
|
||||||
@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
|
|||||||
var err error
|
var err error
|
||||||
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
|
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("loading literal table: %w", err)
|
||||||
}
|
}
|
||||||
d.litEnc.Reuse = huff0.ReusePolicyMust
|
d.litEnc.Reuse = huff0.ReusePolicyMust
|
||||||
|
|
||||||
@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {
|
|||||||
|
|
||||||
return &d, nil
|
return &d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
|
||||||
|
func InspectDictionary(b []byte) (interface {
|
||||||
|
ID() uint32
|
||||||
|
ContentSize() int
|
||||||
|
Content() []byte
|
||||||
|
Offsets() [3]int
|
||||||
|
LitEncoder() *huff0.Scratch
|
||||||
|
}, error) {
|
||||||
|
initPredefined()
|
||||||
|
d, err := loadDict(b)
|
||||||
|
return d, err
|
||||||
|
}
|
||||||
|
28
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
28
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
@ -16,6 +16,7 @@ type fastBase struct {
|
|||||||
cur int32
|
cur int32
|
||||||
// maximum offset. Should be at least 2x block size.
|
// maximum offset. Should be at least 2x block size.
|
||||||
maxMatchOff int32
|
maxMatchOff int32
|
||||||
|
bufferReset int32
|
||||||
hist []byte
|
hist []byte
|
||||||
crc *xxhash.Digest
|
crc *xxhash.Digest
|
||||||
tmp [8]byte
|
tmp [8]byte
|
||||||
@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *fastBase) addBlock(src []byte) int32 {
|
func (e *fastBase) addBlock(src []byte) int32 {
|
||||||
if debugAsserts && e.cur > bufferReset {
|
if debugAsserts && e.cur > e.bufferReset {
|
||||||
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
|
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
|
||||||
}
|
}
|
||||||
// check if we have space already
|
// check if we have space already
|
||||||
if len(e.hist)+len(src) > cap(e.hist) {
|
if len(e.hist)+len(src) > cap(e.hist) {
|
||||||
@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
|
|||||||
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a := src[s:]
|
return int32(matchLen(src[s:], src[t:]))
|
||||||
b := src[t:]
|
|
||||||
b = b[:len(a)]
|
|
||||||
end := int32((len(a) >> 3) << 3)
|
|
||||||
for i := int32(0); i < end; i += 8 {
|
|
||||||
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
|
|
||||||
return i + int32(bits.TrailingZeros64(diff)>>3)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
a = a[end:]
|
|
||||||
b = b[end:]
|
|
||||||
for i := range a {
|
|
||||||
if a[i] != b[i] {
|
|
||||||
return int32(i) + end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return int32(len(a)) + end
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset the encoding table.
|
// Reset the encoding table.
|
||||||
@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
|
|||||||
if singleBlock {
|
if singleBlock {
|
||||||
e.lowMem = true
|
e.lowMem = true
|
||||||
}
|
}
|
||||||
e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
|
e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
|
||||||
e.lowMem = low
|
e.lowMem = low
|
||||||
}
|
}
|
||||||
|
|
||||||
// We offset current position so everything will be out of reach.
|
// We offset current position so everything will be out of reach.
|
||||||
// If above reset line, history will be purged.
|
// If above reset line, history will be purged.
|
||||||
if e.cur < bufferReset {
|
if e.cur < e.bufferReset {
|
||||||
e.cur += e.maxMatchOff + int32(len(e.hist))
|
e.cur += e.maxMatchOff + int32(len(e.hist))
|
||||||
}
|
}
|
||||||
e.hist = e.hist[:0]
|
e.hist = e.hist[:0]
|
||||||
|
64
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
64
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
@ -32,6 +32,7 @@ type match struct {
|
|||||||
length int32
|
length int32
|
||||||
rep int32
|
rep int32
|
||||||
est int32
|
est int32
|
||||||
|
_ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
const highScore = 25000
|
const highScore = 25000
|
||||||
@ -84,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [bestShortTableSize]prevEntry{}
|
||||||
e.table[i] = prevEntry{}
|
e.longTable = [bestLongTableSize]prevEntry{}
|
||||||
}
|
|
||||||
for i := range e.longTable[:] {
|
|
||||||
e.longTable[i] = prevEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -192,8 +189,8 @@ encodeLoop:
|
|||||||
panic("offset0 was 0")
|
panic("offset0 was 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
bestOf := func(a, b match) match {
|
bestOf := func(a, b *match) *match {
|
||||||
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
|
if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
|
||||||
return a
|
return a
|
||||||
}
|
}
|
||||||
return b
|
return b
|
||||||
@ -219,22 +216,26 @@ encodeLoop:
|
|||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
|
m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
|
m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
|
m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||||
|
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
|
||||||
|
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
|
||||||
|
|
||||||
if canRepeat && best.length < goodEnough {
|
if canRepeat && best.length < goodEnough {
|
||||||
cv32 := uint32(cv >> 8)
|
cv32 := uint32(cv >> 8)
|
||||||
spp := s + 1
|
spp := s + 1
|
||||||
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
|
m1 := matchAt(spp-offset1, spp, cv32, 1)
|
||||||
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
|
m2 := matchAt(spp-offset2, spp, cv32, 2)
|
||||||
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
|
m3 := matchAt(spp-offset3, spp, cv32, 3)
|
||||||
|
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
|
||||||
if best.length > 0 {
|
if best.length > 0 {
|
||||||
cv32 = uint32(cv >> 24)
|
cv32 = uint32(cv >> 24)
|
||||||
spp += 2
|
spp += 2
|
||||||
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
|
m1 := matchAt(spp-offset1, spp, cv32, 1)
|
||||||
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
|
m2 := matchAt(spp-offset2, spp, cv32, 2)
|
||||||
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
|
m3 := matchAt(spp-offset3, spp, cv32, 3)
|
||||||
|
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Load next and check...
|
// Load next and check...
|
||||||
@ -261,26 +262,33 @@ encodeLoop:
|
|||||||
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
||||||
|
|
||||||
// Short at s+1
|
// Short at s+1
|
||||||
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
|
m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||||
// Long at s+1, s+2
|
// Long at s+1, s+2
|
||||||
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
|
m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
|
m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
|
m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
|
||||||
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
|
m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
|
||||||
|
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
|
||||||
if false {
|
if false {
|
||||||
// Short at s+3.
|
// Short at s+3.
|
||||||
// Too often worse...
|
// Too often worse...
|
||||||
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
|
m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
|
||||||
|
best = bestOf(best, &m)
|
||||||
}
|
}
|
||||||
// See if we can find a better match by checking where the current best ends.
|
// See if we can find a better match by checking where the current best ends.
|
||||||
// Use that offset to see if we can find a better full match.
|
// Use that offset to see if we can find a better full match.
|
||||||
if sAt := best.s + best.length; sAt < sLimit {
|
if sAt := best.s + best.length; sAt < sLimit {
|
||||||
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
||||||
candidateEnd := e.longTable[nextHashL]
|
candidateEnd := e.longTable[nextHashL]
|
||||||
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
|
// Start check at a fixed offset to allow for a few mismatches.
|
||||||
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
|
// For this compression level 2 yields the best results.
|
||||||
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
|
const skipBeginning = 2
|
||||||
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
|
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||||
|
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||||
|
bestEnd := bestOf(best, &m)
|
||||||
|
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||||
|
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||||
|
bestEnd = bestOf(bestEnd, &m)
|
||||||
}
|
}
|
||||||
best = bestEnd
|
best = bestEnd
|
||||||
}
|
}
|
||||||
|
35
vendor/github.com/klauspost/compress/zstd/enc_better.go
generated
vendored
35
vendor/github.com/klauspost/compress/zstd/enc_better.go
generated
vendored
@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [betterShortTableSize]tableEntry{}
|
||||||
e.table[i] = tableEntry{}
|
e.longTable = [betterLongTableSize]prevEntry{}
|
||||||
}
|
|
||||||
for i := range e.longTable[:] {
|
|
||||||
e.longTable[i] = prevEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -416,15 +412,23 @@ encodeLoop:
|
|||||||
|
|
||||||
// Try to find a better match by searching for a long match at the end of the current best match
|
// Try to find a better match by searching for a long match at the end of the current best match
|
||||||
if s+matched < sLimit {
|
if s+matched < sLimit {
|
||||||
|
// Allow some bytes at the beginning to mismatch.
|
||||||
|
// Sweet spot is around 3 bytes, but depends on input.
|
||||||
|
// The skipped bytes are tested in Extend backwards,
|
||||||
|
// and still picked up as part of the match if they do.
|
||||||
|
const skipBeginning = 3
|
||||||
|
|
||||||
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
|
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
|
||||||
cv := load3232(src, s)
|
s2 := s + skipBeginning
|
||||||
|
cv := load3232(src, s2)
|
||||||
candidateL := e.longTable[nextHashL]
|
candidateL := e.longTable[nextHashL]
|
||||||
coffsetL := candidateL.offset - e.cur - matched
|
coffsetL := candidateL.offset - e.cur - matched + skipBeginning
|
||||||
if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
|
if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
|
||||||
// Found a long match, at least 4 bytes.
|
// Found a long match, at least 4 bytes.
|
||||||
matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
|
matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
|
||||||
if matchedNext > matched {
|
if matchedNext > matched {
|
||||||
t = coffsetL
|
t = coffsetL
|
||||||
|
s = s2
|
||||||
matched = matchedNext
|
matched = matchedNext
|
||||||
if debugMatches {
|
if debugMatches {
|
||||||
println("long match at end-of-match")
|
println("long match at end-of-match")
|
||||||
@ -434,12 +438,13 @@ encodeLoop:
|
|||||||
|
|
||||||
// Check prev long...
|
// Check prev long...
|
||||||
if true {
|
if true {
|
||||||
coffsetL = candidateL.prev - e.cur - matched
|
coffsetL = candidateL.prev - e.cur - matched + skipBeginning
|
||||||
if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
|
if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
|
||||||
// Found a long match, at least 4 bytes.
|
// Found a long match, at least 4 bytes.
|
||||||
matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
|
matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
|
||||||
if matchedNext > matched {
|
if matchedNext > matched {
|
||||||
t = coffsetL
|
t = coffsetL
|
||||||
|
s = s2
|
||||||
matched = matchedNext
|
matched = matchedNext
|
||||||
if debugMatches {
|
if debugMatches {
|
||||||
println("prev long match at end-of-match")
|
println("prev long match at end-of-match")
|
||||||
@ -578,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
|
23
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
23
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [dFastShortTableSize]tableEntry{}
|
||||||
e.table[i] = tableEntry{}
|
e.longTable = [dFastLongTableSize]tableEntry{}
|
||||||
}
|
|
||||||
for i := range e.longTable[:] {
|
|
||||||
e.longTable[i] = tableEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
if e.cur >= bufferReset {
|
if e.cur >= e.bufferReset {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
}
|
}
|
||||||
@ -685,7 +681,7 @@ encodeLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
||||||
if e.cur < bufferReset {
|
if e.cur < e.bufferReset {
|
||||||
e.cur += int32(len(src))
|
e.cur += int32(len(src))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
@ -1103,7 +1099,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
|
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
|
||||||
copy(e.longTable[:], e.dictLongTable)
|
//copy(e.longTable[:], e.dictLongTable)
|
||||||
|
e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
|
||||||
for i := range e.longTableShardDirty {
|
for i := range e.longTableShardDirty {
|
||||||
e.longTableShardDirty[i] = false
|
e.longTableShardDirty[i] = false
|
||||||
}
|
}
|
||||||
@ -1114,7 +1111,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
|
// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
|
||||||
|
*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
|
||||||
|
|
||||||
e.longTableShardDirty[i] = false
|
e.longTableShardDirty[i] = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
20
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
20
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
@ -304,13 +304,13 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
|||||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||||
)
|
)
|
||||||
if debugEncoder {
|
if debugEncoder {
|
||||||
if len(src) > maxBlockSize {
|
if len(src) > maxCompressedBlockSize {
|
||||||
panic("src too big")
|
panic("src too big")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
if e.cur >= bufferReset {
|
if e.cur >= e.bufferReset {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
}
|
}
|
||||||
@ -538,7 +538,7 @@ encodeLoop:
|
|||||||
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
||||||
}
|
}
|
||||||
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
||||||
if e.cur < bufferReset {
|
if e.cur < e.bufferReset {
|
||||||
e.cur += int32(len(src))
|
e.cur += int32(len(src))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [tableSize]tableEntry{}
|
||||||
e.table[i] = tableEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -871,7 +869,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
|
|||||||
const shardCnt = tableShardCnt
|
const shardCnt = tableShardCnt
|
||||||
const shardSize = tableShardSize
|
const shardSize = tableShardSize
|
||||||
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
|
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
|
||||||
copy(e.table[:], e.dictTable)
|
//copy(e.table[:], e.dictTable)
|
||||||
|
e.table = *(*[tableSize]tableEntry)(e.dictTable)
|
||||||
for i := range e.tableShardDirty {
|
for i := range e.tableShardDirty {
|
||||||
e.tableShardDirty[i] = false
|
e.tableShardDirty[i] = false
|
||||||
}
|
}
|
||||||
@ -883,7 +882,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
|
//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
|
||||||
|
*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
|
||||||
e.tableShardDirty[i] = false
|
e.tableShardDirty[i] = false
|
||||||
}
|
}
|
||||||
e.allDirty = false
|
e.allDirty = false
|
||||||
|
39
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
39
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@ -8,6 +8,7 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
rdebug "runtime/debug"
|
rdebug "runtime/debug"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
@ -528,8 +529,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||||||
// If a non-single block is needed the encoder will reset again.
|
// If a non-single block is needed the encoder will reset again.
|
||||||
e.encoders <- enc
|
e.encoders <- enc
|
||||||
}()
|
}()
|
||||||
// Use single segments when above minimum window and below 1MB.
|
// Use single segments when above minimum window and below window size.
|
||||||
single := len(src) < 1<<20 && len(src) > MinWindowSize
|
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
|
||||||
if e.o.single != nil {
|
if e.o.single != nil {
|
||||||
single = *e.o.single
|
single = *e.o.single
|
||||||
}
|
}
|
||||||
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||||||
}
|
}
|
||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxEncodedSize returns the expected maximum
|
||||||
|
// size of an encoded block or stream.
|
||||||
|
func (e *Encoder) MaxEncodedSize(size int) int {
|
||||||
|
frameHeader := 4 + 2 // magic + frame header & window descriptor
|
||||||
|
if e.o.dict != nil {
|
||||||
|
frameHeader += 4
|
||||||
|
}
|
||||||
|
// Frame content size:
|
||||||
|
if size < 256 {
|
||||||
|
frameHeader++
|
||||||
|
} else if size < 65536+256 {
|
||||||
|
frameHeader += 2
|
||||||
|
} else if size < math.MaxInt32 {
|
||||||
|
frameHeader += 4
|
||||||
|
} else {
|
||||||
|
frameHeader += 8
|
||||||
|
}
|
||||||
|
// Final crc
|
||||||
|
if e.o.crc {
|
||||||
|
frameHeader += 4
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max overhead is 3 bytes/block.
|
||||||
|
// There cannot be 0 blocks.
|
||||||
|
blocks := (size + e.o.blockSize) / e.o.blockSize
|
||||||
|
|
||||||
|
// Combine, add padding.
|
||||||
|
maxSz := frameHeader + 3*blocks + size
|
||||||
|
if e.o.pad > 1 {
|
||||||
|
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
|
||||||
|
}
|
||||||
|
return maxSz
|
||||||
|
}
|
||||||
|
38
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
38
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
@ -3,6 +3,8 @@ package zstd
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/bits"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder {
|
|||||||
switch o.level {
|
switch o.level {
|
||||||
case SpeedFastest:
|
case SpeedFastest:
|
||||||
if o.dict != nil {
|
if o.dict != nil {
|
||||||
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||||
}
|
}
|
||||||
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||||
|
|
||||||
case SpeedDefault:
|
case SpeedDefault:
|
||||||
if o.dict != nil {
|
if o.dict != nil {
|
||||||
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
|
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
|
||||||
}
|
}
|
||||||
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||||
case SpeedBetterCompression:
|
case SpeedBetterCompression:
|
||||||
if o.dict != nil {
|
if o.dict != nil {
|
||||||
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||||
}
|
}
|
||||||
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||||
case SpeedBestCompression:
|
case SpeedBestCompression:
|
||||||
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||||
}
|
}
|
||||||
panic("unknown compression level")
|
panic("unknown compression level")
|
||||||
}
|
}
|
||||||
@ -283,7 +285,7 @@ func WithNoEntropyCompression(b bool) EOption {
|
|||||||
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
|
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
|
||||||
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
|
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
|
||||||
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
|
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
|
||||||
// If this is not specified, block encodes will automatically choose this based on the input size.
|
// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
|
||||||
// This setting has no effect on streamed encodes.
|
// This setting has no effect on streamed encodes.
|
||||||
func WithSingleSegment(b bool) EOption {
|
func WithSingleSegment(b bool) EOption {
|
||||||
return func(o *encoderOptions) error {
|
return func(o *encoderOptions) error {
|
||||||
@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithEncoderDict allows to register a dictionary that will be used for the encode.
|
// WithEncoderDict allows to register a dictionary that will be used for the encode.
|
||||||
|
//
|
||||||
|
// The slice dict must be in the [dictionary format] produced by
|
||||||
|
// "zstd --train" from the Zstandard reference implementation.
|
||||||
|
//
|
||||||
// The encoder *may* choose to use no dictionary instead for certain payloads.
|
// The encoder *may* choose to use no dictionary instead for certain payloads.
|
||||||
|
//
|
||||||
|
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
||||||
func WithEncoderDict(dict []byte) EOption {
|
func WithEncoderDict(dict []byte) EOption {
|
||||||
return func(o *encoderOptions) error {
|
return func(o *encoderOptions) error {
|
||||||
d, err := loadDict(dict)
|
d, err := loadDict(dict)
|
||||||
@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
|
||||||
|
//
|
||||||
|
// The slice content may contain arbitrary data. It will be used as an initial
|
||||||
|
// history.
|
||||||
|
func WithEncoderDictRaw(id uint32, content []byte) EOption {
|
||||||
|
return func(o *encoderOptions) error {
|
||||||
|
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
|
||||||
|
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
|
||||||
|
}
|
||||||
|
o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
96
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
96
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
@ -5,7 +5,7 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
@ -29,7 +29,7 @@ type frameDec struct {
|
|||||||
|
|
||||||
FrameContentSize uint64
|
FrameContentSize uint64
|
||||||
|
|
||||||
DictionaryID *uint32
|
DictionaryID uint32
|
||||||
HasCheckSum bool
|
HasCheckSum bool
|
||||||
SingleSegment bool
|
SingleSegment bool
|
||||||
}
|
}
|
||||||
@ -43,9 +43,9 @@ const (
|
|||||||
MaxWindowSize = 1 << 29
|
MaxWindowSize = 1 << 29
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
const (
|
||||||
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
|
frameMagic = "\x28\xb5\x2f\xfd"
|
||||||
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
|
skippableFrameMagic = "\x2a\x4d\x18"
|
||||||
)
|
)
|
||||||
|
|
||||||
func newFrameDec(o decoderOptions) *frameDec {
|
func newFrameDec(o decoderOptions) *frameDec {
|
||||||
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
copy(signature[1:], b)
|
copy(signature[1:], b)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
|
if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
|
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
|
||||||
}
|
}
|
||||||
// Break if not skippable frame.
|
// Break if not skippable frame.
|
||||||
break
|
break
|
||||||
@ -106,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
}
|
}
|
||||||
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
|
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
|
||||||
println("Skipping frame with", n, "bytes.")
|
println("Skipping frame with", n, "bytes.")
|
||||||
err = br.skipN(int(n))
|
err = br.skipN(int64(n))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Reading discarded frame", err)
|
println("Reading discarded frame", err)
|
||||||
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !bytes.Equal(signature[:], frameMagic) {
|
if string(signature[:]) != frameMagic {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Got magic numbers: ", signature, "want:", frameMagic)
|
println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
|
||||||
}
|
}
|
||||||
return ErrMagicMismatch
|
return ErrMagicMismatch
|
||||||
}
|
}
|
||||||
@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
|
|
||||||
// Read Dictionary_ID
|
// Read Dictionary_ID
|
||||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
|
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
|
||||||
d.DictionaryID = nil
|
d.DictionaryID = 0
|
||||||
if size := fhd & 3; size != 0 {
|
if size := fhd & 3; size != 0 {
|
||||||
if size == 3 {
|
if size == 3 {
|
||||||
size = 4
|
size = 4
|
||||||
@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var id uint32
|
var id uint32
|
||||||
switch size {
|
switch len(b) {
|
||||||
case 1:
|
case 1:
|
||||||
id = uint32(b[0])
|
id = uint32(b[0])
|
||||||
case 2:
|
case 2:
|
||||||
@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Dict size", size, "ID:", id)
|
println("Dict size", size, "ID:", id)
|
||||||
}
|
}
|
||||||
if id > 0 {
|
d.DictionaryID = id
|
||||||
// ID 0 means "sorry, no dictionary anyway".
|
|
||||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
|
||||||
d.DictionaryID = &id
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read Frame_Content_Size
|
// Read Frame_Content_Size
|
||||||
@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
println("Reading Frame content", err)
|
println("Reading Frame content", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
switch fcsSize {
|
switch len(b) {
|
||||||
case 1:
|
case 1:
|
||||||
d.FrameContentSize = uint64(b[0])
|
d.FrameContentSize = uint64(b[0])
|
||||||
case 2:
|
case 2:
|
||||||
@ -231,20 +227,27 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
d.crc.Reset()
|
d.crc.Reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d.WindowSize > d.o.maxWindowSize {
|
||||||
|
if debugDecoder {
|
||||||
|
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
|
||||||
|
}
|
||||||
|
return ErrWindowSizeExceeded
|
||||||
|
}
|
||||||
|
|
||||||
if d.WindowSize == 0 && d.SingleSegment {
|
if d.WindowSize == 0 && d.SingleSegment {
|
||||||
// We may not need window in this case.
|
// We may not need window in this case.
|
||||||
d.WindowSize = d.FrameContentSize
|
d.WindowSize = d.FrameContentSize
|
||||||
if d.WindowSize < MinWindowSize {
|
if d.WindowSize < MinWindowSize {
|
||||||
d.WindowSize = MinWindowSize
|
d.WindowSize = MinWindowSize
|
||||||
}
|
}
|
||||||
}
|
if d.WindowSize > d.o.maxDecodedSize {
|
||||||
|
|
||||||
if d.WindowSize > uint64(d.o.maxWindowSize) {
|
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
|
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
|
||||||
}
|
}
|
||||||
return ErrWindowSizeExceeded
|
return ErrDecoderSizeExceeded
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// The minimum Window_Size is 1 KB.
|
// The minimum Window_Size is 1 KB.
|
||||||
if d.WindowSize < MinWindowSize {
|
if d.WindowSize < MinWindowSize {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
@ -254,12 +257,17 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
}
|
}
|
||||||
d.history.windowSize = int(d.WindowSize)
|
d.history.windowSize = int(d.WindowSize)
|
||||||
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
|
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
|
||||||
// Alloc 2x window size if not low-mem, or very small window size.
|
// Alloc 2x window size if not low-mem, or window size below 2MB.
|
||||||
d.history.allocFrameBuffer = d.history.windowSize * 2
|
d.history.allocFrameBuffer = d.history.windowSize * 2
|
||||||
} else {
|
} else {
|
||||||
// Alloc with one additional block
|
if d.o.lowMem {
|
||||||
|
// Alloc with 1MB extra.
|
||||||
|
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
|
||||||
|
} else {
|
||||||
|
// Alloc with 2MB extra.
|
||||||
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
|
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
|
println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
|
||||||
@ -293,7 +301,7 @@ func (d *frameDec) checkCRC() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We can overwrite upper tmp now
|
// We can overwrite upper tmp now
|
||||||
want, err := d.rawInput.readSmall(4)
|
buf, err := d.rawInput.readSmall(4)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
println("CRC missing?", err)
|
println("CRC missing?", err)
|
||||||
return err
|
return err
|
||||||
@ -303,22 +311,17 @@ func (d *frameDec) checkCRC() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var tmp [4]byte
|
want := binary.LittleEndian.Uint32(buf[:4])
|
||||||
got := d.crc.Sum64()
|
got := uint32(d.crc.Sum64())
|
||||||
// Flip to match file order.
|
|
||||||
tmp[0] = byte(got >> 0)
|
|
||||||
tmp[1] = byte(got >> 8)
|
|
||||||
tmp[2] = byte(got >> 16)
|
|
||||||
tmp[3] = byte(got >> 24)
|
|
||||||
|
|
||||||
if !bytes.Equal(tmp[:], want) {
|
if got != want {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC Check Failed:", tmp[:], "!=", want)
|
printf("CRC check failed: got %08x, want %08x\n", got, want)
|
||||||
}
|
}
|
||||||
return ErrCRCMismatch
|
return ErrCRCMismatch
|
||||||
}
|
}
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC ok", tmp[:])
|
printf("CRC ok %08x\n", got)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -336,7 +339,7 @@ func (d *frameDec) consumeCRC() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// runDecoder will create a sync decoder that will decode a block of data.
|
// runDecoder will run the decoder for the remainder of the frame.
|
||||||
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
||||||
saved := d.history.b
|
saved := d.history.b
|
||||||
|
|
||||||
@ -346,12 +349,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
|||||||
// Store input length, so we only check new data.
|
// Store input length, so we only check new data.
|
||||||
crcStart := len(dst)
|
crcStart := len(dst)
|
||||||
d.history.decoders.maxSyncLen = 0
|
d.history.decoders.maxSyncLen = 0
|
||||||
|
if d.o.limitToCap {
|
||||||
|
d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
|
||||||
|
}
|
||||||
if d.FrameContentSize != fcsUnknown {
|
if d.FrameContentSize != fcsUnknown {
|
||||||
|
if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
|
||||||
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
|
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
|
||||||
|
}
|
||||||
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
|
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
|
||||||
|
if debugDecoder {
|
||||||
|
println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
|
||||||
|
}
|
||||||
return dst, ErrDecoderSizeExceeded
|
return dst, ErrDecoderSizeExceeded
|
||||||
}
|
}
|
||||||
if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
|
if debugDecoder {
|
||||||
|
println("maxSyncLen:", d.history.decoders.maxSyncLen)
|
||||||
|
}
|
||||||
|
if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
|
||||||
// Alloc for output
|
// Alloc for output
|
||||||
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
|
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
|
||||||
copy(dst2, dst)
|
copy(dst2, dst)
|
||||||
@ -371,7 +385,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if uint64(len(d.history.b)) > d.o.maxDecodedSize {
|
if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
|
||||||
|
println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
|
||||||
|
err = ErrDecoderSizeExceeded
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if d.o.limitToCap && len(d.history.b) > cap(dst) {
|
||||||
|
println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
|
||||||
err = ErrDecoderSizeExceeded
|
err = ErrDecoderSizeExceeded
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
5
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
generated
vendored
5
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
generated
vendored
@ -21,6 +21,7 @@ type buildDtableAsmContext struct {
|
|||||||
|
|
||||||
// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
|
// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
|
||||||
// Function returns non-zero exit code on error.
|
// Function returns non-zero exit code on error.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
|
func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
|
||||||
|
|
||||||
@ -34,8 +35,8 @@ const (
|
|||||||
// buildDtable will build the decoding table.
|
// buildDtable will build the decoding table.
|
||||||
func (s *fseDecoder) buildDtable() error {
|
func (s *fseDecoder) buildDtable() error {
|
||||||
ctx := buildDtableAsmContext{
|
ctx := buildDtableAsmContext{
|
||||||
stateTable: (*uint16)(&s.stateTable[0]),
|
stateTable: &s.stateTable[0],
|
||||||
norm: (*int16)(&s.norm[0]),
|
norm: &s.norm[0],
|
||||||
dt: (*uint64)(&s.dt[0]),
|
dt: (*uint64)(&s.dt[0]),
|
||||||
}
|
}
|
||||||
code := buildDtable_asm(s, &ctx)
|
code := buildDtable_asm(s, &ctx)
|
||||||
|
1
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
generated
vendored
1
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
|
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
|
||||||
|
|
||||||
//go:build !appengine && !noasm && gc && !noasm
|
//go:build !appengine && !noasm && gc && !noasm
|
||||||
// +build !appengine,!noasm,gc,!noasm
|
|
||||||
|
|
||||||
// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
|
// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
|
||||||
TEXT ·buildDtable_asm(SB), $0-24
|
TEXT ·buildDtable_asm(SB), $0-24
|
||||||
|
25
vendor/github.com/klauspost/compress/zstd/history.go
generated
vendored
25
vendor/github.com/klauspost/compress/zstd/history.go
generated
vendored
@ -37,26 +37,23 @@ func (h *history) reset() {
|
|||||||
h.ignoreBuffer = 0
|
h.ignoreBuffer = 0
|
||||||
h.error = false
|
h.error = false
|
||||||
h.recentOffsets = [3]int{1, 4, 8}
|
h.recentOffsets = [3]int{1, 4, 8}
|
||||||
if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
|
h.decoders.freeDecoders()
|
||||||
fseDecoderPool.Put(f)
|
|
||||||
}
|
|
||||||
if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
|
|
||||||
fseDecoderPool.Put(f)
|
|
||||||
}
|
|
||||||
if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
|
|
||||||
fseDecoderPool.Put(f)
|
|
||||||
}
|
|
||||||
h.decoders = sequenceDecs{br: h.decoders.br}
|
h.decoders = sequenceDecs{br: h.decoders.br}
|
||||||
if h.huffTree != nil {
|
h.freeHuffDecoder()
|
||||||
if h.dict == nil || h.dict.litEnc != h.huffTree {
|
|
||||||
huffDecoderPool.Put(h.huffTree)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
h.huffTree = nil
|
h.huffTree = nil
|
||||||
h.dict = nil
|
h.dict = nil
|
||||||
//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
|
//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *history) freeHuffDecoder() {
|
||||||
|
if h.huffTree != nil {
|
||||||
|
if h.dict == nil || h.dict.litEnc != h.huffTree {
|
||||||
|
huffDecoderPool.Put(h.huffTree)
|
||||||
|
h.huffTree = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (h *history) setDict(dict *dict) {
|
func (h *history) setDict(dict *dict) {
|
||||||
if dict == nil {
|
if dict == nil {
|
||||||
return
|
return
|
||||||
|
47
vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
generated
vendored
47
vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
generated
vendored
@ -2,12 +2,7 @@
|
|||||||
|
|
||||||
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
|
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
|
||||||
|
|
||||||
|
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||||
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
|
|
||||||
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
|
|
||||||
|
|
||||||
xxhash is a Go implementation of the 64-bit
|
|
||||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
|
||||||
high-quality hashing algorithm that is much faster than anything in the Go
|
high-quality hashing algorithm that is much faster than anything in the Go
|
||||||
standard library.
|
standard library.
|
||||||
|
|
||||||
@ -28,8 +23,23 @@ func (*Digest) WriteString(string) (int, error)
|
|||||||
func (*Digest) Sum64() uint64
|
func (*Digest) Sum64() uint64
|
||||||
```
|
```
|
||||||
|
|
||||||
This implementation provides a fast pure-Go implementation and an even faster
|
The package is written with optimized pure Go and also contains even faster
|
||||||
assembly implementation for amd64.
|
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||||
|
opts into using the Go code even on those architectures.
|
||||||
|
|
||||||
|
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||||
|
|
||||||
|
## Compatibility
|
||||||
|
|
||||||
|
This package is in a module and the latest code is in version 2 of the module.
|
||||||
|
You need a version of Go with at least "minimal module compatibility" to use
|
||||||
|
github.com/cespare/xxhash/v2:
|
||||||
|
|
||||||
|
* 1.9.7+ for Go 1.9
|
||||||
|
* 1.10.3+ for Go 1.10
|
||||||
|
* Go 1.11 or later
|
||||||
|
|
||||||
|
I recommend using the latest release of Go.
|
||||||
|
|
||||||
## Benchmarks
|
## Benchmarks
|
||||||
|
|
||||||
@ -37,22 +47,25 @@ Here are some quick benchmarks comparing the pure-Go and assembly
|
|||||||
implementations of Sum64.
|
implementations of Sum64.
|
||||||
|
|
||||||
| input size | purego | asm |
|
| input size | purego | asm |
|
||||||
| --- | --- | --- |
|
| ---------- | --------- | --------- |
|
||||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||||
|
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||||
|
|
||||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||||
the following commands under Go 1.11.2:
|
CPU using the following commands under Go 1.19.2:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||||
```
|
```
|
||||||
|
|
||||||
## Projects using this package
|
## Projects using this package
|
||||||
|
|
||||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||||
|
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||||
- [FreeCache](https://github.com/coocood/freecache)
|
- [FreeCache](https://github.com/coocood/freecache)
|
||||||
|
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||||
|
47
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
generated
vendored
47
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
generated
vendored
@ -18,19 +18,11 @@ const (
|
|||||||
prime5 uint64 = 2870177450012600261
|
prime5 uint64 = 2870177450012600261
|
||||||
)
|
)
|
||||||
|
|
||||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
// Store the primes in an array as well.
|
||||||
// possible in the Go code is worth a small (but measurable) performance boost
|
//
|
||||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||||
// convenience in the Go code in a few places where we need to intentionally
|
// contiguous array of the assembly code.
|
||||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||||
// result overflows a uint64).
|
|
||||||
var (
|
|
||||||
prime1v = prime1
|
|
||||||
prime2v = prime2
|
|
||||||
prime3v = prime3
|
|
||||||
prime4v = prime4
|
|
||||||
prime5v = prime5
|
|
||||||
)
|
|
||||||
|
|
||||||
// Digest implements hash.Hash64.
|
// Digest implements hash.Hash64.
|
||||||
type Digest struct {
|
type Digest struct {
|
||||||
@ -52,10 +44,10 @@ func New() *Digest {
|
|||||||
|
|
||||||
// Reset clears the Digest's state so that it can be reused.
|
// Reset clears the Digest's state so that it can be reused.
|
||||||
func (d *Digest) Reset() {
|
func (d *Digest) Reset() {
|
||||||
d.v1 = prime1v + prime2
|
d.v1 = primes[0] + prime2
|
||||||
d.v2 = prime2
|
d.v2 = prime2
|
||||||
d.v3 = 0
|
d.v3 = 0
|
||||||
d.v4 = -prime1v
|
d.v4 = -primes[0]
|
||||||
d.total = 0
|
d.total = 0
|
||||||
d.n = 0
|
d.n = 0
|
||||||
}
|
}
|
||||||
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
|||||||
n = len(b)
|
n = len(b)
|
||||||
d.total += uint64(n)
|
d.total += uint64(n)
|
||||||
|
|
||||||
|
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||||
|
|
||||||
if d.n+n < 32 {
|
if d.n+n < 32 {
|
||||||
// This new data doesn't even fill the current block.
|
// This new data doesn't even fill the current block.
|
||||||
copy(d.mem[d.n:], b)
|
copy(memleft, b)
|
||||||
d.n += n
|
d.n += n
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.n > 0 {
|
if d.n > 0 {
|
||||||
// Finish off the partial block.
|
// Finish off the partial block.
|
||||||
copy(d.mem[d.n:], b)
|
c := copy(memleft, b)
|
||||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||||
b = b[32-d.n:]
|
b = b[c:]
|
||||||
d.n = 0
|
d.n = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
|
|||||||
|
|
||||||
h += d.total
|
h += d.total
|
||||||
|
|
||||||
i, end := 0, d.n
|
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||||
for ; i+8 <= end; i += 8 {
|
for ; len(b) >= 8; b = b[8:] {
|
||||||
k1 := round(0, u64(d.mem[i:i+8]))
|
k1 := round(0, u64(b[:8]))
|
||||||
h ^= k1
|
h ^= k1
|
||||||
h = rol27(h)*prime1 + prime4
|
h = rol27(h)*prime1 + prime4
|
||||||
}
|
}
|
||||||
if i+4 <= end {
|
if len(b) >= 4 {
|
||||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
h ^= uint64(u32(b[:4])) * prime1
|
||||||
h = rol23(h)*prime2 + prime3
|
h = rol23(h)*prime2 + prime3
|
||||||
i += 4
|
b = b[4:]
|
||||||
}
|
}
|
||||||
for i < end {
|
for ; len(b) > 0; b = b[1:] {
|
||||||
h ^= uint64(d.mem[i]) * prime5
|
h ^= uint64(b[0]) * prime5
|
||||||
h = rol11(h) * prime1
|
h = rol11(h) * prime1
|
||||||
i++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
h ^= h >> 33
|
h ^= h >> 33
|
||||||
|
308
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
generated
vendored
308
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build !appengine && gc && !purego && !noasm
|
||||||
// +build !appengine
|
// +build !appengine
|
||||||
// +build gc
|
// +build gc
|
||||||
// +build !purego
|
// +build !purego
|
||||||
@ -5,212 +6,205 @@
|
|||||||
|
|
||||||
#include "textflag.h"
|
#include "textflag.h"
|
||||||
|
|
||||||
// Register allocation:
|
// Registers:
|
||||||
// AX h
|
#define h AX
|
||||||
// SI pointer to advance through b
|
#define d AX
|
||||||
// DX n
|
#define p SI // pointer to advance through b
|
||||||
// BX loop end
|
#define n DX
|
||||||
// R8 v1, k1
|
#define end BX // loop end
|
||||||
// R9 v2
|
#define v1 R8
|
||||||
// R10 v3
|
#define v2 R9
|
||||||
// R11 v4
|
#define v3 R10
|
||||||
// R12 tmp
|
#define v4 R11
|
||||||
// R13 prime1v
|
#define x R12
|
||||||
// R14 prime2v
|
#define prime1 R13
|
||||||
// DI prime4v
|
#define prime2 R14
|
||||||
|
#define prime4 DI
|
||||||
|
|
||||||
// round reads from and advances the buffer pointer in SI.
|
#define round(acc, x) \
|
||||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
IMULQ prime2, x \
|
||||||
#define round(r) \
|
ADDQ x, acc \
|
||||||
MOVQ (SI), R12 \
|
ROLQ $31, acc \
|
||||||
ADDQ $8, SI \
|
IMULQ prime1, acc
|
||||||
IMULQ R14, R12 \
|
|
||||||
ADDQ R12, r \
|
|
||||||
ROLQ $31, r \
|
|
||||||
IMULQ R13, r
|
|
||||||
|
|
||||||
// mergeRound applies a merge round on the two registers acc and val.
|
// round0 performs the operation x = round(0, x).
|
||||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
#define round0(x) \
|
||||||
#define mergeRound(acc, val) \
|
IMULQ prime2, x \
|
||||||
IMULQ R14, val \
|
ROLQ $31, x \
|
||||||
ROLQ $31, val \
|
IMULQ prime1, x
|
||||||
IMULQ R13, val \
|
|
||||||
XORQ val, acc \
|
// mergeRound applies a merge round on the two registers acc and x.
|
||||||
IMULQ R13, acc \
|
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||||
ADDQ DI, acc
|
#define mergeRound(acc, x) \
|
||||||
|
round0(x) \
|
||||||
|
XORQ x, acc \
|
||||||
|
IMULQ prime1, acc \
|
||||||
|
ADDQ prime4, acc
|
||||||
|
|
||||||
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
|
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||||
|
// to process.
|
||||||
|
#define blockLoop() \
|
||||||
|
loop: \
|
||||||
|
MOVQ +0(p), x \
|
||||||
|
round(v1, x) \
|
||||||
|
MOVQ +8(p), x \
|
||||||
|
round(v2, x) \
|
||||||
|
MOVQ +16(p), x \
|
||||||
|
round(v3, x) \
|
||||||
|
MOVQ +24(p), x \
|
||||||
|
round(v4, x) \
|
||||||
|
ADDQ $32, p \
|
||||||
|
CMPQ p, end \
|
||||||
|
JLE loop
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
// func Sum64(b []byte) uint64
|
||||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
// Load fixed primes.
|
// Load fixed primes.
|
||||||
MOVQ ·prime1v(SB), R13
|
MOVQ ·primes+0(SB), prime1
|
||||||
MOVQ ·prime2v(SB), R14
|
MOVQ ·primes+8(SB), prime2
|
||||||
MOVQ ·prime4v(SB), DI
|
MOVQ ·primes+24(SB), prime4
|
||||||
|
|
||||||
// Load slice.
|
// Load slice.
|
||||||
MOVQ b_base+0(FP), SI
|
MOVQ b_base+0(FP), p
|
||||||
MOVQ b_len+8(FP), DX
|
MOVQ b_len+8(FP), n
|
||||||
LEAQ (SI)(DX*1), BX
|
LEAQ (p)(n*1), end
|
||||||
|
|
||||||
// The first loop limit will be len(b)-32.
|
// The first loop limit will be len(b)-32.
|
||||||
SUBQ $32, BX
|
SUBQ $32, end
|
||||||
|
|
||||||
// Check whether we have at least one block.
|
// Check whether we have at least one block.
|
||||||
CMPQ DX, $32
|
CMPQ n, $32
|
||||||
JLT noBlocks
|
JLT noBlocks
|
||||||
|
|
||||||
// Set up initial state (v1, v2, v3, v4).
|
// Set up initial state (v1, v2, v3, v4).
|
||||||
MOVQ R13, R8
|
MOVQ prime1, v1
|
||||||
ADDQ R14, R8
|
ADDQ prime2, v1
|
||||||
MOVQ R14, R9
|
MOVQ prime2, v2
|
||||||
XORQ R10, R10
|
XORQ v3, v3
|
||||||
XORQ R11, R11
|
XORQ v4, v4
|
||||||
SUBQ R13, R11
|
SUBQ prime1, v4
|
||||||
|
|
||||||
// Loop until SI > BX.
|
blockLoop()
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
MOVQ v1, h
|
||||||
JLE blockLoop
|
ROLQ $1, h
|
||||||
|
MOVQ v2, x
|
||||||
|
ROLQ $7, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v3, x
|
||||||
|
ROLQ $12, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v4, x
|
||||||
|
ROLQ $18, x
|
||||||
|
ADDQ x, h
|
||||||
|
|
||||||
MOVQ R8, AX
|
mergeRound(h, v1)
|
||||||
ROLQ $1, AX
|
mergeRound(h, v2)
|
||||||
MOVQ R9, R12
|
mergeRound(h, v3)
|
||||||
ROLQ $7, R12
|
mergeRound(h, v4)
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R10, R12
|
|
||||||
ROLQ $12, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R11, R12
|
|
||||||
ROLQ $18, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
|
|
||||||
mergeRound(AX, R8)
|
|
||||||
mergeRound(AX, R9)
|
|
||||||
mergeRound(AX, R10)
|
|
||||||
mergeRound(AX, R11)
|
|
||||||
|
|
||||||
JMP afterBlocks
|
JMP afterBlocks
|
||||||
|
|
||||||
noBlocks:
|
noBlocks:
|
||||||
MOVQ ·prime5v(SB), AX
|
MOVQ ·primes+32(SB), h
|
||||||
|
|
||||||
afterBlocks:
|
afterBlocks:
|
||||||
ADDQ DX, AX
|
ADDQ n, h
|
||||||
|
|
||||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
ADDQ $24, end
|
||||||
ADDQ $24, BX
|
CMPQ p, end
|
||||||
|
JG try4
|
||||||
|
|
||||||
CMPQ SI, BX
|
loop8:
|
||||||
JG fourByte
|
MOVQ (p), x
|
||||||
|
ADDQ $8, p
|
||||||
|
round0(x)
|
||||||
|
XORQ x, h
|
||||||
|
ROLQ $27, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
ADDQ prime4, h
|
||||||
|
|
||||||
wordLoop:
|
CMPQ p, end
|
||||||
// Calculate k1.
|
JLE loop8
|
||||||
MOVQ (SI), R8
|
|
||||||
ADDQ $8, SI
|
|
||||||
IMULQ R14, R8
|
|
||||||
ROLQ $31, R8
|
|
||||||
IMULQ R13, R8
|
|
||||||
|
|
||||||
XORQ R8, AX
|
try4:
|
||||||
ROLQ $27, AX
|
ADDQ $4, end
|
||||||
IMULQ R13, AX
|
CMPQ p, end
|
||||||
ADDQ DI, AX
|
JG try1
|
||||||
|
|
||||||
CMPQ SI, BX
|
MOVL (p), x
|
||||||
JLE wordLoop
|
ADDQ $4, p
|
||||||
|
IMULQ prime1, x
|
||||||
|
XORQ x, h
|
||||||
|
|
||||||
fourByte:
|
ROLQ $23, h
|
||||||
ADDQ $4, BX
|
IMULQ prime2, h
|
||||||
CMPQ SI, BX
|
ADDQ ·primes+16(SB), h
|
||||||
JG singles
|
|
||||||
|
|
||||||
MOVL (SI), R8
|
try1:
|
||||||
ADDQ $4, SI
|
ADDQ $4, end
|
||||||
IMULQ R13, R8
|
CMPQ p, end
|
||||||
XORQ R8, AX
|
|
||||||
|
|
||||||
ROLQ $23, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
ADDQ ·prime3v(SB), AX
|
|
||||||
|
|
||||||
singles:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JGE finalize
|
JGE finalize
|
||||||
|
|
||||||
singlesLoop:
|
loop1:
|
||||||
MOVBQZX (SI), R12
|
MOVBQZX (p), x
|
||||||
ADDQ $1, SI
|
ADDQ $1, p
|
||||||
IMULQ ·prime5v(SB), R12
|
IMULQ ·primes+32(SB), x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
|
ROLQ $11, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
|
||||||
ROLQ $11, AX
|
CMPQ p, end
|
||||||
IMULQ R13, AX
|
JL loop1
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JL singlesLoop
|
|
||||||
|
|
||||||
finalize:
|
finalize:
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $33, R12
|
SHRQ $33, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
IMULQ R14, AX
|
IMULQ prime2, h
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $29, R12
|
SHRQ $29, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
IMULQ ·prime3v(SB), AX
|
IMULQ ·primes+16(SB), h
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $32, R12
|
SHRQ $32, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
|
|
||||||
MOVQ AX, ret+24(FP)
|
MOVQ h, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
|
||||||
// the d pointer.
|
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
// Load fixed primes needed for round.
|
// Load fixed primes needed for round.
|
||||||
MOVQ ·prime1v(SB), R13
|
MOVQ ·primes+0(SB), prime1
|
||||||
MOVQ ·prime2v(SB), R14
|
MOVQ ·primes+8(SB), prime2
|
||||||
|
|
||||||
// Load slice.
|
// Load slice.
|
||||||
MOVQ b_base+8(FP), SI
|
MOVQ b_base+8(FP), p
|
||||||
MOVQ b_len+16(FP), DX
|
MOVQ b_len+16(FP), n
|
||||||
LEAQ (SI)(DX*1), BX
|
LEAQ (p)(n*1), end
|
||||||
SUBQ $32, BX
|
SUBQ $32, end
|
||||||
|
|
||||||
// Load vN from d.
|
// Load vN from d.
|
||||||
MOVQ d+0(FP), AX
|
MOVQ s+0(FP), d
|
||||||
MOVQ 0(AX), R8 // v1
|
MOVQ 0(d), v1
|
||||||
MOVQ 8(AX), R9 // v2
|
MOVQ 8(d), v2
|
||||||
MOVQ 16(AX), R10 // v3
|
MOVQ 16(d), v3
|
||||||
MOVQ 24(AX), R11 // v4
|
MOVQ 24(d), v4
|
||||||
|
|
||||||
// We don't need to check the loop condition here; this function is
|
// We don't need to check the loop condition here; this function is
|
||||||
// always called with at least one block of data to process.
|
// always called with at least one block of data to process.
|
||||||
blockLoop:
|
blockLoop()
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
// Copy vN back to d.
|
// Copy vN back to d.
|
||||||
MOVQ R8, 0(AX)
|
MOVQ v1, 0(d)
|
||||||
MOVQ R9, 8(AX)
|
MOVQ v2, 8(d)
|
||||||
MOVQ R10, 16(AX)
|
MOVQ v3, 16(d)
|
||||||
MOVQ R11, 24(AX)
|
MOVQ v4, 24(d)
|
||||||
|
|
||||||
// The number of bytes written is SI minus the old base pointer.
|
// The number of bytes written is p minus the old base pointer.
|
||||||
SUBQ b_base+8(FP), SI
|
SUBQ b_base+8(FP), p
|
||||||
MOVQ SI, ret+32(FP)
|
MOVQ p, ret+32(FP)
|
||||||
|
|
||||||
RET
|
RET
|
||||||
|
122
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
generated
vendored
122
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
generated
vendored
@ -1,13 +1,17 @@
|
|||||||
// +build gc,!purego,!noasm
|
//go:build !appengine && gc && !purego && !noasm
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !purego
|
||||||
|
// +build !noasm
|
||||||
|
|
||||||
#include "textflag.h"
|
#include "textflag.h"
|
||||||
|
|
||||||
// Register allocation.
|
// Registers:
|
||||||
#define digest R1
|
#define digest R1
|
||||||
#define h R2 // Return value.
|
#define h R2 // return value
|
||||||
#define p R3 // Input pointer.
|
#define p R3 // input pointer
|
||||||
#define len R4
|
#define n R4 // input length
|
||||||
#define nblocks R5 // len / 32.
|
#define nblocks R5 // n / 32
|
||||||
#define prime1 R7
|
#define prime1 R7
|
||||||
#define prime2 R8
|
#define prime2 R8
|
||||||
#define prime3 R9
|
#define prime3 R9
|
||||||
@ -25,60 +29,52 @@
|
|||||||
#define round(acc, x) \
|
#define round(acc, x) \
|
||||||
MADD prime2, acc, x, acc \
|
MADD prime2, acc, x, acc \
|
||||||
ROR $64-31, acc \
|
ROR $64-31, acc \
|
||||||
MUL prime1, acc \
|
MUL prime1, acc
|
||||||
|
|
||||||
// x = round(0, x).
|
// round0 performs the operation x = round(0, x).
|
||||||
#define round0(x) \
|
#define round0(x) \
|
||||||
MUL prime2, x \
|
MUL prime2, x \
|
||||||
ROR $64-31, x \
|
ROR $64-31, x \
|
||||||
MUL prime1, x \
|
MUL prime1, x
|
||||||
|
|
||||||
#define mergeRound(x) \
|
#define mergeRound(acc, x) \
|
||||||
round0(x) \
|
round0(x) \
|
||||||
EOR x, h \
|
EOR x, acc \
|
||||||
MADD h, prime4, prime1, h \
|
MADD acc, prime4, prime1, acc
|
||||||
|
|
||||||
// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
#define blocksLoop() \
|
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||||
LSR $5, len, nblocks \
|
#define blockLoop() \
|
||||||
|
LSR $5, n, nblocks \
|
||||||
PCALIGN $16 \
|
PCALIGN $16 \
|
||||||
loop: \
|
loop: \
|
||||||
LDP.P 32(p), (x1, x2) \
|
LDP.P 16(p), (x1, x2) \
|
||||||
|
LDP.P 16(p), (x3, x4) \
|
||||||
round(v1, x1) \
|
round(v1, x1) \
|
||||||
LDP -16(p), (x3, x4) \
|
|
||||||
round(v2, x2) \
|
round(v2, x2) \
|
||||||
SUB $1, nblocks \
|
|
||||||
round(v3, x3) \
|
round(v3, x3) \
|
||||||
round(v4, x4) \
|
round(v4, x4) \
|
||||||
CBNZ nblocks, loop \
|
SUB $1, nblocks \
|
||||||
|
CBNZ nblocks, loop
|
||||||
// The primes are repeated here to ensure that they're stored
|
|
||||||
// in a contiguous array, so we can load them with LDP.
|
|
||||||
DATA primes<> +0(SB)/8, $11400714785074694791
|
|
||||||
DATA primes<> +8(SB)/8, $14029467366897019727
|
|
||||||
DATA primes<>+16(SB)/8, $1609587929392839161
|
|
||||||
DATA primes<>+24(SB)/8, $9650029242287828579
|
|
||||||
DATA primes<>+32(SB)/8, $2870177450012600261
|
|
||||||
GLOBL primes<>(SB), NOPTR+RODATA, $40
|
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
// func Sum64(b []byte) uint64
|
||||||
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
LDP b_base+0(FP), (p, len)
|
LDP b_base+0(FP), (p, n)
|
||||||
|
|
||||||
LDP primes<> +0(SB), (prime1, prime2)
|
LDP ·primes+0(SB), (prime1, prime2)
|
||||||
LDP primes<>+16(SB), (prime3, prime4)
|
LDP ·primes+16(SB), (prime3, prime4)
|
||||||
MOVD primes<>+32(SB), prime5
|
MOVD ·primes+32(SB), prime5
|
||||||
|
|
||||||
CMP $32, len
|
CMP $32, n
|
||||||
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
|
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||||
BLO afterLoop
|
BLT afterLoop
|
||||||
|
|
||||||
ADD prime1, prime2, v1
|
ADD prime1, prime2, v1
|
||||||
MOVD prime2, v2
|
MOVD prime2, v2
|
||||||
MOVD $0, v3
|
MOVD $0, v3
|
||||||
NEG prime1, v4
|
NEG prime1, v4
|
||||||
|
|
||||||
blocksLoop()
|
blockLoop()
|
||||||
|
|
||||||
ROR $64-1, v1, x1
|
ROR $64-1, v1, x1
|
||||||
ROR $64-7, v2, x2
|
ROR $64-7, v2, x2
|
||||||
@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
|
|||||||
ADD x3, x4
|
ADD x3, x4
|
||||||
ADD x2, x4, h
|
ADD x2, x4, h
|
||||||
|
|
||||||
mergeRound(v1)
|
mergeRound(h, v1)
|
||||||
mergeRound(v2)
|
mergeRound(h, v2)
|
||||||
mergeRound(v3)
|
mergeRound(h, v3)
|
||||||
mergeRound(v4)
|
mergeRound(h, v4)
|
||||||
|
|
||||||
afterLoop:
|
afterLoop:
|
||||||
ADD len, h
|
ADD n, h
|
||||||
|
|
||||||
TBZ $4, len, try8
|
TBZ $4, n, try8
|
||||||
LDP.P 16(p), (x1, x2)
|
LDP.P 16(p), (x1, x2)
|
||||||
|
|
||||||
round0(x1)
|
round0(x1)
|
||||||
|
|
||||||
|
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||||
|
// rotated register) is worth a small but measurable speedup for small
|
||||||
|
// inputs.
|
||||||
ROR $64-27, h
|
ROR $64-27, h
|
||||||
EOR x1 @> 64-27, h, h
|
EOR x1 @> 64-27, h, h
|
||||||
MADD h, prime4, prime1, h
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
round0(x2)
|
round0(x2)
|
||||||
ROR $64-27, h
|
ROR $64-27, h
|
||||||
EOR x2 @> 64-27, h
|
EOR x2 @> 64-27, h, h
|
||||||
MADD h, prime4, prime1, h
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
try8:
|
try8:
|
||||||
TBZ $3, len, try4
|
TBZ $3, n, try4
|
||||||
MOVD.P 8(p), x1
|
MOVD.P 8(p), x1
|
||||||
|
|
||||||
round0(x1)
|
round0(x1)
|
||||||
ROR $64-27, h
|
ROR $64-27, h
|
||||||
EOR x1 @> 64-27, h
|
EOR x1 @> 64-27, h, h
|
||||||
MADD h, prime4, prime1, h
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
try4:
|
try4:
|
||||||
TBZ $2, len, try2
|
TBZ $2, n, try2
|
||||||
MOVWU.P 4(p), x2
|
MOVWU.P 4(p), x2
|
||||||
|
|
||||||
MUL prime1, x2
|
MUL prime1, x2
|
||||||
ROR $64-23, h
|
ROR $64-23, h
|
||||||
EOR x2 @> 64-23, h
|
EOR x2 @> 64-23, h, h
|
||||||
MADD h, prime3, prime2, h
|
MADD h, prime3, prime2, h
|
||||||
|
|
||||||
try2:
|
try2:
|
||||||
TBZ $1, len, try1
|
TBZ $1, n, try1
|
||||||
MOVHU.P 2(p), x3
|
MOVHU.P 2(p), x3
|
||||||
AND $255, x3, x1
|
AND $255, x3, x1
|
||||||
LSR $8, x3, x2
|
LSR $8, x3, x2
|
||||||
|
|
||||||
MUL prime5, x1
|
MUL prime5, x1
|
||||||
ROR $64-11, h
|
ROR $64-11, h
|
||||||
EOR x1 @> 64-11, h
|
EOR x1 @> 64-11, h, h
|
||||||
MUL prime1, h
|
MUL prime1, h
|
||||||
|
|
||||||
MUL prime5, x2
|
MUL prime5, x2
|
||||||
ROR $64-11, h
|
ROR $64-11, h
|
||||||
EOR x2 @> 64-11, h
|
EOR x2 @> 64-11, h, h
|
||||||
MUL prime1, h
|
MUL prime1, h
|
||||||
|
|
||||||
try1:
|
try1:
|
||||||
TBZ $0, len, end
|
TBZ $0, n, finalize
|
||||||
MOVBU (p), x4
|
MOVBU (p), x4
|
||||||
|
|
||||||
MUL prime5, x4
|
MUL prime5, x4
|
||||||
ROR $64-11, h
|
ROR $64-11, h
|
||||||
EOR x4 @> 64-11, h
|
EOR x4 @> 64-11, h, h
|
||||||
MUL prime1, h
|
MUL prime1, h
|
||||||
|
|
||||||
end:
|
finalize:
|
||||||
EOR h >> 33, h
|
EOR h >> 33, h
|
||||||
MUL prime2, h
|
MUL prime2, h
|
||||||
EOR h >> 29, h
|
EOR h >> 29, h
|
||||||
@ -163,24 +163,22 @@ end:
|
|||||||
RET
|
RET
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
//
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
// Assumes len(b) >= 32.
|
LDP ·primes+0(SB), (prime1, prime2)
|
||||||
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
|
|
||||||
LDP primes<>(SB), (prime1, prime2)
|
|
||||||
|
|
||||||
// Load state. Assume v[1-4] are stored contiguously.
|
// Load state. Assume v[1-4] are stored contiguously.
|
||||||
MOVD d+0(FP), digest
|
MOVD d+0(FP), digest
|
||||||
LDP 0(digest), (v1, v2)
|
LDP 0(digest), (v1, v2)
|
||||||
LDP 16(digest), (v3, v4)
|
LDP 16(digest), (v3, v4)
|
||||||
|
|
||||||
LDP b_base+8(FP), (p, len)
|
LDP b_base+8(FP), (p, n)
|
||||||
|
|
||||||
blocksLoop()
|
blockLoop()
|
||||||
|
|
||||||
// Store updated state.
|
// Store updated state.
|
||||||
STP (v1, v2), 0(digest)
|
STP (v1, v2), 0(digest)
|
||||||
STP (v3, v4), 16(digest)
|
STP (v3, v4), 16(digest)
|
||||||
|
|
||||||
BIC $31, len
|
BIC $31, n
|
||||||
MOVD len, ret+32(FP)
|
MOVD n, ret+32(FP)
|
||||||
RET
|
RET
|
||||||
|
2
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
generated
vendored
2
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
generated
vendored
@ -13,4 +13,4 @@ package xxhash
|
|||||||
func Sum64(b []byte) uint64
|
func Sum64(b []byte) uint64
|
||||||
|
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func writeBlocks(d *Digest, b []byte) int
|
func writeBlocks(s *Digest, b []byte) int
|
||||||
|
19
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
generated
vendored
19
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
generated
vendored
@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
|||||||
var h uint64
|
var h uint64
|
||||||
|
|
||||||
if n >= 32 {
|
if n >= 32 {
|
||||||
v1 := prime1v + prime2
|
v1 := primes[0] + prime2
|
||||||
v2 := prime2
|
v2 := prime2
|
||||||
v3 := uint64(0)
|
v3 := uint64(0)
|
||||||
v4 := -prime1v
|
v4 := -primes[0]
|
||||||
for len(b) >= 32 {
|
for len(b) >= 32 {
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||||
@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
|||||||
|
|
||||||
h += uint64(n)
|
h += uint64(n)
|
||||||
|
|
||||||
i, end := 0, len(b)
|
for ; len(b) >= 8; b = b[8:] {
|
||||||
for ; i+8 <= end; i += 8 {
|
k1 := round(0, u64(b[:8]))
|
||||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
|
||||||
h ^= k1
|
h ^= k1
|
||||||
h = rol27(h)*prime1 + prime4
|
h = rol27(h)*prime1 + prime4
|
||||||
}
|
}
|
||||||
if i+4 <= end {
|
if len(b) >= 4 {
|
||||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
h ^= uint64(u32(b[:4])) * prime1
|
||||||
h = rol23(h)*prime2 + prime3
|
h = rol23(h)*prime2 + prime3
|
||||||
i += 4
|
b = b[4:]
|
||||||
}
|
}
|
||||||
for ; i < end; i++ {
|
for ; len(b) > 0; b = b[1:] {
|
||||||
h ^= uint64(b[i]) * prime5
|
h ^= uint64(b[0]) * prime5
|
||||||
h = rol11(h) * prime1
|
h = rol11(h) * prime1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
22
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
22
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
@ -99,6 +99,21 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *sequenceDecs) freeDecoders() {
|
||||||
|
if f := s.litLengths.fse; f != nil && !f.preDefined {
|
||||||
|
fseDecoderPool.Put(f)
|
||||||
|
s.litLengths.fse = nil
|
||||||
|
}
|
||||||
|
if f := s.offsets.fse; f != nil && !f.preDefined {
|
||||||
|
fseDecoderPool.Put(f)
|
||||||
|
s.offsets.fse = nil
|
||||||
|
}
|
||||||
|
if f := s.matchLengths.fse; f != nil && !f.preDefined {
|
||||||
|
fseDecoderPool.Put(f)
|
||||||
|
s.matchLengths.fse = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// execute will execute the decoded sequence with the provided history.
|
// execute will execute the decoded sequence with the provided history.
|
||||||
// The sequence must be evaluated before being sent.
|
// The sequence must be evaluated before being sent.
|
||||||
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
|
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
|
||||||
@ -299,7 +314,10 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
|
|||||||
}
|
}
|
||||||
size := ll + ml + len(out)
|
size := ll + ml + len(out)
|
||||||
if size-startSize > maxBlockSize {
|
if size-startSize > maxBlockSize {
|
||||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
|
if size-startSize == 424242 {
|
||||||
|
panic("here")
|
||||||
|
}
|
||||||
|
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
}
|
}
|
||||||
if size > cap(out) {
|
if size > cap(out) {
|
||||||
// Not enough size, which can happen under high volume block streaming conditions
|
// Not enough size, which can happen under high volume block streaming conditions
|
||||||
@ -411,7 +429,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
|
|||||||
|
|
||||||
// Check if space for literals
|
// Check if space for literals
|
||||||
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
|
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
|
||||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
|
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add final literals
|
// Add final literals
|
||||||
|
23
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
generated
vendored
23
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
generated
vendored
@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
|
|||||||
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
|
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
|
||||||
//
|
//
|
||||||
// Please refer to seqdec_generic.go for the reference implementation.
|
// Please refer to seqdec_generic.go for the reference implementation.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||||
|
|
||||||
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
|
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||||
|
|
||||||
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
|
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||||
|
|
||||||
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
|
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||||
|
|
||||||
@ -55,6 +59,11 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
|
|||||||
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
|
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FIXME: Using unsafe memory copies leads to rare, random crashes
|
||||||
|
// with fuzz testing. It is therefore disabled for now.
|
||||||
|
const useSafe = true
|
||||||
|
/*
|
||||||
useSafe := false
|
useSafe := false
|
||||||
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
|
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
|
||||||
useSafe = true
|
useSafe = true
|
||||||
@ -65,6 +74,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
|
|||||||
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
|
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
|
||||||
useSafe = true
|
useSafe = true
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
br := s.br
|
br := s.br
|
||||||
|
|
||||||
@ -129,7 +139,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
|
|||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
|
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
|
||||||
}
|
}
|
||||||
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
|
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
|
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
|
||||||
@ -137,7 +147,8 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
|
|||||||
|
|
||||||
s.seqSize += ctx.litRemain
|
s.seqSize += ctx.litRemain
|
||||||
if s.seqSize > maxBlockSize {
|
if s.seqSize > maxBlockSize {
|
||||||
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
|
|
||||||
}
|
}
|
||||||
err := br.close()
|
err := br.close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -195,20 +206,24 @@ const errorNotEnoughSpace = 5
|
|||||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
|
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
|
||||||
//
|
//
|
||||||
// Please refer to seqdec_generic.go for the reference implementation.
|
// Please refer to seqdec_generic.go for the reference implementation.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
|
|
||||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
|
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
|
||||||
//
|
//
|
||||||
// Please refer to seqdec_generic.go for the reference implementation.
|
// Please refer to seqdec_generic.go for the reference implementation.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
|
|
||||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
|
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
|
|
||||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
|
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
|
|
||||||
@ -275,7 +290,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||||||
|
|
||||||
s.seqSize += ctx.litRemain
|
s.seqSize += ctx.litRemain
|
||||||
if s.seqSize > maxBlockSize {
|
if s.seqSize > maxBlockSize {
|
||||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
}
|
}
|
||||||
err := br.close()
|
err := br.close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -302,10 +317,12 @@ type executeAsmContext struct {
|
|||||||
// Returns false if a match offset is too big.
|
// Returns false if a match offset is too big.
|
||||||
//
|
//
|
||||||
// Please refer to seqdec_generic.go for the reference implementation.
|
// Please refer to seqdec_generic.go for the reference implementation.
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
||||||
|
|
||||||
// Same as above, but with safe memcopies
|
// Same as above, but with safe memcopies
|
||||||
|
//
|
||||||
//go:noescape
|
//go:noescape
|
||||||
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
|
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
|
||||||
|
|
||||||
|
273
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
generated
vendored
273
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
|
// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
|
||||||
|
|
||||||
//go:build !appengine && !noasm && gc && !noasm
|
//go:build !appengine && !noasm && gc && !noasm
|
||||||
// +build !appengine,!noasm,gc,!noasm
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: CMOV
|
// Requires: CMOV
|
||||||
@ -57,13 +56,19 @@ sequenceDecs_decode_amd64_fill_end:
|
|||||||
MOVQ DX, R15
|
MOVQ DX, R15
|
||||||
SHLQ CL, R15
|
SHLQ CL, R15
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R15
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R15
|
JZ sequenceDecs_decode_amd64_of_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decode_amd64_of_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decode_amd64_of_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R15
|
||||||
ADDQ R15, AX
|
ADDQ R15, AX
|
||||||
|
|
||||||
|
sequenceDecs_decode_amd64_of_update_zero:
|
||||||
MOVQ AX, 16(R10)
|
MOVQ AX, 16(R10)
|
||||||
|
|
||||||
// Update match length
|
// Update match length
|
||||||
@ -72,13 +77,19 @@ sequenceDecs_decode_amd64_fill_end:
|
|||||||
MOVQ DX, R15
|
MOVQ DX, R15
|
||||||
SHLQ CL, R15
|
SHLQ CL, R15
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R15
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R15
|
JZ sequenceDecs_decode_amd64_ml_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decode_amd64_ml_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decode_amd64_ml_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R15
|
||||||
ADDQ R15, AX
|
ADDQ R15, AX
|
||||||
|
|
||||||
|
sequenceDecs_decode_amd64_ml_update_zero:
|
||||||
MOVQ AX, 8(R10)
|
MOVQ AX, 8(R10)
|
||||||
|
|
||||||
// Fill bitreader to have enough for the remaining
|
// Fill bitreader to have enough for the remaining
|
||||||
@ -112,13 +123,19 @@ sequenceDecs_decode_amd64_fill_2_end:
|
|||||||
MOVQ DX, R15
|
MOVQ DX, R15
|
||||||
SHLQ CL, R15
|
SHLQ CL, R15
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R15
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R15
|
JZ sequenceDecs_decode_amd64_ll_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decode_amd64_ll_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decode_amd64_ll_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R15
|
||||||
ADDQ R15, AX
|
ADDQ R15, AX
|
||||||
|
|
||||||
|
sequenceDecs_decode_amd64_ll_update_zero:
|
||||||
MOVQ AX, (R10)
|
MOVQ AX, (R10)
|
||||||
|
|
||||||
// Fill bitreader for state updates
|
// Fill bitreader for state updates
|
||||||
@ -198,7 +215,7 @@ sequenceDecs_decode_amd64_skip_update:
|
|||||||
MOVQ R12, R13
|
MOVQ R12, R13
|
||||||
MOVQ R11, R12
|
MOVQ R11, R12
|
||||||
MOVQ CX, R11
|
MOVQ CX, R11
|
||||||
JMP sequenceDecs_decode_amd64_adjust_end
|
JMP sequenceDecs_decode_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
|
sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
|
||||||
CMPQ (R10), $0x00000000
|
CMPQ (R10), $0x00000000
|
||||||
@ -210,7 +227,7 @@ sequenceDecs_decode_amd64_adjust_offset_maybezero:
|
|||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
|
JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
|
||||||
MOVQ R11, CX
|
MOVQ R11, CX
|
||||||
JMP sequenceDecs_decode_amd64_adjust_end
|
JMP sequenceDecs_decode_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_adjust_offset_nonzero:
|
sequenceDecs_decode_amd64_adjust_offset_nonzero:
|
||||||
CMPQ CX, $0x01
|
CMPQ CX, $0x01
|
||||||
@ -247,7 +264,7 @@ sequenceDecs_decode_amd64_adjust_temp_valid:
|
|||||||
MOVQ AX, R11
|
MOVQ AX, R11
|
||||||
MOVQ AX, CX
|
MOVQ AX, CX
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_adjust_end:
|
sequenceDecs_decode_amd64_after_adjust:
|
||||||
MOVQ CX, 16(R10)
|
MOVQ CX, 16(R10)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -303,10 +320,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with not enough output space error
|
|
||||||
MOVQ $0x00000005, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: CMOV
|
// Requires: CMOV
|
||||||
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
|
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
|
||||||
@ -361,13 +374,19 @@ sequenceDecs_decode_56_amd64_fill_end:
|
|||||||
MOVQ DX, R15
|
MOVQ DX, R15
|
||||||
SHLQ CL, R15
|
SHLQ CL, R15
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R15
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R15
|
JZ sequenceDecs_decode_56_amd64_of_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decode_56_amd64_of_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decode_56_amd64_of_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R15
|
||||||
ADDQ R15, AX
|
ADDQ R15, AX
|
||||||
|
|
||||||
|
sequenceDecs_decode_56_amd64_of_update_zero:
|
||||||
MOVQ AX, 16(R10)
|
MOVQ AX, 16(R10)
|
||||||
|
|
||||||
// Update match length
|
// Update match length
|
||||||
@ -376,13 +395,19 @@ sequenceDecs_decode_56_amd64_fill_end:
|
|||||||
MOVQ DX, R15
|
MOVQ DX, R15
|
||||||
SHLQ CL, R15
|
SHLQ CL, R15
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R15
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R15
|
JZ sequenceDecs_decode_56_amd64_ml_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decode_56_amd64_ml_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decode_56_amd64_ml_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R15
|
||||||
ADDQ R15, AX
|
ADDQ R15, AX
|
||||||
|
|
||||||
|
sequenceDecs_decode_56_amd64_ml_update_zero:
|
||||||
MOVQ AX, 8(R10)
|
MOVQ AX, 8(R10)
|
||||||
|
|
||||||
// Update literal length
|
// Update literal length
|
||||||
@ -391,13 +416,19 @@ sequenceDecs_decode_56_amd64_fill_end:
|
|||||||
MOVQ DX, R15
|
MOVQ DX, R15
|
||||||
SHLQ CL, R15
|
SHLQ CL, R15
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R15
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R15
|
JZ sequenceDecs_decode_56_amd64_ll_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decode_56_amd64_ll_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decode_56_amd64_ll_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R15
|
||||||
ADDQ R15, AX
|
ADDQ R15, AX
|
||||||
|
|
||||||
|
sequenceDecs_decode_56_amd64_ll_update_zero:
|
||||||
MOVQ AX, (R10)
|
MOVQ AX, (R10)
|
||||||
|
|
||||||
// Fill bitreader for state updates
|
// Fill bitreader for state updates
|
||||||
@ -477,7 +508,7 @@ sequenceDecs_decode_56_amd64_skip_update:
|
|||||||
MOVQ R12, R13
|
MOVQ R12, R13
|
||||||
MOVQ R11, R12
|
MOVQ R11, R12
|
||||||
MOVQ CX, R11
|
MOVQ CX, R11
|
||||||
JMP sequenceDecs_decode_56_amd64_adjust_end
|
JMP sequenceDecs_decode_56_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
|
sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
|
||||||
CMPQ (R10), $0x00000000
|
CMPQ (R10), $0x00000000
|
||||||
@ -489,7 +520,7 @@ sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
|
|||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
|
JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
|
||||||
MOVQ R11, CX
|
MOVQ R11, CX
|
||||||
JMP sequenceDecs_decode_56_amd64_adjust_end
|
JMP sequenceDecs_decode_56_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
|
sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
|
||||||
CMPQ CX, $0x01
|
CMPQ CX, $0x01
|
||||||
@ -526,7 +557,7 @@ sequenceDecs_decode_56_amd64_adjust_temp_valid:
|
|||||||
MOVQ AX, R11
|
MOVQ AX, R11
|
||||||
MOVQ AX, CX
|
MOVQ AX, CX
|
||||||
|
|
||||||
sequenceDecs_decode_56_amd64_adjust_end:
|
sequenceDecs_decode_56_amd64_after_adjust:
|
||||||
MOVQ CX, 16(R10)
|
MOVQ CX, 16(R10)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -582,10 +613,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with not enough output space error
|
|
||||||
MOVQ $0x00000005, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: BMI, BMI2, CMOV
|
// Requires: BMI, BMI2, CMOV
|
||||||
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
|
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
|
||||||
@ -757,7 +784,7 @@ sequenceDecs_decode_bmi2_skip_update:
|
|||||||
MOVQ R11, R12
|
MOVQ R11, R12
|
||||||
MOVQ R10, R11
|
MOVQ R10, R11
|
||||||
MOVQ CX, R10
|
MOVQ CX, R10
|
||||||
JMP sequenceDecs_decode_bmi2_adjust_end
|
JMP sequenceDecs_decode_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
|
sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
|
||||||
CMPQ (R9), $0x00000000
|
CMPQ (R9), $0x00000000
|
||||||
@ -769,7 +796,7 @@ sequenceDecs_decode_bmi2_adjust_offset_maybezero:
|
|||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
|
JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
|
||||||
MOVQ R10, CX
|
MOVQ R10, CX
|
||||||
JMP sequenceDecs_decode_bmi2_adjust_end
|
JMP sequenceDecs_decode_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_adjust_offset_nonzero:
|
sequenceDecs_decode_bmi2_adjust_offset_nonzero:
|
||||||
CMPQ CX, $0x01
|
CMPQ CX, $0x01
|
||||||
@ -806,7 +833,7 @@ sequenceDecs_decode_bmi2_adjust_temp_valid:
|
|||||||
MOVQ R13, R10
|
MOVQ R13, R10
|
||||||
MOVQ R13, CX
|
MOVQ R13, CX
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_adjust_end:
|
sequenceDecs_decode_bmi2_after_adjust:
|
||||||
MOVQ CX, 16(R9)
|
MOVQ CX, 16(R9)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -862,10 +889,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with not enough output space error
|
|
||||||
MOVQ $0x00000005, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: BMI, BMI2, CMOV
|
// Requires: BMI, BMI2, CMOV
|
||||||
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
|
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
|
||||||
@ -1012,7 +1035,7 @@ sequenceDecs_decode_56_bmi2_skip_update:
|
|||||||
MOVQ R11, R12
|
MOVQ R11, R12
|
||||||
MOVQ R10, R11
|
MOVQ R10, R11
|
||||||
MOVQ CX, R10
|
MOVQ CX, R10
|
||||||
JMP sequenceDecs_decode_56_bmi2_adjust_end
|
JMP sequenceDecs_decode_56_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
|
sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
|
||||||
CMPQ (R9), $0x00000000
|
CMPQ (R9), $0x00000000
|
||||||
@ -1024,7 +1047,7 @@ sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
|
|||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
|
JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
|
||||||
MOVQ R10, CX
|
MOVQ R10, CX
|
||||||
JMP sequenceDecs_decode_56_bmi2_adjust_end
|
JMP sequenceDecs_decode_56_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
|
sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
|
||||||
CMPQ CX, $0x01
|
CMPQ CX, $0x01
|
||||||
@ -1061,7 +1084,7 @@ sequenceDecs_decode_56_bmi2_adjust_temp_valid:
|
|||||||
MOVQ R13, R10
|
MOVQ R13, R10
|
||||||
MOVQ R13, CX
|
MOVQ R13, CX
|
||||||
|
|
||||||
sequenceDecs_decode_56_bmi2_adjust_end:
|
sequenceDecs_decode_56_bmi2_after_adjust:
|
||||||
MOVQ CX, 16(R9)
|
MOVQ CX, 16(R9)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -1117,10 +1140,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with not enough output space error
|
|
||||||
MOVQ $0x00000005, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
||||||
// Requires: SSE
|
// Requires: SSE
|
||||||
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
|
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
|
||||||
@ -1354,8 +1373,7 @@ loop_finished:
|
|||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ DX, 24(AX)
|
MOVQ DX, 24(AX)
|
||||||
MOVQ DI, 104(AX)
|
MOVQ DI, 104(AX)
|
||||||
MOVQ 80(AX), CX
|
SUBQ 80(AX), SI
|
||||||
SUBQ CX, SI
|
|
||||||
MOVQ SI, 112(AX)
|
MOVQ SI, 112(AX)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
@ -1367,8 +1385,7 @@ error_match_off_too_big:
|
|||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ DX, 24(AX)
|
MOVQ DX, 24(AX)
|
||||||
MOVQ DI, 104(AX)
|
MOVQ DI, 104(AX)
|
||||||
MOVQ 80(AX), CX
|
SUBQ 80(AX), SI
|
||||||
SUBQ CX, SI
|
|
||||||
MOVQ SI, 112(AX)
|
MOVQ SI, 112(AX)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
@ -1712,8 +1729,7 @@ loop_finished:
|
|||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ DX, 24(AX)
|
MOVQ DX, 24(AX)
|
||||||
MOVQ DI, 104(AX)
|
MOVQ DI, 104(AX)
|
||||||
MOVQ 80(AX), CX
|
SUBQ 80(AX), SI
|
||||||
SUBQ CX, SI
|
|
||||||
MOVQ SI, 112(AX)
|
MOVQ SI, 112(AX)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
@ -1725,8 +1741,7 @@ error_match_off_too_big:
|
|||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ DX, 24(AX)
|
MOVQ DX, 24(AX)
|
||||||
MOVQ DI, 104(AX)
|
MOVQ DI, 104(AX)
|
||||||
MOVQ 80(AX), CX
|
SUBQ 80(AX), SI
|
||||||
SUBQ CX, SI
|
|
||||||
MOVQ SI, 112(AX)
|
MOVQ SI, 112(AX)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
@ -1749,6 +1764,10 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
|
|||||||
MOVQ 72(AX), DI
|
MOVQ 72(AX), DI
|
||||||
MOVQ 80(AX), R8
|
MOVQ 80(AX), R8
|
||||||
MOVQ 88(AX), R9
|
MOVQ 88(AX), R9
|
||||||
|
XORQ CX, CX
|
||||||
|
MOVQ CX, 8(SP)
|
||||||
|
MOVQ CX, 16(SP)
|
||||||
|
MOVQ CX, 24(SP)
|
||||||
MOVQ 112(AX), R10
|
MOVQ 112(AX), R10
|
||||||
MOVQ 128(AX), CX
|
MOVQ 128(AX), CX
|
||||||
MOVQ CX, 32(SP)
|
MOVQ CX, 32(SP)
|
||||||
@ -1803,13 +1822,19 @@ sequenceDecs_decodeSync_amd64_fill_end:
|
|||||||
MOVQ DX, R14
|
MOVQ DX, R14
|
||||||
SHLQ CL, R14
|
SHLQ CL, R14
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R14
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R14
|
JZ sequenceDecs_decodeSync_amd64_of_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decodeSync_amd64_of_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decodeSync_amd64_of_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R14
|
||||||
ADDQ R14, AX
|
ADDQ R14, AX
|
||||||
|
|
||||||
|
sequenceDecs_decodeSync_amd64_of_update_zero:
|
||||||
MOVQ AX, 8(SP)
|
MOVQ AX, 8(SP)
|
||||||
|
|
||||||
// Update match length
|
// Update match length
|
||||||
@ -1818,13 +1843,19 @@ sequenceDecs_decodeSync_amd64_fill_end:
|
|||||||
MOVQ DX, R14
|
MOVQ DX, R14
|
||||||
SHLQ CL, R14
|
SHLQ CL, R14
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R14
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R14
|
JZ sequenceDecs_decodeSync_amd64_ml_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decodeSync_amd64_ml_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decodeSync_amd64_ml_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R14
|
||||||
ADDQ R14, AX
|
ADDQ R14, AX
|
||||||
|
|
||||||
|
sequenceDecs_decodeSync_amd64_ml_update_zero:
|
||||||
MOVQ AX, 16(SP)
|
MOVQ AX, 16(SP)
|
||||||
|
|
||||||
// Fill bitreader to have enough for the remaining
|
// Fill bitreader to have enough for the remaining
|
||||||
@ -1858,13 +1889,19 @@ sequenceDecs_decodeSync_amd64_fill_2_end:
|
|||||||
MOVQ DX, R14
|
MOVQ DX, R14
|
||||||
SHLQ CL, R14
|
SHLQ CL, R14
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R14
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R14
|
JZ sequenceDecs_decodeSync_amd64_ll_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decodeSync_amd64_ll_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decodeSync_amd64_ll_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R14
|
||||||
ADDQ R14, AX
|
ADDQ R14, AX
|
||||||
|
|
||||||
|
sequenceDecs_decodeSync_amd64_ll_update_zero:
|
||||||
MOVQ AX, 24(SP)
|
MOVQ AX, 24(SP)
|
||||||
|
|
||||||
// Fill bitreader for state updates
|
// Fill bitreader for state updates
|
||||||
@ -1945,7 +1982,7 @@ sequenceDecs_decodeSync_amd64_skip_update:
|
|||||||
MOVUPS 144(CX), X0
|
MOVUPS 144(CX), X0
|
||||||
MOVQ R13, 144(CX)
|
MOVQ R13, 144(CX)
|
||||||
MOVUPS X0, 152(CX)
|
MOVUPS X0, 152(CX)
|
||||||
JMP sequenceDecs_decodeSync_amd64_adjust_end
|
JMP sequenceDecs_decodeSync_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
|
sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
|
||||||
CMPQ 24(SP), $0x00000000
|
CMPQ 24(SP), $0x00000000
|
||||||
@ -1957,7 +1994,7 @@ sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
|
|||||||
TESTQ R13, R13
|
TESTQ R13, R13
|
||||||
JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
|
JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
|
||||||
MOVQ 144(CX), R13
|
MOVQ 144(CX), R13
|
||||||
JMP sequenceDecs_decodeSync_amd64_adjust_end
|
JMP sequenceDecs_decodeSync_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
|
sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
|
||||||
MOVQ R13, AX
|
MOVQ R13, AX
|
||||||
@ -1966,8 +2003,7 @@ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
|
|||||||
CMPQ R13, $0x03
|
CMPQ R13, $0x03
|
||||||
CMOVQEQ R14, AX
|
CMOVQEQ R14, AX
|
||||||
CMOVQEQ R15, R14
|
CMOVQEQ R15, R14
|
||||||
LEAQ 144(CX), R15
|
ADDQ 144(CX)(AX*8), R14
|
||||||
ADDQ (R15)(AX*8), R14
|
|
||||||
JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
|
JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
|
||||||
MOVQ $0x00000001, R14
|
MOVQ $0x00000001, R14
|
||||||
|
|
||||||
@ -1983,7 +2019,7 @@ sequenceDecs_decodeSync_amd64_adjust_skip:
|
|||||||
MOVQ R14, 144(CX)
|
MOVQ R14, 144(CX)
|
||||||
MOVQ R14, R13
|
MOVQ R14, R13
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_adjust_end:
|
sequenceDecs_decodeSync_amd64_after_adjust:
|
||||||
MOVQ R13, 8(SP)
|
MOVQ R13, 8(SP)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -2280,6 +2316,10 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
|
|||||||
MOVQ 72(CX), SI
|
MOVQ 72(CX), SI
|
||||||
MOVQ 80(CX), DI
|
MOVQ 80(CX), DI
|
||||||
MOVQ 88(CX), R8
|
MOVQ 88(CX), R8
|
||||||
|
XORQ R9, R9
|
||||||
|
MOVQ R9, 8(SP)
|
||||||
|
MOVQ R9, 16(SP)
|
||||||
|
MOVQ R9, 24(SP)
|
||||||
MOVQ 112(CX), R9
|
MOVQ 112(CX), R9
|
||||||
MOVQ 128(CX), R10
|
MOVQ 128(CX), R10
|
||||||
MOVQ R10, 32(SP)
|
MOVQ R10, 32(SP)
|
||||||
@ -2452,7 +2492,7 @@ sequenceDecs_decodeSync_bmi2_skip_update:
|
|||||||
MOVUPS 144(CX), X0
|
MOVUPS 144(CX), X0
|
||||||
MOVQ R13, 144(CX)
|
MOVQ R13, 144(CX)
|
||||||
MOVUPS X0, 152(CX)
|
MOVUPS X0, 152(CX)
|
||||||
JMP sequenceDecs_decodeSync_bmi2_adjust_end
|
JMP sequenceDecs_decodeSync_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
|
sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
|
||||||
CMPQ 24(SP), $0x00000000
|
CMPQ 24(SP), $0x00000000
|
||||||
@ -2464,7 +2504,7 @@ sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
|
|||||||
TESTQ R13, R13
|
TESTQ R13, R13
|
||||||
JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
|
JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
|
||||||
MOVQ 144(CX), R13
|
MOVQ 144(CX), R13
|
||||||
JMP sequenceDecs_decodeSync_bmi2_adjust_end
|
JMP sequenceDecs_decodeSync_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
|
sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
|
||||||
MOVQ R13, R12
|
MOVQ R13, R12
|
||||||
@ -2473,8 +2513,7 @@ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
|
|||||||
CMPQ R13, $0x03
|
CMPQ R13, $0x03
|
||||||
CMOVQEQ R14, R12
|
CMOVQEQ R14, R12
|
||||||
CMOVQEQ R15, R14
|
CMOVQEQ R15, R14
|
||||||
LEAQ 144(CX), R15
|
ADDQ 144(CX)(R12*8), R14
|
||||||
ADDQ (R15)(R12*8), R14
|
|
||||||
JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
|
JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
|
||||||
MOVQ $0x00000001, R14
|
MOVQ $0x00000001, R14
|
||||||
|
|
||||||
@ -2490,7 +2529,7 @@ sequenceDecs_decodeSync_bmi2_adjust_skip:
|
|||||||
MOVQ R14, 144(CX)
|
MOVQ R14, 144(CX)
|
||||||
MOVQ R14, R13
|
MOVQ R14, R13
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_adjust_end:
|
sequenceDecs_decodeSync_bmi2_after_adjust:
|
||||||
MOVQ R13, 8(SP)
|
MOVQ R13, 8(SP)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -2787,6 +2826,10 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
|
|||||||
MOVQ 72(AX), DI
|
MOVQ 72(AX), DI
|
||||||
MOVQ 80(AX), R8
|
MOVQ 80(AX), R8
|
||||||
MOVQ 88(AX), R9
|
MOVQ 88(AX), R9
|
||||||
|
XORQ CX, CX
|
||||||
|
MOVQ CX, 8(SP)
|
||||||
|
MOVQ CX, 16(SP)
|
||||||
|
MOVQ CX, 24(SP)
|
||||||
MOVQ 112(AX), R10
|
MOVQ 112(AX), R10
|
||||||
MOVQ 128(AX), CX
|
MOVQ 128(AX), CX
|
||||||
MOVQ CX, 32(SP)
|
MOVQ CX, 32(SP)
|
||||||
@ -2841,13 +2884,19 @@ sequenceDecs_decodeSync_safe_amd64_fill_end:
|
|||||||
MOVQ DX, R14
|
MOVQ DX, R14
|
||||||
SHLQ CL, R14
|
SHLQ CL, R14
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R14
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R14
|
JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decodeSync_safe_amd64_of_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R14
|
||||||
ADDQ R14, AX
|
ADDQ R14, AX
|
||||||
|
|
||||||
|
sequenceDecs_decodeSync_safe_amd64_of_update_zero:
|
||||||
MOVQ AX, 8(SP)
|
MOVQ AX, 8(SP)
|
||||||
|
|
||||||
// Update match length
|
// Update match length
|
||||||
@ -2856,13 +2905,19 @@ sequenceDecs_decodeSync_safe_amd64_fill_end:
|
|||||||
MOVQ DX, R14
|
MOVQ DX, R14
|
||||||
SHLQ CL, R14
|
SHLQ CL, R14
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R14
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R14
|
JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R14
|
||||||
ADDQ R14, AX
|
ADDQ R14, AX
|
||||||
|
|
||||||
|
sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
|
||||||
MOVQ AX, 16(SP)
|
MOVQ AX, 16(SP)
|
||||||
|
|
||||||
// Fill bitreader to have enough for the remaining
|
// Fill bitreader to have enough for the remaining
|
||||||
@ -2896,13 +2951,19 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_end:
|
|||||||
MOVQ DX, R14
|
MOVQ DX, R14
|
||||||
SHLQ CL, R14
|
SHLQ CL, R14
|
||||||
MOVB AH, CL
|
MOVB AH, CL
|
||||||
ADDQ CX, BX
|
|
||||||
NEGL CX
|
|
||||||
SHRQ CL, R14
|
|
||||||
SHRQ $0x20, AX
|
SHRQ $0x20, AX
|
||||||
TESTQ CX, CX
|
TESTQ CX, CX
|
||||||
CMOVQEQ CX, R14
|
JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero
|
||||||
|
ADDQ CX, BX
|
||||||
|
CMPQ BX, $0x40
|
||||||
|
JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero
|
||||||
|
CMPQ CX, $0x40
|
||||||
|
JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero
|
||||||
|
NEGQ CX
|
||||||
|
SHRQ CL, R14
|
||||||
ADDQ R14, AX
|
ADDQ R14, AX
|
||||||
|
|
||||||
|
sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
|
||||||
MOVQ AX, 24(SP)
|
MOVQ AX, 24(SP)
|
||||||
|
|
||||||
// Fill bitreader for state updates
|
// Fill bitreader for state updates
|
||||||
@ -2983,7 +3044,7 @@ sequenceDecs_decodeSync_safe_amd64_skip_update:
|
|||||||
MOVUPS 144(CX), X0
|
MOVUPS 144(CX), X0
|
||||||
MOVQ R13, 144(CX)
|
MOVQ R13, 144(CX)
|
||||||
MOVUPS X0, 152(CX)
|
MOVUPS X0, 152(CX)
|
||||||
JMP sequenceDecs_decodeSync_safe_amd64_adjust_end
|
JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
|
sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
|
||||||
CMPQ 24(SP), $0x00000000
|
CMPQ 24(SP), $0x00000000
|
||||||
@ -2995,7 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
|
|||||||
TESTQ R13, R13
|
TESTQ R13, R13
|
||||||
JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
|
JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
|
||||||
MOVQ 144(CX), R13
|
MOVQ 144(CX), R13
|
||||||
JMP sequenceDecs_decodeSync_safe_amd64_adjust_end
|
JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
|
sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
|
||||||
MOVQ R13, AX
|
MOVQ R13, AX
|
||||||
@ -3004,8 +3065,7 @@ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
|
|||||||
CMPQ R13, $0x03
|
CMPQ R13, $0x03
|
||||||
CMOVQEQ R14, AX
|
CMOVQEQ R14, AX
|
||||||
CMOVQEQ R15, R14
|
CMOVQEQ R15, R14
|
||||||
LEAQ 144(CX), R15
|
ADDQ 144(CX)(AX*8), R14
|
||||||
ADDQ (R15)(AX*8), R14
|
|
||||||
JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
|
JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
|
||||||
MOVQ $0x00000001, R14
|
MOVQ $0x00000001, R14
|
||||||
|
|
||||||
@ -3021,7 +3081,7 @@ sequenceDecs_decodeSync_safe_amd64_adjust_skip:
|
|||||||
MOVQ R14, 144(CX)
|
MOVQ R14, 144(CX)
|
||||||
MOVQ R14, R13
|
MOVQ R14, R13
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_adjust_end:
|
sequenceDecs_decodeSync_safe_amd64_after_adjust:
|
||||||
MOVQ R13, 8(SP)
|
MOVQ R13, 8(SP)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
@ -3420,6 +3480,10 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
|
|||||||
MOVQ 72(CX), SI
|
MOVQ 72(CX), SI
|
||||||
MOVQ 80(CX), DI
|
MOVQ 80(CX), DI
|
||||||
MOVQ 88(CX), R8
|
MOVQ 88(CX), R8
|
||||||
|
XORQ R9, R9
|
||||||
|
MOVQ R9, 8(SP)
|
||||||
|
MOVQ R9, 16(SP)
|
||||||
|
MOVQ R9, 24(SP)
|
||||||
MOVQ 112(CX), R9
|
MOVQ 112(CX), R9
|
||||||
MOVQ 128(CX), R10
|
MOVQ 128(CX), R10
|
||||||
MOVQ R10, 32(SP)
|
MOVQ R10, 32(SP)
|
||||||
@ -3592,7 +3656,7 @@ sequenceDecs_decodeSync_safe_bmi2_skip_update:
|
|||||||
MOVUPS 144(CX), X0
|
MOVUPS 144(CX), X0
|
||||||
MOVQ R13, 144(CX)
|
MOVQ R13, 144(CX)
|
||||||
MOVUPS X0, 152(CX)
|
MOVUPS X0, 152(CX)
|
||||||
JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end
|
JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
|
sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
|
||||||
CMPQ 24(SP), $0x00000000
|
CMPQ 24(SP), $0x00000000
|
||||||
@ -3604,7 +3668,7 @@ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
|
|||||||
TESTQ R13, R13
|
TESTQ R13, R13
|
||||||
JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
|
JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
|
||||||
MOVQ 144(CX), R13
|
MOVQ 144(CX), R13
|
||||||
JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end
|
JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
|
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
|
||||||
MOVQ R13, R12
|
MOVQ R13, R12
|
||||||
@ -3613,8 +3677,7 @@ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
|
|||||||
CMPQ R13, $0x03
|
CMPQ R13, $0x03
|
||||||
CMOVQEQ R14, R12
|
CMOVQEQ R14, R12
|
||||||
CMOVQEQ R15, R14
|
CMOVQEQ R15, R14
|
||||||
LEAQ 144(CX), R15
|
ADDQ 144(CX)(R12*8), R14
|
||||||
ADDQ (R15)(R12*8), R14
|
|
||||||
JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
|
JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
|
||||||
MOVQ $0x00000001, R14
|
MOVQ $0x00000001, R14
|
||||||
|
|
||||||
@ -3630,7 +3693,7 @@ sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
|
|||||||
MOVQ R14, 144(CX)
|
MOVQ R14, 144(CX)
|
||||||
MOVQ R14, R13
|
MOVQ R14, R13
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_adjust_end:
|
sequenceDecs_decodeSync_safe_bmi2_after_adjust:
|
||||||
MOVQ R13, 8(SP)
|
MOVQ R13, 8(SP)
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
|
4
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
generated
vendored
4
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
generated
vendored
@ -111,7 +111,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||||||
}
|
}
|
||||||
s.seqSize += ll + ml
|
s.seqSize += ll + ml
|
||||||
if s.seqSize > maxBlockSize {
|
if s.seqSize > maxBlockSize {
|
||||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
}
|
}
|
||||||
litRemain -= ll
|
litRemain -= ll
|
||||||
if litRemain < 0 {
|
if litRemain < 0 {
|
||||||
@ -149,7 +149,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||||||
}
|
}
|
||||||
s.seqSize += litRemain
|
s.seqSize += litRemain
|
||||||
if s.seqSize > maxBlockSize {
|
if s.seqSize > maxBlockSize {
|
||||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
}
|
}
|
||||||
err := br.close()
|
err := br.close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
31
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
31
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
@ -36,9 +36,6 @@ const forcePreDef = false
|
|||||||
// zstdMinMatch is the minimum zstd match length.
|
// zstdMinMatch is the minimum zstd match length.
|
||||||
const zstdMinMatch = 3
|
const zstdMinMatch = 3
|
||||||
|
|
||||||
// Reset the buffer offset when reaching this.
|
|
||||||
const bufferReset = math.MaxInt32 - MaxWindowSize
|
|
||||||
|
|
||||||
// fcsUnknown is used for unknown frame content size.
|
// fcsUnknown is used for unknown frame content size.
|
||||||
const fcsUnknown = math.MaxUint64
|
const fcsUnknown = math.MaxUint64
|
||||||
|
|
||||||
@ -75,7 +72,6 @@ var (
|
|||||||
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
|
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
|
||||||
|
|
||||||
// ErrUnknownDictionary is returned if the dictionary ID is unknown.
|
// ErrUnknownDictionary is returned if the dictionary ID is unknown.
|
||||||
// For the time being dictionaries are not supported.
|
|
||||||
ErrUnknownDictionary = errors.New("unknown dictionary")
|
ErrUnknownDictionary = errors.New("unknown dictionary")
|
||||||
|
|
||||||
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
|
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
|
||||||
@ -110,26 +106,25 @@ func printf(format string, a ...interface{}) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// matchLen returns the maximum length.
|
// matchLen returns the maximum common prefix length of a and b.
|
||||||
// a must be the shortest of the two.
|
// a must be the shortest of the two.
|
||||||
// The function also returns whether all bytes matched.
|
func matchLen(a, b []byte) (n int) {
|
||||||
func matchLen(a, b []byte) int {
|
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
|
||||||
b = b[:len(a)]
|
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
|
||||||
for i := 0; i < len(a)-7; i += 8 {
|
if diff != 0 {
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
return n + bits.TrailingZeros64(diff)>>3
|
||||||
return i + (bits.TrailingZeros64(diff) >> 3)
|
|
||||||
}
|
}
|
||||||
|
n += 8
|
||||||
}
|
}
|
||||||
|
|
||||||
checked := (len(a) >> 3) << 3
|
|
||||||
a = a[checked:]
|
|
||||||
b = b[checked:]
|
|
||||||
for i := range a {
|
for i := range a {
|
||||||
if a[i] != b[i] {
|
if a[i] != b[i] {
|
||||||
return i + checked
|
break
|
||||||
}
|
}
|
||||||
|
n++
|
||||||
}
|
}
|
||||||
return len(a) + checked
|
return n
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func load3232(b []byte, i int32) uint32 {
|
func load3232(b []byte, i int32) uint32 {
|
||||||
@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 {
|
|||||||
return binary.LittleEndian.Uint64(b[i:])
|
return binary.LittleEndian.Uint64(b[i:])
|
||||||
}
|
}
|
||||||
|
|
||||||
func load64(b []byte, i int) uint64 {
|
|
||||||
return binary.LittleEndian.Uint64(b[i:])
|
|
||||||
}
|
|
||||||
|
|
||||||
type byter interface {
|
type byter interface {
|
||||||
Bytes() []byte
|
Bytes() []byte
|
||||||
Len() int
|
Len() int
|
||||||
|
348
vendor/github.com/klauspost/cpuid/v2/README.md
generated
vendored
348
vendor/github.com/klauspost/cpuid/v2/README.md
generated
vendored
@ -17,9 +17,16 @@ Package home: https://github.com/klauspost/cpuid
|
|||||||
## installing
|
## installing
|
||||||
|
|
||||||
`go get -u github.com/klauspost/cpuid/v2` using modules.
|
`go get -u github.com/klauspost/cpuid/v2` using modules.
|
||||||
|
|
||||||
Drop `v2` for others.
|
Drop `v2` for others.
|
||||||
|
|
||||||
|
### Homebrew
|
||||||
|
|
||||||
|
For macOS/Linux users, you can install via [brew](https://brew.sh/)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ brew install cpuid
|
||||||
|
```
|
||||||
|
|
||||||
## example
|
## example
|
||||||
|
|
||||||
```Go
|
```Go
|
||||||
@ -77,10 +84,14 @@ We have Streaming SIMD 2 Extensions
|
|||||||
The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
|
The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
|
||||||
A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
|
A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
|
||||||
|
|
||||||
|
To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc.
|
||||||
|
This can be using with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported.
|
||||||
|
|
||||||
Note that for some cpu/os combinations some features will not be detected.
|
Note that for some cpu/os combinations some features will not be detected.
|
||||||
`amd64` has rather good support and should work reliably on all platforms.
|
`amd64` has rather good support and should work reliably on all platforms.
|
||||||
|
|
||||||
Note that hypervisors may not pass through all CPU features.
|
Note that hypervisors may not pass through all CPU features through to the guest OS,
|
||||||
|
so even if your host supports a feature it may not be visible on guests.
|
||||||
|
|
||||||
## arm64 feature detection
|
## arm64 feature detection
|
||||||
|
|
||||||
@ -132,6 +143,339 @@ func main() {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## commandline
|
||||||
|
|
||||||
|
Download as binary from: https://github.com/klauspost/cpuid/releases
|
||||||
|
|
||||||
|
Install from source:
|
||||||
|
|
||||||
|
`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```
|
||||||
|
λ cpuid
|
||||||
|
Name: AMD Ryzen 9 3950X 16-Core Processor
|
||||||
|
Vendor String: AuthenticAMD
|
||||||
|
Vendor ID: AMD
|
||||||
|
PhysicalCores: 16
|
||||||
|
Threads Per Core: 2
|
||||||
|
Logical Cores: 32
|
||||||
|
CPU Family 23 Model: 113
|
||||||
|
Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE
|
||||||
|
Microarchitecture level: 3
|
||||||
|
Cacheline bytes: 64
|
||||||
|
L1 Instruction Cache: 32768 bytes
|
||||||
|
L1 Data Cache: 32768 bytes
|
||||||
|
L2 Cache: 524288 bytes
|
||||||
|
L3 Cache: 16777216 bytes
|
||||||
|
|
||||||
|
```
|
||||||
|
### JSON Output:
|
||||||
|
|
||||||
|
```
|
||||||
|
λ cpuid --json
|
||||||
|
{
|
||||||
|
"BrandName": "AMD Ryzen 9 3950X 16-Core Processor",
|
||||||
|
"VendorID": 2,
|
||||||
|
"VendorString": "AuthenticAMD",
|
||||||
|
"PhysicalCores": 16,
|
||||||
|
"ThreadsPerCore": 2,
|
||||||
|
"LogicalCores": 32,
|
||||||
|
"Family": 23,
|
||||||
|
"Model": 113,
|
||||||
|
"CacheLine": 64,
|
||||||
|
"Hz": 0,
|
||||||
|
"BoostFreq": 0,
|
||||||
|
"Cache": {
|
||||||
|
"L1I": 32768,
|
||||||
|
"L1D": 32768,
|
||||||
|
"L2": 524288,
|
||||||
|
"L3": 16777216
|
||||||
|
},
|
||||||
|
"SGX": {
|
||||||
|
"Available": false,
|
||||||
|
"LaunchControl": false,
|
||||||
|
"SGX1Supported": false,
|
||||||
|
"SGX2Supported": false,
|
||||||
|
"MaxEnclaveSizeNot64": 0,
|
||||||
|
"MaxEnclaveSize64": 0,
|
||||||
|
"EPCSections": null
|
||||||
|
},
|
||||||
|
"Features": [
|
||||||
|
"ADX",
|
||||||
|
"AESNI",
|
||||||
|
"AVX",
|
||||||
|
"AVX2",
|
||||||
|
"BMI1",
|
||||||
|
"BMI2",
|
||||||
|
"CLMUL",
|
||||||
|
"CLZERO",
|
||||||
|
"CMOV",
|
||||||
|
"CMPXCHG8",
|
||||||
|
"CPBOOST",
|
||||||
|
"CX16",
|
||||||
|
"F16C",
|
||||||
|
"FMA3",
|
||||||
|
"FXSR",
|
||||||
|
"FXSROPT",
|
||||||
|
"HTT",
|
||||||
|
"HYPERVISOR",
|
||||||
|
"LAHF",
|
||||||
|
"LZCNT",
|
||||||
|
"MCAOVERFLOW",
|
||||||
|
"MMX",
|
||||||
|
"MMXEXT",
|
||||||
|
"MOVBE",
|
||||||
|
"NX",
|
||||||
|
"OSXSAVE",
|
||||||
|
"POPCNT",
|
||||||
|
"RDRAND",
|
||||||
|
"RDSEED",
|
||||||
|
"RDTSCP",
|
||||||
|
"SCE",
|
||||||
|
"SHA",
|
||||||
|
"SSE",
|
||||||
|
"SSE2",
|
||||||
|
"SSE3",
|
||||||
|
"SSE4",
|
||||||
|
"SSE42",
|
||||||
|
"SSE4A",
|
||||||
|
"SSSE3",
|
||||||
|
"SUCCOR",
|
||||||
|
"X87",
|
||||||
|
"XSAVE"
|
||||||
|
],
|
||||||
|
"X64Level": 3
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check CPU microarch level
|
||||||
|
|
||||||
|
```
|
||||||
|
λ cpuid --check-level=3
|
||||||
|
2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor
|
||||||
|
2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3.
|
||||||
|
Exit Code 0
|
||||||
|
|
||||||
|
λ cpuid --check-level=4
|
||||||
|
2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor
|
||||||
|
2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3.
|
||||||
|
Exit Code 1
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Available flags
|
||||||
|
|
||||||
|
### x86 & amd64
|
||||||
|
|
||||||
|
| Feature Flag | Description |
|
||||||
|
|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) |
|
||||||
|
| AESNI | Advanced Encryption Standard New Instructions |
|
||||||
|
| AMD3DNOW | AMD 3DNOW |
|
||||||
|
| AMD3DNOWEXT | AMD 3DNowExt |
|
||||||
|
| AMXBF16 | Tile computational operations on BFLOAT16 numbers |
|
||||||
|
| AMXINT8 | Tile computational operations on 8-bit integers |
|
||||||
|
| AMXFP16 | Tile computational operations on FP16 numbers |
|
||||||
|
| AMXTILE | Tile architecture |
|
||||||
|
| AVX | AVX functions |
|
||||||
|
| AVX2 | AVX2 functions |
|
||||||
|
| AVX512BF16 | AVX-512 BFLOAT16 Instructions |
|
||||||
|
| AVX512BITALG | AVX-512 Bit Algorithms |
|
||||||
|
| AVX512BW | AVX-512 Byte and Word Instructions |
|
||||||
|
| AVX512CD | AVX-512 Conflict Detection Instructions |
|
||||||
|
| AVX512DQ | AVX-512 Doubleword and Quadword Instructions |
|
||||||
|
| AVX512ER | AVX-512 Exponential and Reciprocal Instructions |
|
||||||
|
| AVX512F | AVX-512 Foundation |
|
||||||
|
| AVX512FP16 | AVX-512 FP16 Instructions |
|
||||||
|
| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions |
|
||||||
|
| AVX512PF | AVX-512 Prefetch Instructions |
|
||||||
|
| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions |
|
||||||
|
| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 |
|
||||||
|
| AVX512VL | AVX-512 Vector Length Extensions |
|
||||||
|
| AVX512VNNI | AVX-512 Vector Neural Network Instructions |
|
||||||
|
| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q |
|
||||||
|
| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword |
|
||||||
|
| AVXIFMA | AVX-IFMA instructions |
|
||||||
|
| AVXNECONVERT | AVX-NE-CONVERT instructions |
|
||||||
|
| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one |
|
||||||
|
| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions |
|
||||||
|
| AVXVNNIINT8 | AVX-VNNI-INT8 instructions |
|
||||||
|
| BMI1 | Bit Manipulation Instruction Set 1 |
|
||||||
|
| BMI2 | Bit Manipulation Instruction Set 2 |
|
||||||
|
| CETIBT | Intel CET Indirect Branch Tracking |
|
||||||
|
| CETSS | Intel CET Shadow Stack |
|
||||||
|
| CLDEMOTE | Cache Line Demote |
|
||||||
|
| CLMUL | Carry-less Multiplication |
|
||||||
|
| CLZERO | CLZERO instruction supported |
|
||||||
|
| CMOV | i686 CMOV |
|
||||||
|
| CMPCCXADD | CMPCCXADD instructions |
|
||||||
|
| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB |
|
||||||
|
| CMPXCHG8 | CMPXCHG8 instruction |
|
||||||
|
| CPBOOST | Core Performance Boost |
|
||||||
|
| CPPC | AMD: Collaborative Processor Performance Control |
|
||||||
|
| CX16 | CMPXCHG16B Instruction |
|
||||||
|
| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ |
|
||||||
|
| ENQCMD | Enqueue Command |
|
||||||
|
| ERMS | Enhanced REP MOVSB/STOSB |
|
||||||
|
| F16C | Half-precision floating-point conversion |
|
||||||
|
| FLUSH_L1D | Flush L1D cache |
|
||||||
|
| FMA3 | Intel FMA 3. Does not imply AVX. |
|
||||||
|
| FMA4 | Bulldozer FMA4 functions |
|
||||||
|
| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide |
|
||||||
|
| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide |
|
||||||
|
| FSRM | Fast Short Rep Mov |
|
||||||
|
| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 |
|
||||||
|
| FXSROPT | FXSAVE/FXRSTOR optimizations |
|
||||||
|
| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. |
|
||||||
|
| HLE | Hardware Lock Elision |
|
||||||
|
| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR |
|
||||||
|
| HTT | Hyperthreading (enabled) |
|
||||||
|
| HWA | Hardware assert supported. Indicates support for MSRC001_10 |
|
||||||
|
| HYBRID_CPU | This part has CPUs of more than one type. |
|
||||||
|
| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors |
|
||||||
|
| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) |
|
||||||
|
| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR |
|
||||||
|
| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) |
|
||||||
|
| IBRS | AMD: Indirect Branch Restricted Speculation |
|
||||||
|
| IBRS_PREFERRED | AMD: IBRS is preferred over software solution |
|
||||||
|
| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection |
|
||||||
|
| IBS | Instruction Based Sampling (AMD) |
|
||||||
|
| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSFFV | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSOPCNT | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSOPSAM | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) |
|
||||||
|
| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported |
|
||||||
|
| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported |
|
||||||
|
| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse |
|
||||||
|
| IBS_PREVENTHOST | Disallowing IBS use by the host supported |
|
||||||
|
| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 |
|
||||||
|
| INT_WBINVD | WBINVD/WBNOINVD are interruptible. |
|
||||||
|
| INVLPGB | NVLPGB and TLBSYNC instruction supported |
|
||||||
|
| LAHF | LAHF/SAHF in long mode |
|
||||||
|
| LAM | If set, CPU supports Linear Address Masking |
|
||||||
|
| LBRVIRT | LBR virtualization |
|
||||||
|
| LZCNT | LZCNT instruction |
|
||||||
|
| MCAOVERFLOW | MCA overflow recovery support. |
|
||||||
|
| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. |
|
||||||
|
| MCOMMIT | MCOMMIT instruction supported |
|
||||||
|
| MD_CLEAR | VERW clears CPU buffers |
|
||||||
|
| MMX | standard MMX |
|
||||||
|
| MMXEXT | SSE integer functions or AMD MMX ext |
|
||||||
|
| MOVBE | MOVBE instruction (big-endian) |
|
||||||
|
| MOVDIR64B | Move 64 Bytes as Direct Store |
|
||||||
|
| MOVDIRI | Move Doubleword as Direct Store |
|
||||||
|
| MOVSB_ZL | Fast Zero-Length MOVSB |
|
||||||
|
| MPX | Intel MPX (Memory Protection Extensions) |
|
||||||
|
| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD |
|
||||||
|
| MSRIRC | Instruction Retired Counter MSR available |
|
||||||
|
| MSR_PAGEFLUSH | Page Flush MSR available |
|
||||||
|
| NRIPS | Indicates support for NRIP save on VMEXIT |
|
||||||
|
| NX | NX (No-Execute) bit |
|
||||||
|
| OSXSAVE | XSAVE enabled by OS |
|
||||||
|
| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption |
|
||||||
|
| POPCNT | POPCNT instruction |
|
||||||
|
| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled |
|
||||||
|
| PREFETCHI | PREFETCHIT0/1 instructions |
|
||||||
|
| PSFD | AMD: Predictive Store Forward Disable |
|
||||||
|
| RDPRU | RDPRU instruction supported |
|
||||||
|
| RDRAND | RDRAND instruction is available |
|
||||||
|
| RDSEED | RDSEED instruction is available |
|
||||||
|
| RDTSCP | RDTSCP Instruction |
|
||||||
|
| RTM | Restricted Transactional Memory |
|
||||||
|
| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. |
|
||||||
|
| SERIALIZE | Serialize Instruction Execution |
|
||||||
|
| SEV | AMD Secure Encrypted Virtualization supported |
|
||||||
|
| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host |
|
||||||
|
| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported |
|
||||||
|
| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests |
|
||||||
|
| SEV_ES | AMD SEV Encrypted State supported |
|
||||||
|
| SEV_RESTRICTED | AMD SEV Restricted Injection supported |
|
||||||
|
| SEV_SNP | AMD SEV Secure Nested Paging supported |
|
||||||
|
| SGX | Software Guard Extensions |
|
||||||
|
| SGXLC | Software Guard Extensions Launch Control |
|
||||||
|
| SHA | Intel SHA Extensions |
|
||||||
|
| SME | AMD Secure Memory Encryption supported |
|
||||||
|
| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced |
|
||||||
|
| SPEC_CTRL_SSBD | Speculative Store Bypass Disable |
|
||||||
|
| SRBDS_CTRL | SRBDS mitigation MSR available |
|
||||||
|
| SSE | SSE functions |
|
||||||
|
| SSE2 | P4 SSE functions |
|
||||||
|
| SSE3 | Prescott SSE3 functions |
|
||||||
|
| SSE4 | Penryn SSE4.1 functions |
|
||||||
|
| SSE42 | Nehalem SSE4.2 functions |
|
||||||
|
| SSE4A | AMD Barcelona microarchitecture SSE4a instructions |
|
||||||
|
| SSSE3 | Conroe SSSE3 functions |
|
||||||
|
| STIBP | Single Thread Indirect Branch Predictors |
|
||||||
|
| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On |
|
||||||
|
| STOSB_SHORT | Fast short STOSB |
|
||||||
|
| SUCCOR | Software uncorrectable error containment and recovery capability. |
|
||||||
|
| SVM | AMD Secure Virtual Machine |
|
||||||
|
| SVMDA | Indicates support for the SVM decode assists. |
|
||||||
|
| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control |
|
||||||
|
| SVML | AMD SVM lock. Indicates support for SVM-Lock. |
|
||||||
|
| SVMNP | AMD SVM nested paging |
|
||||||
|
| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter |
|
||||||
|
| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold |
|
||||||
|
| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
|
||||||
|
| SYSEE | SYSENTER and SYSEXIT instructions |
|
||||||
|
| TBM | AMD Trailing Bit Manipulation |
|
||||||
|
| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
|
||||||
|
| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
|
||||||
|
| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |
|
||||||
|
| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 |
|
||||||
|
| TSXLDTRK | Intel TSX Suspend Load Address Tracking |
|
||||||
|
| VAES | Vector AES. AVX(512) versions requires additional checks. |
|
||||||
|
| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. |
|
||||||
|
| VMPL | AMD VM Permission Levels supported |
|
||||||
|
| VMSA_REGPROT | AMD VMSA Register Protection supported |
|
||||||
|
| VMX | Virtual Machine Extensions |
|
||||||
|
| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. |
|
||||||
|
| VTE | AMD Virtual Transparent Encryption supported |
|
||||||
|
| WAITPKG | TPAUSE, UMONITOR, UMWAIT |
|
||||||
|
| WBNOINVD | Write Back and Do Not Invalidate Cache |
|
||||||
|
| X87 | FPU |
|
||||||
|
| XGETBV1 | Supports XGETBV with ECX = 1 |
|
||||||
|
| XOP | Bulldozer XOP functions |
|
||||||
|
| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV |
|
||||||
|
| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. |
|
||||||
|
| XSAVEOPT | XSAVEOPT available |
|
||||||
|
| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS |
|
||||||
|
|
||||||
|
# ARM features:
|
||||||
|
|
||||||
|
| Feature Flag | Description |
|
||||||
|
|--------------|------------------------------------------------------------------|
|
||||||
|
| AESARM | AES instructions |
|
||||||
|
| ARMCPUID | Some CPU ID registers readable at user-level |
|
||||||
|
| ASIMD | Advanced SIMD |
|
||||||
|
| ASIMDDP | SIMD Dot Product |
|
||||||
|
| ASIMDHP | Advanced SIMD half-precision floating point |
|
||||||
|
| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) |
|
||||||
|
| ATOMICS | Large System Extensions (LSE) |
|
||||||
|
| CRC32 | CRC32/CRC32C instructions |
|
||||||
|
| DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
|
||||||
|
| EVTSTRM | Generic timer |
|
||||||
|
| FCMA | Floatin point complex number addition and multiplication |
|
||||||
|
| FP | Single-precision and double-precision floating point |
|
||||||
|
| FPHP | Half-precision floating point |
|
||||||
|
| GPA | Generic Pointer Authentication |
|
||||||
|
| JSCVT | Javascript-style double->int convert (FJCVTZS) |
|
||||||
|
| LRCPC | Weaker release consistency (LDAPR, etc) |
|
||||||
|
| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
|
||||||
|
| SHA1 | SHA-1 instructions (SHA1C, etc) |
|
||||||
|
| SHA2 | SHA-2 instructions (SHA256H, etc) |
|
||||||
|
| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) |
|
||||||
|
| SHA512 | SHA512 instructions |
|
||||||
|
| SM3 | SM3 instructions |
|
||||||
|
| SM4 | SM4 instructions |
|
||||||
|
| SVE | Scalable Vector Extension |
|
||||||
|
|
||||||
# license
|
# license
|
||||||
|
|
||||||
This code is published under an MIT license. See LICENSE file for more information.
|
This code is published under an MIT license. See LICENSE file for more information.
|
||||||
|
363
vendor/github.com/klauspost/cpuid/v2/cpuid.go
generated
vendored
363
vendor/github.com/klauspost/cpuid/v2/cpuid.go
generated
vendored
@ -14,6 +14,7 @@ import (
|
|||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
"math/bits"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
@ -72,6 +73,7 @@ const (
|
|||||||
AMD3DNOW // AMD 3DNOW
|
AMD3DNOW // AMD 3DNOW
|
||||||
AMD3DNOWEXT // AMD 3DNowExt
|
AMD3DNOWEXT // AMD 3DNowExt
|
||||||
AMXBF16 // Tile computational operations on BFLOAT16 numbers
|
AMXBF16 // Tile computational operations on BFLOAT16 numbers
|
||||||
|
AMXFP16 // Tile computational operations on FP16 numbers
|
||||||
AMXINT8 // Tile computational operations on 8-bit integers
|
AMXINT8 // Tile computational operations on 8-bit integers
|
||||||
AMXTILE // Tile architecture
|
AMXTILE // Tile architecture
|
||||||
AVX // AVX functions
|
AVX // AVX functions
|
||||||
@ -92,7 +94,11 @@ const (
|
|||||||
AVX512VNNI // AVX-512 Vector Neural Network Instructions
|
AVX512VNNI // AVX-512 Vector Neural Network Instructions
|
||||||
AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
|
AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
|
||||||
AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
|
AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
|
||||||
AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one.
|
AVXIFMA // AVX-IFMA instructions
|
||||||
|
AVXNECONVERT // AVX-NE-CONVERT instructions
|
||||||
|
AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one
|
||||||
|
AVXVNNI // AVX (VEX encoded) VNNI neural network instructions
|
||||||
|
AVXVNNIINT8 // AVX-VNNI-INT8 instructions
|
||||||
BMI1 // Bit Manipulation Instruction Set 1
|
BMI1 // Bit Manipulation Instruction Set 1
|
||||||
BMI2 // Bit Manipulation Instruction Set 2
|
BMI2 // Bit Manipulation Instruction Set 2
|
||||||
CETIBT // Intel CET Indirect Branch Tracking
|
CETIBT // Intel CET Indirect Branch Tracking
|
||||||
@ -101,22 +107,37 @@ const (
|
|||||||
CLMUL // Carry-less Multiplication
|
CLMUL // Carry-less Multiplication
|
||||||
CLZERO // CLZERO instruction supported
|
CLZERO // CLZERO instruction supported
|
||||||
CMOV // i686 CMOV
|
CMOV // i686 CMOV
|
||||||
|
CMPCCXADD // CMPCCXADD instructions
|
||||||
|
CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB
|
||||||
CMPXCHG8 // CMPXCHG8 instruction
|
CMPXCHG8 // CMPXCHG8 instruction
|
||||||
CPBOOST // Core Performance Boost
|
CPBOOST // Core Performance Boost
|
||||||
|
CPPC // AMD: Collaborative Processor Performance Control
|
||||||
CX16 // CMPXCHG16B Instruction
|
CX16 // CMPXCHG16B Instruction
|
||||||
|
EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ
|
||||||
ENQCMD // Enqueue Command
|
ENQCMD // Enqueue Command
|
||||||
ERMS // Enhanced REP MOVSB/STOSB
|
ERMS // Enhanced REP MOVSB/STOSB
|
||||||
F16C // Half-precision floating-point conversion
|
F16C // Half-precision floating-point conversion
|
||||||
|
FLUSH_L1D // Flush L1D cache
|
||||||
FMA3 // Intel FMA 3. Does not imply AVX.
|
FMA3 // Intel FMA 3. Does not imply AVX.
|
||||||
FMA4 // Bulldozer FMA4 functions
|
FMA4 // Bulldozer FMA4 functions
|
||||||
|
FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide
|
||||||
|
FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide
|
||||||
|
FSRM // Fast Short Rep Mov
|
||||||
FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
|
FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
|
||||||
FXSROPT // FXSAVE/FXRSTOR optimizations
|
FXSROPT // FXSAVE/FXRSTOR optimizations
|
||||||
GFNI // Galois Field New Instructions
|
GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
|
||||||
HLE // Hardware Lock Elision
|
HLE // Hardware Lock Elision
|
||||||
|
HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
|
||||||
HTT // Hyperthreading (enabled)
|
HTT // Hyperthreading (enabled)
|
||||||
HWA // Hardware assert supported. Indicates support for MSRC001_10
|
HWA // Hardware assert supported. Indicates support for MSRC001_10
|
||||||
|
HYBRID_CPU // This part has CPUs of more than one type.
|
||||||
HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
|
HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
|
||||||
|
IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel)
|
||||||
|
IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR
|
||||||
IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
|
IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
|
||||||
|
IBRS // AMD: Indirect Branch Restricted Speculation
|
||||||
|
IBRS_PREFERRED // AMD: IBRS is preferred over software solution
|
||||||
|
IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection
|
||||||
IBS // Instruction Based Sampling (AMD)
|
IBS // Instruction Based Sampling (AMD)
|
||||||
IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
|
IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
|
||||||
IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
|
IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
|
||||||
@ -126,33 +147,60 @@ const (
|
|||||||
IBSOPSAM // Instruction Based Sampling Feature (AMD)
|
IBSOPSAM // Instruction Based Sampling Feature (AMD)
|
||||||
IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
|
IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
|
||||||
IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
|
IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
|
||||||
|
IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported
|
||||||
|
IBS_OPDATA4 // AMD: IBS op data 4 MSR supported
|
||||||
|
IBS_OPFUSE // AMD: Indicates support for IbsOpFuse
|
||||||
|
IBS_PREVENTHOST // Disallowing IBS use by the host supported
|
||||||
|
IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4
|
||||||
INT_WBINVD // WBINVD/WBNOINVD are interruptible.
|
INT_WBINVD // WBINVD/WBNOINVD are interruptible.
|
||||||
INVLPGB // NVLPGB and TLBSYNC instruction supported
|
INVLPGB // NVLPGB and TLBSYNC instruction supported
|
||||||
LAHF // LAHF/SAHF in long mode
|
LAHF // LAHF/SAHF in long mode
|
||||||
|
LAM // If set, CPU supports Linear Address Masking
|
||||||
|
LBRVIRT // LBR virtualization
|
||||||
LZCNT // LZCNT instruction
|
LZCNT // LZCNT instruction
|
||||||
MCAOVERFLOW // MCA overflow recovery support.
|
MCAOVERFLOW // MCA overflow recovery support.
|
||||||
|
MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it.
|
||||||
MCOMMIT // MCOMMIT instruction supported
|
MCOMMIT // MCOMMIT instruction supported
|
||||||
|
MD_CLEAR // VERW clears CPU buffers
|
||||||
MMX // standard MMX
|
MMX // standard MMX
|
||||||
MMXEXT // SSE integer functions or AMD MMX ext
|
MMXEXT // SSE integer functions or AMD MMX ext
|
||||||
MOVBE // MOVBE instruction (big-endian)
|
MOVBE // MOVBE instruction (big-endian)
|
||||||
MOVDIR64B // Move 64 Bytes as Direct Store
|
MOVDIR64B // Move 64 Bytes as Direct Store
|
||||||
MOVDIRI // Move Doubleword as Direct Store
|
MOVDIRI // Move Doubleword as Direct Store
|
||||||
|
MOVSB_ZL // Fast Zero-Length MOVSB
|
||||||
|
MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD
|
||||||
MPX // Intel MPX (Memory Protection Extensions)
|
MPX // Intel MPX (Memory Protection Extensions)
|
||||||
MSRIRC // Instruction Retired Counter MSR available
|
MSRIRC // Instruction Retired Counter MSR available
|
||||||
|
MSR_PAGEFLUSH // Page Flush MSR available
|
||||||
|
NRIPS // Indicates support for NRIP save on VMEXIT
|
||||||
NX // NX (No-Execute) bit
|
NX // NX (No-Execute) bit
|
||||||
OSXSAVE // XSAVE enabled by OS
|
OSXSAVE // XSAVE enabled by OS
|
||||||
|
PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption
|
||||||
POPCNT // POPCNT instruction
|
POPCNT // POPCNT instruction
|
||||||
|
PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled
|
||||||
|
PREFETCHI // PREFETCHIT0/1 instructions
|
||||||
|
PSFD // AMD: Predictive Store Forward Disable
|
||||||
RDPRU // RDPRU instruction supported
|
RDPRU // RDPRU instruction supported
|
||||||
RDRAND // RDRAND instruction is available
|
RDRAND // RDRAND instruction is available
|
||||||
RDSEED // RDSEED instruction is available
|
RDSEED // RDSEED instruction is available
|
||||||
RDTSCP // RDTSCP Instruction
|
RDTSCP // RDTSCP Instruction
|
||||||
RTM // Restricted Transactional Memory
|
RTM // Restricted Transactional Memory
|
||||||
RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
|
RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
|
||||||
SCE // SYSENTER and SYSEXIT instructions
|
|
||||||
SERIALIZE // Serialize Instruction Execution
|
SERIALIZE // Serialize Instruction Execution
|
||||||
|
SEV // AMD Secure Encrypted Virtualization supported
|
||||||
|
SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host
|
||||||
|
SEV_ALTERNATIVE // AMD SEV Alternate Injection supported
|
||||||
|
SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests
|
||||||
|
SEV_ES // AMD SEV Encrypted State supported
|
||||||
|
SEV_RESTRICTED // AMD SEV Restricted Injection supported
|
||||||
|
SEV_SNP // AMD SEV Secure Nested Paging supported
|
||||||
SGX // Software Guard Extensions
|
SGX // Software Guard Extensions
|
||||||
SGXLC // Software Guard Extensions Launch Control
|
SGXLC // Software Guard Extensions Launch Control
|
||||||
SHA // Intel SHA Extensions
|
SHA // Intel SHA Extensions
|
||||||
|
SME // AMD Secure Memory Encryption supported
|
||||||
|
SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
|
||||||
|
SPEC_CTRL_SSBD // Speculative Store Bypass Disable
|
||||||
|
SRBDS_CTRL // SRBDS mitigation MSR available
|
||||||
SSE // SSE functions
|
SSE // SSE functions
|
||||||
SSE2 // P4 SSE functions
|
SSE2 // P4 SSE functions
|
||||||
SSE3 // Prescott SSE3 functions
|
SSE3 // Prescott SSE3 functions
|
||||||
@ -161,17 +209,40 @@ const (
|
|||||||
SSE4A // AMD Barcelona microarchitecture SSE4a instructions
|
SSE4A // AMD Barcelona microarchitecture SSE4a instructions
|
||||||
SSSE3 // Conroe SSSE3 functions
|
SSSE3 // Conroe SSSE3 functions
|
||||||
STIBP // Single Thread Indirect Branch Predictors
|
STIBP // Single Thread Indirect Branch Predictors
|
||||||
|
STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On
|
||||||
|
STOSB_SHORT // Fast short STOSB
|
||||||
SUCCOR // Software uncorrectable error containment and recovery capability.
|
SUCCOR // Software uncorrectable error containment and recovery capability.
|
||||||
|
SVM // AMD Secure Virtual Machine
|
||||||
|
SVMDA // Indicates support for the SVM decode assists.
|
||||||
|
SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control
|
||||||
|
SVML // AMD SVM lock. Indicates support for SVM-Lock.
|
||||||
|
SVMNP // AMD SVM nested paging
|
||||||
|
SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter
|
||||||
|
SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold
|
||||||
|
SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
|
||||||
|
SYSEE // SYSENTER and SYSEXIT instructions
|
||||||
TBM // AMD Trailing Bit Manipulation
|
TBM // AMD Trailing Bit Manipulation
|
||||||
|
TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
|
||||||
|
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
|
||||||
|
TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
|
||||||
|
TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
|
||||||
TSXLDTRK // Intel TSX Suspend Load Address Tracking
|
TSXLDTRK // Intel TSX Suspend Load Address Tracking
|
||||||
VAES // Vector AES
|
VAES // Vector AES. AVX(512) versions requires additional checks.
|
||||||
|
VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits.
|
||||||
|
VMPL // AMD VM Permission Levels supported
|
||||||
|
VMSA_REGPROT // AMD VMSA Register Protection supported
|
||||||
VMX // Virtual Machine Extensions
|
VMX // Virtual Machine Extensions
|
||||||
VPCLMULQDQ // Carry-Less Multiplication Quadword
|
VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions.
|
||||||
|
VTE // AMD Virtual Transparent Encryption supported
|
||||||
WAITPKG // TPAUSE, UMONITOR, UMWAIT
|
WAITPKG // TPAUSE, UMONITOR, UMWAIT
|
||||||
WBNOINVD // Write Back and Do Not Invalidate Cache
|
WBNOINVD // Write Back and Do Not Invalidate Cache
|
||||||
X87 // FPU
|
X87 // FPU
|
||||||
|
XGETBV1 // Supports XGETBV with ECX = 1
|
||||||
XOP // Bulldozer XOP functions
|
XOP // Bulldozer XOP functions
|
||||||
XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV
|
XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV
|
||||||
|
XSAVEC // Supports XSAVEC and the compacted form of XRSTOR.
|
||||||
|
XSAVEOPT // XSAVEOPT available
|
||||||
|
XSAVES // Supports XSAVES/XRSTORS and IA32_XSS
|
||||||
|
|
||||||
// ARM features:
|
// ARM features:
|
||||||
AESARM // AES instructions
|
AESARM // AES instructions
|
||||||
@ -198,7 +269,6 @@ const (
|
|||||||
SM3 // SM3 instructions
|
SM3 // SM3 instructions
|
||||||
SM4 // SM4 instructions
|
SM4 // SM4 instructions
|
||||||
SVE // Scalable Vector Extension
|
SVE // Scalable Vector Extension
|
||||||
|
|
||||||
// Keep it last. It automatically defines the size of []flagSet
|
// Keep it last. It automatically defines the size of []flagSet
|
||||||
lastID
|
lastID
|
||||||
|
|
||||||
@ -216,6 +286,7 @@ type CPUInfo struct {
|
|||||||
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
|
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
|
||||||
Family int // CPU family number
|
Family int // CPU family number
|
||||||
Model int // CPU model number
|
Model int // CPU model number
|
||||||
|
Stepping int // CPU stepping info
|
||||||
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
|
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
|
||||||
Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
|
Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
|
||||||
BoostFreq int64 // Max clock speed, if known, 0 otherwise
|
BoostFreq int64 // Max clock speed, if known, 0 otherwise
|
||||||
@ -318,30 +389,61 @@ func (c CPUInfo) Supports(ids ...FeatureID) bool {
|
|||||||
|
|
||||||
// Has allows for checking a single feature.
|
// Has allows for checking a single feature.
|
||||||
// Should be inlined by the compiler.
|
// Should be inlined by the compiler.
|
||||||
func (c CPUInfo) Has(id FeatureID) bool {
|
func (c *CPUInfo) Has(id FeatureID) bool {
|
||||||
return c.featureSet.inSet(id)
|
return c.featureSet.inSet(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AnyOf returns whether the CPU supports one or more of the requested features.
|
||||||
|
func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
|
||||||
|
for _, id := range ids {
|
||||||
|
if c.featureSet.inSet(id) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features contains several features combined for a fast check using
|
||||||
|
// CpuInfo.HasAll
|
||||||
|
type Features *flagSet
|
||||||
|
|
||||||
|
// CombineFeatures allows to combine several features for a close to constant time lookup.
|
||||||
|
func CombineFeatures(ids ...FeatureID) Features {
|
||||||
|
var v flagSet
|
||||||
|
for _, id := range ids {
|
||||||
|
v.set(id)
|
||||||
|
}
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CPUInfo) HasAll(f Features) bool {
|
||||||
|
return c.featureSet.hasSetP(f)
|
||||||
|
}
|
||||||
|
|
||||||
// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
|
// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
|
||||||
var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
|
var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
|
||||||
var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
|
var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
|
||||||
var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
|
var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
|
||||||
var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
|
var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
|
||||||
|
var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
|
||||||
|
|
||||||
// X64Level returns the microarchitecture level detected on the CPU.
|
// X64Level returns the microarchitecture level detected on the CPU.
|
||||||
// If features are lacking or non x64 mode, 0 is returned.
|
// If features are lacking or non x64 mode, 0 is returned.
|
||||||
// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
|
// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
|
||||||
func (c CPUInfo) X64Level() int {
|
func (c CPUInfo) X64Level() int {
|
||||||
if c.featureSet.hasSet(level4Features) {
|
if !c.featureSet.hasOneOf(oneOfLevel) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if c.featureSet.hasSetP(level4Features) {
|
||||||
return 4
|
return 4
|
||||||
}
|
}
|
||||||
if c.featureSet.hasSet(level3Features) {
|
if c.featureSet.hasSetP(level3Features) {
|
||||||
return 3
|
return 3
|
||||||
}
|
}
|
||||||
if c.featureSet.hasSet(level2Features) {
|
if c.featureSet.hasSetP(level2Features) {
|
||||||
return 2
|
return 2
|
||||||
}
|
}
|
||||||
if c.featureSet.hasSet(level1Features) {
|
if c.featureSet.hasSetP(level1Features) {
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
@ -369,8 +471,9 @@ func (c CPUInfo) IsVendor(v Vendor) bool {
|
|||||||
return c.VendorID == v
|
return c.VendorID == v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FeatureSet returns all available features as strings.
|
||||||
func (c CPUInfo) FeatureSet() []string {
|
func (c CPUInfo) FeatureSet() []string {
|
||||||
s := make([]string, 0)
|
s := make([]string, 0, c.featureSet.nEnabled())
|
||||||
s = append(s, c.featureSet.Strings()...)
|
s = append(s, c.featureSet.Strings()...)
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
@ -504,7 +607,7 @@ const flagMask = flagBits - 1
|
|||||||
// flagSet contains detected cpu features and characteristics in an array of flags
|
// flagSet contains detected cpu features and characteristics in an array of flags
|
||||||
type flagSet [(lastID + flagMask) / flagBits]flags
|
type flagSet [(lastID + flagMask) / flagBits]flags
|
||||||
|
|
||||||
func (s flagSet) inSet(feat FeatureID) bool {
|
func (s *flagSet) inSet(feat FeatureID) bool {
|
||||||
return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
|
return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -534,7 +637,7 @@ func (s *flagSet) or(other flagSet) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// hasSet returns whether all features are present.
|
// hasSet returns whether all features are present.
|
||||||
func (s flagSet) hasSet(other flagSet) bool {
|
func (s *flagSet) hasSet(other flagSet) bool {
|
||||||
for i, v := range other[:] {
|
for i, v := range other[:] {
|
||||||
if s[i]&v != v {
|
if s[i]&v != v {
|
||||||
return false
|
return false
|
||||||
@ -543,6 +646,34 @@ func (s flagSet) hasSet(other flagSet) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hasSet returns whether all features are present.
|
||||||
|
func (s *flagSet) hasSetP(other *flagSet) bool {
|
||||||
|
for i, v := range other[:] {
|
||||||
|
if s[i]&v != v {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasOneOf returns whether one or more features are present.
|
||||||
|
func (s *flagSet) hasOneOf(other *flagSet) bool {
|
||||||
|
for i, v := range other[:] {
|
||||||
|
if s[i]&v != 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// nEnabled will return the number of enabled flags.
|
||||||
|
func (s *flagSet) nEnabled() (n int) {
|
||||||
|
for _, v := range s[:] {
|
||||||
|
n += bits.OnesCount64(uint64(v))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
func flagSetWith(feat ...FeatureID) flagSet {
|
func flagSetWith(feat ...FeatureID) flagSet {
|
||||||
var res flagSet
|
var res flagSet
|
||||||
for _, f := range feat {
|
for _, f := range feat {
|
||||||
@ -631,7 +762,7 @@ func threadsPerCore() int {
|
|||||||
if vend == AMD {
|
if vend == AMD {
|
||||||
// Workaround for AMD returning 0, assume 2 if >= Zen 2
|
// Workaround for AMD returning 0, assume 2 if >= Zen 2
|
||||||
// It will be more correct than not.
|
// It will be more correct than not.
|
||||||
fam, _ := familyModel()
|
fam, _, _ := familyModel()
|
||||||
_, _, _, d := cpuid(1)
|
_, _, _, d := cpuid(1)
|
||||||
if (d&(1<<28)) != 0 && fam >= 23 {
|
if (d&(1<<28)) != 0 && fam >= 23 {
|
||||||
return 2
|
return 2
|
||||||
@ -669,14 +800,27 @@ func logicalCores() int {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func familyModel() (int, int) {
|
func familyModel() (family, model, stepping int) {
|
||||||
if maxFunctionID() < 0x1 {
|
if maxFunctionID() < 0x1 {
|
||||||
return 0, 0
|
return 0, 0, 0
|
||||||
}
|
}
|
||||||
eax, _, _, _ := cpuid(1)
|
eax, _, _, _ := cpuid(1)
|
||||||
family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
|
// If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0].
|
||||||
model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
|
family = int((eax >> 8) & 0xf)
|
||||||
return int(family), int(model)
|
extFam := family == 0x6 // Intel is 0x6, needs extended model.
|
||||||
|
if family == 0xf {
|
||||||
|
// Add ExtFamily
|
||||||
|
family += int((eax >> 20) & 0xff)
|
||||||
|
extFam = true
|
||||||
|
}
|
||||||
|
// If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0].
|
||||||
|
model = int((eax >> 4) & 0xf)
|
||||||
|
if extFam {
|
||||||
|
// Add ExtModel
|
||||||
|
model += int((eax >> 12) & 0xf0)
|
||||||
|
}
|
||||||
|
stepping = int(eax & 0xf)
|
||||||
|
return family, model, stepping
|
||||||
}
|
}
|
||||||
|
|
||||||
func physicalCores() int {
|
func physicalCores() int {
|
||||||
@ -811,9 +955,14 @@ func (c *CPUInfo) cacheSize() {
|
|||||||
c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
|
c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
|
||||||
|
|
||||||
// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
|
// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
|
||||||
if maxExtendedFunction() < 0x8000001D {
|
if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Xen Hypervisor is buggy and returns the same entry no matter ECX value.
|
||||||
|
// Hack: When we encounter the same entry 100 times we break.
|
||||||
|
nSame := 0
|
||||||
|
var last uint32
|
||||||
for i := uint32(0); i < math.MaxUint32; i++ {
|
for i := uint32(0); i < math.MaxUint32; i++ {
|
||||||
eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
|
eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
|
||||||
|
|
||||||
@ -829,6 +978,16 @@ func (c *CPUInfo) cacheSize() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check for the same value repeated.
|
||||||
|
comb := eax ^ ebx ^ ecx
|
||||||
|
if comb == last {
|
||||||
|
nSame++
|
||||||
|
if nSame == 100 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
last = comb
|
||||||
|
|
||||||
switch level {
|
switch level {
|
||||||
case 1:
|
case 1:
|
||||||
switch typ {
|
switch typ {
|
||||||
@ -913,14 +1072,13 @@ func support() flagSet {
|
|||||||
if mfi < 0x1 {
|
if mfi < 0x1 {
|
||||||
return fs
|
return fs
|
||||||
}
|
}
|
||||||
family, model := familyModel()
|
family, model, _ := familyModel()
|
||||||
|
|
||||||
_, _, c, d := cpuid(1)
|
_, _, c, d := cpuid(1)
|
||||||
fs.setIf((d&(1<<0)) != 0, X87)
|
fs.setIf((d&(1<<0)) != 0, X87)
|
||||||
fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
|
fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
|
||||||
fs.setIf((d&(1<<11)) != 0, SCE)
|
fs.setIf((d&(1<<11)) != 0, SYSEE)
|
||||||
fs.setIf((d&(1<<15)) != 0, CMOV)
|
fs.setIf((d&(1<<15)) != 0, CMOV)
|
||||||
fs.setIf((d&(1<<22)) != 0, MMXEXT)
|
|
||||||
fs.setIf((d&(1<<23)) != 0, MMX)
|
fs.setIf((d&(1<<23)) != 0, MMX)
|
||||||
fs.setIf((d&(1<<24)) != 0, FXSR)
|
fs.setIf((d&(1<<24)) != 0, FXSR)
|
||||||
fs.setIf((d&(1<<25)) != 0, FXSROPT)
|
fs.setIf((d&(1<<25)) != 0, FXSROPT)
|
||||||
@ -928,9 +1086,9 @@ func support() flagSet {
|
|||||||
fs.setIf((d&(1<<26)) != 0, SSE2)
|
fs.setIf((d&(1<<26)) != 0, SSE2)
|
||||||
fs.setIf((c&1) != 0, SSE3)
|
fs.setIf((c&1) != 0, SSE3)
|
||||||
fs.setIf((c&(1<<5)) != 0, VMX)
|
fs.setIf((c&(1<<5)) != 0, VMX)
|
||||||
fs.setIf((c&0x00000200) != 0, SSSE3)
|
fs.setIf((c&(1<<9)) != 0, SSSE3)
|
||||||
fs.setIf((c&0x00080000) != 0, SSE4)
|
fs.setIf((c&(1<<19)) != 0, SSE4)
|
||||||
fs.setIf((c&0x00100000) != 0, SSE42)
|
fs.setIf((c&(1<<20)) != 0, SSE42)
|
||||||
fs.setIf((c&(1<<25)) != 0, AESNI)
|
fs.setIf((c&(1<<25)) != 0, AESNI)
|
||||||
fs.setIf((c&(1<<1)) != 0, CLMUL)
|
fs.setIf((c&(1<<1)) != 0, CLMUL)
|
||||||
fs.setIf(c&(1<<22) != 0, MOVBE)
|
fs.setIf(c&(1<<22) != 0, MOVBE)
|
||||||
@ -976,7 +1134,6 @@ func support() flagSet {
|
|||||||
// Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
|
// Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
|
||||||
if mfi >= 7 {
|
if mfi >= 7 {
|
||||||
_, ebx, ecx, edx := cpuidex(7, 0)
|
_, ebx, ecx, edx := cpuidex(7, 0)
|
||||||
eax1, _, _, _ := cpuidex(7, 1)
|
|
||||||
if fs.inSet(AVX) && (ebx&0x00000020) != 0 {
|
if fs.inSet(AVX) && (ebx&0x00000020) != 0 {
|
||||||
fs.set(AVX2)
|
fs.set(AVX2)
|
||||||
}
|
}
|
||||||
@ -993,21 +1150,52 @@ func support() flagSet {
|
|||||||
fs.setIf(ebx&(1<<18) != 0, RDSEED)
|
fs.setIf(ebx&(1<<18) != 0, RDSEED)
|
||||||
fs.setIf(ebx&(1<<19) != 0, ADX)
|
fs.setIf(ebx&(1<<19) != 0, ADX)
|
||||||
fs.setIf(ebx&(1<<29) != 0, SHA)
|
fs.setIf(ebx&(1<<29) != 0, SHA)
|
||||||
|
|
||||||
// CPUID.(EAX=7, ECX=0).ECX
|
// CPUID.(EAX=7, ECX=0).ECX
|
||||||
fs.setIf(ecx&(1<<5) != 0, WAITPKG)
|
fs.setIf(ecx&(1<<5) != 0, WAITPKG)
|
||||||
fs.setIf(ecx&(1<<7) != 0, CETSS)
|
fs.setIf(ecx&(1<<7) != 0, CETSS)
|
||||||
|
fs.setIf(ecx&(1<<8) != 0, GFNI)
|
||||||
|
fs.setIf(ecx&(1<<9) != 0, VAES)
|
||||||
|
fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ)
|
||||||
|
fs.setIf(ecx&(1<<13) != 0, TME)
|
||||||
fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
|
fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
|
||||||
fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
|
fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
|
||||||
fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
|
fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
|
||||||
fs.setIf(ecx&(1<<29) != 0, ENQCMD)
|
fs.setIf(ecx&(1<<29) != 0, ENQCMD)
|
||||||
fs.setIf(ecx&(1<<30) != 0, SGXLC)
|
fs.setIf(ecx&(1<<30) != 0, SGXLC)
|
||||||
|
|
||||||
// CPUID.(EAX=7, ECX=0).EDX
|
// CPUID.(EAX=7, ECX=0).EDX
|
||||||
|
fs.setIf(edx&(1<<4) != 0, FSRM)
|
||||||
|
fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL)
|
||||||
|
fs.setIf(edx&(1<<10) != 0, MD_CLEAR)
|
||||||
fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
|
fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
|
||||||
fs.setIf(edx&(1<<14) != 0, SERIALIZE)
|
fs.setIf(edx&(1<<14) != 0, SERIALIZE)
|
||||||
|
fs.setIf(edx&(1<<15) != 0, HYBRID_CPU)
|
||||||
fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
|
fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
|
||||||
|
fs.setIf(edx&(1<<18) != 0, PCONFIG)
|
||||||
fs.setIf(edx&(1<<20) != 0, CETIBT)
|
fs.setIf(edx&(1<<20) != 0, CETIBT)
|
||||||
fs.setIf(edx&(1<<26) != 0, IBPB)
|
fs.setIf(edx&(1<<26) != 0, IBPB)
|
||||||
fs.setIf(edx&(1<<27) != 0, STIBP)
|
fs.setIf(edx&(1<<27) != 0, STIBP)
|
||||||
|
fs.setIf(edx&(1<<28) != 0, FLUSH_L1D)
|
||||||
|
fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP)
|
||||||
|
fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
|
||||||
|
fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
|
||||||
|
|
||||||
|
// CPUID.(EAX=7, ECX=1).EDX
|
||||||
|
fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8)
|
||||||
|
fs.setIf(edx&(1<<5) != 0, AVXNECONVERT)
|
||||||
|
fs.setIf(edx&(1<<14) != 0, PREFETCHI)
|
||||||
|
|
||||||
|
// CPUID.(EAX=7, ECX=1).EAX
|
||||||
|
eax1, _, _, _ := cpuidex(7, 1)
|
||||||
|
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
|
||||||
|
fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
|
||||||
|
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
|
||||||
|
fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
|
||||||
|
fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT)
|
||||||
|
fs.setIf(eax1&(1<<22) != 0, HRESET)
|
||||||
|
fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
|
||||||
|
fs.setIf(eax1&(1<<26) != 0, LAM)
|
||||||
|
|
||||||
// Only detect AVX-512 features if XGETBV is supported
|
// Only detect AVX-512 features if XGETBV is supported
|
||||||
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
|
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
|
||||||
@ -1033,9 +1221,6 @@ func support() flagSet {
|
|||||||
// ecx
|
// ecx
|
||||||
fs.setIf(ecx&(1<<1) != 0, AVX512VBMI)
|
fs.setIf(ecx&(1<<1) != 0, AVX512VBMI)
|
||||||
fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2)
|
fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2)
|
||||||
fs.setIf(ecx&(1<<8) != 0, GFNI)
|
|
||||||
fs.setIf(ecx&(1<<9) != 0, VAES)
|
|
||||||
fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ)
|
|
||||||
fs.setIf(ecx&(1<<11) != 0, AVX512VNNI)
|
fs.setIf(ecx&(1<<11) != 0, AVX512VNNI)
|
||||||
fs.setIf(ecx&(1<<12) != 0, AVX512BITALG)
|
fs.setIf(ecx&(1<<12) != 0, AVX512BITALG)
|
||||||
fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ)
|
fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ)
|
||||||
@ -1047,31 +1232,66 @@ func support() flagSet {
|
|||||||
fs.setIf(edx&(1<<25) != 0, AMXINT8)
|
fs.setIf(edx&(1<<25) != 0, AMXINT8)
|
||||||
// eax1 = CPUID.(EAX=7, ECX=1).EAX
|
// eax1 = CPUID.(EAX=7, ECX=1).EAX
|
||||||
fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
|
fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
|
||||||
}
|
fs.setIf(eax1&(1<<21) != 0, AMXFP16)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CPUID.(EAX=7, ECX=2)
|
||||||
|
_, _, _, edx = cpuidex(7, 2)
|
||||||
|
fs.setIf(edx&(1<<5) != 0, MCDT_NO)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
|
||||||
|
// EAX
|
||||||
|
// Bit 00: XSAVEOPT is available.
|
||||||
|
// Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set.
|
||||||
|
// Bit 02: Supports XGETBV with ECX = 1 if set.
|
||||||
|
// Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set.
|
||||||
|
// Bits 31 - 04: Reserved.
|
||||||
|
// EBX
|
||||||
|
// Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCRO | IA32_XSS.
|
||||||
|
// ECX
|
||||||
|
// Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1.
|
||||||
|
// EDX?
|
||||||
|
// Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved.
|
||||||
|
if mfi >= 0xd {
|
||||||
|
if fs.inSet(XSAVE) {
|
||||||
|
eax, _, _, _ := cpuidex(0xd, 1)
|
||||||
|
fs.setIf(eax&(1<<0) != 0, XSAVEOPT)
|
||||||
|
fs.setIf(eax&(1<<1) != 0, XSAVEC)
|
||||||
|
fs.setIf(eax&(1<<2) != 0, XGETBV1)
|
||||||
|
fs.setIf(eax&(1<<3) != 0, XSAVES)
|
||||||
|
}
|
||||||
|
}
|
||||||
if maxExtendedFunction() >= 0x80000001 {
|
if maxExtendedFunction() >= 0x80000001 {
|
||||||
_, _, c, d := cpuid(0x80000001)
|
_, _, c, d := cpuid(0x80000001)
|
||||||
if (c & (1 << 5)) != 0 {
|
if (c & (1 << 5)) != 0 {
|
||||||
fs.set(LZCNT)
|
fs.set(LZCNT)
|
||||||
fs.set(POPCNT)
|
fs.set(POPCNT)
|
||||||
}
|
}
|
||||||
|
// ECX
|
||||||
fs.setIf((c&(1<<0)) != 0, LAHF)
|
fs.setIf((c&(1<<0)) != 0, LAHF)
|
||||||
fs.setIf((c&(1<<10)) != 0, IBS)
|
fs.setIf((c&(1<<2)) != 0, SVM)
|
||||||
fs.setIf((d&(1<<31)) != 0, AMD3DNOW)
|
|
||||||
fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT)
|
|
||||||
fs.setIf((d&(1<<23)) != 0, MMX)
|
|
||||||
fs.setIf((d&(1<<22)) != 0, MMXEXT)
|
|
||||||
fs.setIf((c&(1<<6)) != 0, SSE4A)
|
fs.setIf((c&(1<<6)) != 0, SSE4A)
|
||||||
|
fs.setIf((c&(1<<10)) != 0, IBS)
|
||||||
|
fs.setIf((c&(1<<22)) != 0, TOPEXT)
|
||||||
|
|
||||||
|
// EDX
|
||||||
|
fs.setIf(d&(1<<11) != 0, SYSCALL)
|
||||||
fs.setIf(d&(1<<20) != 0, NX)
|
fs.setIf(d&(1<<20) != 0, NX)
|
||||||
|
fs.setIf(d&(1<<22) != 0, MMXEXT)
|
||||||
|
fs.setIf(d&(1<<23) != 0, MMX)
|
||||||
|
fs.setIf(d&(1<<24) != 0, FXSR)
|
||||||
|
fs.setIf(d&(1<<25) != 0, FXSROPT)
|
||||||
fs.setIf(d&(1<<27) != 0, RDTSCP)
|
fs.setIf(d&(1<<27) != 0, RDTSCP)
|
||||||
|
fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT)
|
||||||
|
fs.setIf(d&(1<<31) != 0, AMD3DNOW)
|
||||||
|
|
||||||
/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
|
/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
|
||||||
* used unless the OS has AVX support. */
|
* used unless the OS has AVX support. */
|
||||||
if fs.inSet(AVX) {
|
if fs.inSet(AVX) {
|
||||||
fs.setIf((c&0x00000800) != 0, XOP)
|
fs.setIf((c&(1<<11)) != 0, XOP)
|
||||||
fs.setIf((c&0x00010000) != 0, FMA4)
|
fs.setIf((c&(1<<16)) != 0, FMA4)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@ -1085,15 +1305,48 @@ func support() flagSet {
|
|||||||
|
|
||||||
if maxExtendedFunction() >= 0x80000008 {
|
if maxExtendedFunction() >= 0x80000008 {
|
||||||
_, b, _, _ := cpuid(0x80000008)
|
_, b, _, _ := cpuid(0x80000008)
|
||||||
|
fs.setIf(b&(1<<28) != 0, PSFD)
|
||||||
|
fs.setIf(b&(1<<27) != 0, CPPC)
|
||||||
|
fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD)
|
||||||
|
fs.setIf(b&(1<<23) != 0, PPIN)
|
||||||
|
fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED)
|
||||||
|
fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS)
|
||||||
|
fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP)
|
||||||
|
fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED)
|
||||||
|
fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON)
|
||||||
|
fs.setIf(b&(1<<15) != 0, STIBP)
|
||||||
|
fs.setIf(b&(1<<14) != 0, IBRS)
|
||||||
|
fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
|
||||||
|
fs.setIf(b&(1<<12) != 0, IBPB)
|
||||||
fs.setIf((b&(1<<9)) != 0, WBNOINVD)
|
fs.setIf((b&(1<<9)) != 0, WBNOINVD)
|
||||||
fs.setIf((b&(1<<8)) != 0, MCOMMIT)
|
fs.setIf((b&(1<<8)) != 0, MCOMMIT)
|
||||||
fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
|
|
||||||
fs.setIf((b&(1<<4)) != 0, RDPRU)
|
fs.setIf((b&(1<<4)) != 0, RDPRU)
|
||||||
fs.setIf((b&(1<<3)) != 0, INVLPGB)
|
fs.setIf((b&(1<<3)) != 0, INVLPGB)
|
||||||
fs.setIf((b&(1<<1)) != 0, MSRIRC)
|
fs.setIf((b&(1<<1)) != 0, MSRIRC)
|
||||||
fs.setIf((b&(1<<0)) != 0, CLZERO)
|
fs.setIf((b&(1<<0)) != 0, CLZERO)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A {
|
||||||
|
_, _, _, edx := cpuid(0x8000000A)
|
||||||
|
fs.setIf((edx>>0)&1 == 1, SVMNP)
|
||||||
|
fs.setIf((edx>>1)&1 == 1, LBRVIRT)
|
||||||
|
fs.setIf((edx>>2)&1 == 1, SVML)
|
||||||
|
fs.setIf((edx>>3)&1 == 1, NRIPS)
|
||||||
|
fs.setIf((edx>>4)&1 == 1, TSCRATEMSR)
|
||||||
|
fs.setIf((edx>>5)&1 == 1, VMCBCLEAN)
|
||||||
|
fs.setIf((edx>>6)&1 == 1, SVMFBASID)
|
||||||
|
fs.setIf((edx>>7)&1 == 1, SVMDA)
|
||||||
|
fs.setIf((edx>>10)&1 == 1, SVMPF)
|
||||||
|
fs.setIf((edx>>12)&1 == 1, SVMPFT)
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxExtendedFunction() >= 0x8000001a {
|
||||||
|
eax, _, _, _ := cpuid(0x8000001a)
|
||||||
|
fs.setIf((eax>>0)&1 == 1, FP128)
|
||||||
|
fs.setIf((eax>>1)&1 == 1, MOVU)
|
||||||
|
fs.setIf((eax>>2)&1 == 1, FP256)
|
||||||
|
}
|
||||||
|
|
||||||
if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
|
if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
|
||||||
eax, _, _, _ := cpuid(0x8000001b)
|
eax, _, _, _ := cpuid(0x8000001b)
|
||||||
fs.setIf((eax>>0)&1 == 1, IBSFFV)
|
fs.setIf((eax>>0)&1 == 1, IBSFFV)
|
||||||
@ -1104,6 +1357,28 @@ func support() flagSet {
|
|||||||
fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
|
fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
|
||||||
fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
|
fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
|
||||||
fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
|
fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
|
||||||
|
fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE)
|
||||||
|
fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX)
|
||||||
|
fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1.
|
||||||
|
fs.setIf((eax>>11)&1 == 1, IBS_ZEN4)
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxExtendedFunction() >= 0x8000001f && vend == AMD {
|
||||||
|
a, _, _, _ := cpuid(0x8000001f)
|
||||||
|
fs.setIf((a>>0)&1 == 1, SME)
|
||||||
|
fs.setIf((a>>1)&1 == 1, SEV)
|
||||||
|
fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH)
|
||||||
|
fs.setIf((a>>3)&1 == 1, SEV_ES)
|
||||||
|
fs.setIf((a>>4)&1 == 1, SEV_SNP)
|
||||||
|
fs.setIf((a>>5)&1 == 1, VMPL)
|
||||||
|
fs.setIf((a>>10)&1 == 1, SME_COHERENT)
|
||||||
|
fs.setIf((a>>11)&1 == 1, SEV_64BIT)
|
||||||
|
fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED)
|
||||||
|
fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE)
|
||||||
|
fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP)
|
||||||
|
fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST)
|
||||||
|
fs.setIf((a>>16)&1 == 1, VTE)
|
||||||
|
fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs
|
return fs
|
||||||
|
2
vendor/github.com/klauspost/cpuid/v2/detect_x86.go
generated
vendored
2
vendor/github.com/klauspost/cpuid/v2/detect_x86.go
generated
vendored
@ -24,7 +24,7 @@ func addInfo(c *CPUInfo, safe bool) {
|
|||||||
c.maxExFunc = maxExtendedFunction()
|
c.maxExFunc = maxExtendedFunction()
|
||||||
c.BrandName = brandName()
|
c.BrandName = brandName()
|
||||||
c.CacheLine = cacheLine()
|
c.CacheLine = cacheLine()
|
||||||
c.Family, c.Model = familyModel()
|
c.Family, c.Model, c.Stepping = familyModel()
|
||||||
c.featureSet = support()
|
c.featureSet = support()
|
||||||
c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
|
c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
|
||||||
c.ThreadsPerCore = threadsPerCore()
|
c.ThreadsPerCore = threadsPerCore()
|
||||||
|
324
vendor/github.com/klauspost/cpuid/v2/featureid_string.go
generated
vendored
324
vendor/github.com/klauspost/cpuid/v2/featureid_string.go
generated
vendored
@ -13,137 +13,207 @@ func _() {
|
|||||||
_ = x[AMD3DNOW-3]
|
_ = x[AMD3DNOW-3]
|
||||||
_ = x[AMD3DNOWEXT-4]
|
_ = x[AMD3DNOWEXT-4]
|
||||||
_ = x[AMXBF16-5]
|
_ = x[AMXBF16-5]
|
||||||
_ = x[AMXINT8-6]
|
_ = x[AMXFP16-6]
|
||||||
_ = x[AMXTILE-7]
|
_ = x[AMXINT8-7]
|
||||||
_ = x[AVX-8]
|
_ = x[AMXTILE-8]
|
||||||
_ = x[AVX2-9]
|
_ = x[AVX-9]
|
||||||
_ = x[AVX512BF16-10]
|
_ = x[AVX2-10]
|
||||||
_ = x[AVX512BITALG-11]
|
_ = x[AVX512BF16-11]
|
||||||
_ = x[AVX512BW-12]
|
_ = x[AVX512BITALG-12]
|
||||||
_ = x[AVX512CD-13]
|
_ = x[AVX512BW-13]
|
||||||
_ = x[AVX512DQ-14]
|
_ = x[AVX512CD-14]
|
||||||
_ = x[AVX512ER-15]
|
_ = x[AVX512DQ-15]
|
||||||
_ = x[AVX512F-16]
|
_ = x[AVX512ER-16]
|
||||||
_ = x[AVX512FP16-17]
|
_ = x[AVX512F-17]
|
||||||
_ = x[AVX512IFMA-18]
|
_ = x[AVX512FP16-18]
|
||||||
_ = x[AVX512PF-19]
|
_ = x[AVX512IFMA-19]
|
||||||
_ = x[AVX512VBMI-20]
|
_ = x[AVX512PF-20]
|
||||||
_ = x[AVX512VBMI2-21]
|
_ = x[AVX512VBMI-21]
|
||||||
_ = x[AVX512VL-22]
|
_ = x[AVX512VBMI2-22]
|
||||||
_ = x[AVX512VNNI-23]
|
_ = x[AVX512VL-23]
|
||||||
_ = x[AVX512VP2INTERSECT-24]
|
_ = x[AVX512VNNI-24]
|
||||||
_ = x[AVX512VPOPCNTDQ-25]
|
_ = x[AVX512VP2INTERSECT-25]
|
||||||
_ = x[AVXSLOW-26]
|
_ = x[AVX512VPOPCNTDQ-26]
|
||||||
_ = x[BMI1-27]
|
_ = x[AVXIFMA-27]
|
||||||
_ = x[BMI2-28]
|
_ = x[AVXNECONVERT-28]
|
||||||
_ = x[CETIBT-29]
|
_ = x[AVXSLOW-29]
|
||||||
_ = x[CETSS-30]
|
_ = x[AVXVNNI-30]
|
||||||
_ = x[CLDEMOTE-31]
|
_ = x[AVXVNNIINT8-31]
|
||||||
_ = x[CLMUL-32]
|
_ = x[BMI1-32]
|
||||||
_ = x[CLZERO-33]
|
_ = x[BMI2-33]
|
||||||
_ = x[CMOV-34]
|
_ = x[CETIBT-34]
|
||||||
_ = x[CMPXCHG8-35]
|
_ = x[CETSS-35]
|
||||||
_ = x[CPBOOST-36]
|
_ = x[CLDEMOTE-36]
|
||||||
_ = x[CX16-37]
|
_ = x[CLMUL-37]
|
||||||
_ = x[ENQCMD-38]
|
_ = x[CLZERO-38]
|
||||||
_ = x[ERMS-39]
|
_ = x[CMOV-39]
|
||||||
_ = x[F16C-40]
|
_ = x[CMPCCXADD-40]
|
||||||
_ = x[FMA3-41]
|
_ = x[CMPSB_SCADBS_SHORT-41]
|
||||||
_ = x[FMA4-42]
|
_ = x[CMPXCHG8-42]
|
||||||
_ = x[FXSR-43]
|
_ = x[CPBOOST-43]
|
||||||
_ = x[FXSROPT-44]
|
_ = x[CPPC-44]
|
||||||
_ = x[GFNI-45]
|
_ = x[CX16-45]
|
||||||
_ = x[HLE-46]
|
_ = x[EFER_LMSLE_UNS-46]
|
||||||
_ = x[HTT-47]
|
_ = x[ENQCMD-47]
|
||||||
_ = x[HWA-48]
|
_ = x[ERMS-48]
|
||||||
_ = x[HYPERVISOR-49]
|
_ = x[F16C-49]
|
||||||
_ = x[IBPB-50]
|
_ = x[FLUSH_L1D-50]
|
||||||
_ = x[IBS-51]
|
_ = x[FMA3-51]
|
||||||
_ = x[IBSBRNTRGT-52]
|
_ = x[FMA4-52]
|
||||||
_ = x[IBSFETCHSAM-53]
|
_ = x[FP128-53]
|
||||||
_ = x[IBSFFV-54]
|
_ = x[FP256-54]
|
||||||
_ = x[IBSOPCNT-55]
|
_ = x[FSRM-55]
|
||||||
_ = x[IBSOPCNTEXT-56]
|
_ = x[FXSR-56]
|
||||||
_ = x[IBSOPSAM-57]
|
_ = x[FXSROPT-57]
|
||||||
_ = x[IBSRDWROPCNT-58]
|
_ = x[GFNI-58]
|
||||||
_ = x[IBSRIPINVALIDCHK-59]
|
_ = x[HLE-59]
|
||||||
_ = x[INT_WBINVD-60]
|
_ = x[HRESET-60]
|
||||||
_ = x[INVLPGB-61]
|
_ = x[HTT-61]
|
||||||
_ = x[LAHF-62]
|
_ = x[HWA-62]
|
||||||
_ = x[LZCNT-63]
|
_ = x[HYBRID_CPU-63]
|
||||||
_ = x[MCAOVERFLOW-64]
|
_ = x[HYPERVISOR-64]
|
||||||
_ = x[MCOMMIT-65]
|
_ = x[IA32_ARCH_CAP-65]
|
||||||
_ = x[MMX-66]
|
_ = x[IA32_CORE_CAP-66]
|
||||||
_ = x[MMXEXT-67]
|
_ = x[IBPB-67]
|
||||||
_ = x[MOVBE-68]
|
_ = x[IBRS-68]
|
||||||
_ = x[MOVDIR64B-69]
|
_ = x[IBRS_PREFERRED-69]
|
||||||
_ = x[MOVDIRI-70]
|
_ = x[IBRS_PROVIDES_SMP-70]
|
||||||
_ = x[MPX-71]
|
_ = x[IBS-71]
|
||||||
_ = x[MSRIRC-72]
|
_ = x[IBSBRNTRGT-72]
|
||||||
_ = x[NX-73]
|
_ = x[IBSFETCHSAM-73]
|
||||||
_ = x[OSXSAVE-74]
|
_ = x[IBSFFV-74]
|
||||||
_ = x[POPCNT-75]
|
_ = x[IBSOPCNT-75]
|
||||||
_ = x[RDPRU-76]
|
_ = x[IBSOPCNTEXT-76]
|
||||||
_ = x[RDRAND-77]
|
_ = x[IBSOPSAM-77]
|
||||||
_ = x[RDSEED-78]
|
_ = x[IBSRDWROPCNT-78]
|
||||||
_ = x[RDTSCP-79]
|
_ = x[IBSRIPINVALIDCHK-79]
|
||||||
_ = x[RTM-80]
|
_ = x[IBS_FETCH_CTLX-80]
|
||||||
_ = x[RTM_ALWAYS_ABORT-81]
|
_ = x[IBS_OPDATA4-81]
|
||||||
_ = x[SCE-82]
|
_ = x[IBS_OPFUSE-82]
|
||||||
_ = x[SERIALIZE-83]
|
_ = x[IBS_PREVENTHOST-83]
|
||||||
_ = x[SGX-84]
|
_ = x[IBS_ZEN4-84]
|
||||||
_ = x[SGXLC-85]
|
_ = x[INT_WBINVD-85]
|
||||||
_ = x[SHA-86]
|
_ = x[INVLPGB-86]
|
||||||
_ = x[SSE-87]
|
_ = x[LAHF-87]
|
||||||
_ = x[SSE2-88]
|
_ = x[LAM-88]
|
||||||
_ = x[SSE3-89]
|
_ = x[LBRVIRT-89]
|
||||||
_ = x[SSE4-90]
|
_ = x[LZCNT-90]
|
||||||
_ = x[SSE42-91]
|
_ = x[MCAOVERFLOW-91]
|
||||||
_ = x[SSE4A-92]
|
_ = x[MCDT_NO-92]
|
||||||
_ = x[SSSE3-93]
|
_ = x[MCOMMIT-93]
|
||||||
_ = x[STIBP-94]
|
_ = x[MD_CLEAR-94]
|
||||||
_ = x[SUCCOR-95]
|
_ = x[MMX-95]
|
||||||
_ = x[TBM-96]
|
_ = x[MMXEXT-96]
|
||||||
_ = x[TSXLDTRK-97]
|
_ = x[MOVBE-97]
|
||||||
_ = x[VAES-98]
|
_ = x[MOVDIR64B-98]
|
||||||
_ = x[VMX-99]
|
_ = x[MOVDIRI-99]
|
||||||
_ = x[VPCLMULQDQ-100]
|
_ = x[MOVSB_ZL-100]
|
||||||
_ = x[WAITPKG-101]
|
_ = x[MOVU-101]
|
||||||
_ = x[WBNOINVD-102]
|
_ = x[MPX-102]
|
||||||
_ = x[X87-103]
|
_ = x[MSRIRC-103]
|
||||||
_ = x[XOP-104]
|
_ = x[MSR_PAGEFLUSH-104]
|
||||||
_ = x[XSAVE-105]
|
_ = x[NRIPS-105]
|
||||||
_ = x[AESARM-106]
|
_ = x[NX-106]
|
||||||
_ = x[ARMCPUID-107]
|
_ = x[OSXSAVE-107]
|
||||||
_ = x[ASIMD-108]
|
_ = x[PCONFIG-108]
|
||||||
_ = x[ASIMDDP-109]
|
_ = x[POPCNT-109]
|
||||||
_ = x[ASIMDHP-110]
|
_ = x[PPIN-110]
|
||||||
_ = x[ASIMDRDM-111]
|
_ = x[PREFETCHI-111]
|
||||||
_ = x[ATOMICS-112]
|
_ = x[PSFD-112]
|
||||||
_ = x[CRC32-113]
|
_ = x[RDPRU-113]
|
||||||
_ = x[DCPOP-114]
|
_ = x[RDRAND-114]
|
||||||
_ = x[EVTSTRM-115]
|
_ = x[RDSEED-115]
|
||||||
_ = x[FCMA-116]
|
_ = x[RDTSCP-116]
|
||||||
_ = x[FP-117]
|
_ = x[RTM-117]
|
||||||
_ = x[FPHP-118]
|
_ = x[RTM_ALWAYS_ABORT-118]
|
||||||
_ = x[GPA-119]
|
_ = x[SERIALIZE-119]
|
||||||
_ = x[JSCVT-120]
|
_ = x[SEV-120]
|
||||||
_ = x[LRCPC-121]
|
_ = x[SEV_64BIT-121]
|
||||||
_ = x[PMULL-122]
|
_ = x[SEV_ALTERNATIVE-122]
|
||||||
_ = x[SHA1-123]
|
_ = x[SEV_DEBUGSWAP-123]
|
||||||
_ = x[SHA2-124]
|
_ = x[SEV_ES-124]
|
||||||
_ = x[SHA3-125]
|
_ = x[SEV_RESTRICTED-125]
|
||||||
_ = x[SHA512-126]
|
_ = x[SEV_SNP-126]
|
||||||
_ = x[SM3-127]
|
_ = x[SGX-127]
|
||||||
_ = x[SM4-128]
|
_ = x[SGXLC-128]
|
||||||
_ = x[SVE-129]
|
_ = x[SHA-129]
|
||||||
_ = x[lastID-130]
|
_ = x[SME-130]
|
||||||
|
_ = x[SME_COHERENT-131]
|
||||||
|
_ = x[SPEC_CTRL_SSBD-132]
|
||||||
|
_ = x[SRBDS_CTRL-133]
|
||||||
|
_ = x[SSE-134]
|
||||||
|
_ = x[SSE2-135]
|
||||||
|
_ = x[SSE3-136]
|
||||||
|
_ = x[SSE4-137]
|
||||||
|
_ = x[SSE42-138]
|
||||||
|
_ = x[SSE4A-139]
|
||||||
|
_ = x[SSSE3-140]
|
||||||
|
_ = x[STIBP-141]
|
||||||
|
_ = x[STIBP_ALWAYSON-142]
|
||||||
|
_ = x[STOSB_SHORT-143]
|
||||||
|
_ = x[SUCCOR-144]
|
||||||
|
_ = x[SVM-145]
|
||||||
|
_ = x[SVMDA-146]
|
||||||
|
_ = x[SVMFBASID-147]
|
||||||
|
_ = x[SVML-148]
|
||||||
|
_ = x[SVMNP-149]
|
||||||
|
_ = x[SVMPF-150]
|
||||||
|
_ = x[SVMPFT-151]
|
||||||
|
_ = x[SYSCALL-152]
|
||||||
|
_ = x[SYSEE-153]
|
||||||
|
_ = x[TBM-154]
|
||||||
|
_ = x[TLB_FLUSH_NESTED-155]
|
||||||
|
_ = x[TME-156]
|
||||||
|
_ = x[TOPEXT-157]
|
||||||
|
_ = x[TSCRATEMSR-158]
|
||||||
|
_ = x[TSXLDTRK-159]
|
||||||
|
_ = x[VAES-160]
|
||||||
|
_ = x[VMCBCLEAN-161]
|
||||||
|
_ = x[VMPL-162]
|
||||||
|
_ = x[VMSA_REGPROT-163]
|
||||||
|
_ = x[VMX-164]
|
||||||
|
_ = x[VPCLMULQDQ-165]
|
||||||
|
_ = x[VTE-166]
|
||||||
|
_ = x[WAITPKG-167]
|
||||||
|
_ = x[WBNOINVD-168]
|
||||||
|
_ = x[X87-169]
|
||||||
|
_ = x[XGETBV1-170]
|
||||||
|
_ = x[XOP-171]
|
||||||
|
_ = x[XSAVE-172]
|
||||||
|
_ = x[XSAVEC-173]
|
||||||
|
_ = x[XSAVEOPT-174]
|
||||||
|
_ = x[XSAVES-175]
|
||||||
|
_ = x[AESARM-176]
|
||||||
|
_ = x[ARMCPUID-177]
|
||||||
|
_ = x[ASIMD-178]
|
||||||
|
_ = x[ASIMDDP-179]
|
||||||
|
_ = x[ASIMDHP-180]
|
||||||
|
_ = x[ASIMDRDM-181]
|
||||||
|
_ = x[ATOMICS-182]
|
||||||
|
_ = x[CRC32-183]
|
||||||
|
_ = x[DCPOP-184]
|
||||||
|
_ = x[EVTSTRM-185]
|
||||||
|
_ = x[FCMA-186]
|
||||||
|
_ = x[FP-187]
|
||||||
|
_ = x[FPHP-188]
|
||||||
|
_ = x[GPA-189]
|
||||||
|
_ = x[JSCVT-190]
|
||||||
|
_ = x[LRCPC-191]
|
||||||
|
_ = x[PMULL-192]
|
||||||
|
_ = x[SHA1-193]
|
||||||
|
_ = x[SHA2-194]
|
||||||
|
_ = x[SHA3-195]
|
||||||
|
_ = x[SHA512-196]
|
||||||
|
_ = x[SM3-197]
|
||||||
|
_ = x[SM4-198]
|
||||||
|
_ = x[SVE-199]
|
||||||
|
_ = x[lastID-200]
|
||||||
_ = x[firstID-0]
|
_ = x[firstID-0]
|
||||||
}
|
}
|
||||||
|
|
||||||
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLAHFLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMPXMSRIRCNXOSXSAVEPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDX87XOPXSAVEAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
|
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4INT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
|
||||||
|
|
||||||
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 244, 249, 257, 262, 268, 272, 280, 287, 291, 297, 301, 305, 309, 313, 317, 324, 328, 331, 334, 337, 347, 351, 354, 364, 375, 381, 389, 400, 408, 420, 436, 446, 453, 457, 462, 473, 480, 483, 489, 494, 503, 510, 513, 519, 521, 528, 534, 539, 545, 551, 557, 560, 576, 579, 588, 591, 596, 599, 602, 606, 610, 614, 619, 624, 629, 634, 640, 643, 651, 655, 658, 668, 675, 683, 686, 689, 694, 700, 708, 713, 720, 727, 735, 742, 747, 752, 759, 763, 765, 769, 772, 777, 782, 787, 791, 795, 799, 805, 808, 811, 814, 820}
|
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 278, 282, 288, 293, 301, 306, 312, 316, 325, 343, 351, 358, 362, 366, 380, 386, 390, 394, 403, 407, 411, 416, 421, 425, 429, 436, 440, 443, 449, 452, 455, 465, 475, 488, 501, 505, 509, 523, 540, 543, 553, 564, 570, 578, 589, 597, 609, 625, 639, 650, 660, 675, 683, 693, 700, 704, 707, 714, 719, 730, 737, 744, 752, 755, 761, 766, 775, 782, 790, 794, 797, 803, 816, 821, 823, 830, 837, 843, 847, 856, 860, 865, 871, 877, 883, 886, 902, 911, 914, 923, 938, 951, 957, 971, 978, 981, 986, 989, 992, 1004, 1018, 1028, 1031, 1035, 1039, 1043, 1048, 1053, 1058, 1063, 1077, 1088, 1094, 1097, 1102, 1111, 1115, 1120, 1125, 1131, 1138, 1143, 1146, 1162, 1165, 1171, 1181, 1189, 1193, 1202, 1206, 1218, 1221, 1231, 1234, 1241, 1249, 1252, 1259, 1262, 1267, 1273, 1281, 1287, 1293, 1301, 1306, 1313, 1320, 1328, 1335, 1340, 1345, 1352, 1356, 1358, 1362, 1365, 1370, 1375, 1380, 1384, 1388, 1392, 1398, 1401, 1404, 1407, 1413}
|
||||||
|
|
||||||
func (i FeatureID) String() string {
|
func (i FeatureID) String() string {
|
||||||
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
|
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
|
||||||
|
112
vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
generated
vendored
112
vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
generated
vendored
@ -2,18 +2,120 @@
|
|||||||
|
|
||||||
package cpuid
|
package cpuid
|
||||||
|
|
||||||
import "runtime"
|
import (
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
func detectOS(c *CPUInfo) bool {
|
func detectOS(c *CPUInfo) bool {
|
||||||
|
if runtime.GOOS != "ios" {
|
||||||
|
tryToFillCPUInfoFomSysctl(c)
|
||||||
|
}
|
||||||
// There are no hw.optional sysctl values for the below features on Mac OS 11.0
|
// There are no hw.optional sysctl values for the below features on Mac OS 11.0
|
||||||
// to detect their supported state dynamically. Assume the CPU features that
|
// to detect their supported state dynamically. Assume the CPU features that
|
||||||
// Apple Silicon M1 supports to be available as a minimal set of features
|
// Apple Silicon M1 supports to be available as a minimal set of features
|
||||||
// to all Go programs running on darwin/arm64.
|
// to all Go programs running on darwin/arm64.
|
||||||
// TODO: Add more if we know them.
|
// TODO: Add more if we know them.
|
||||||
c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2)
|
c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2)
|
||||||
c.PhysicalCores = runtime.NumCPU()
|
|
||||||
// For now assuming 1 thread per core...
|
|
||||||
c.ThreadsPerCore = 1
|
|
||||||
c.LogicalCores = c.PhysicalCores
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func sysctlGetBool(name string) bool {
|
||||||
|
value, err := unix.SysctlUint32(name)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return value != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func sysctlGetString(name string) string {
|
||||||
|
value, err := unix.Sysctl(name)
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func sysctlGetInt(unknown int, names ...string) int {
|
||||||
|
for _, name := range names {
|
||||||
|
value, err := unix.SysctlUint32(name)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if value != 0 {
|
||||||
|
return int(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return unknown
|
||||||
|
}
|
||||||
|
|
||||||
|
func sysctlGetInt64(unknown int, names ...string) int {
|
||||||
|
for _, name := range names {
|
||||||
|
value64, err := unix.SysctlUint64(name)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if int(value64) != unknown {
|
||||||
|
return int(value64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return unknown
|
||||||
|
}
|
||||||
|
|
||||||
|
func setFeature(c *CPUInfo, name string, feature FeatureID) {
|
||||||
|
c.featureSet.setIf(sysctlGetBool(name), feature)
|
||||||
|
}
|
||||||
|
func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
|
||||||
|
c.BrandName = sysctlGetString("machdep.cpu.brand_string")
|
||||||
|
|
||||||
|
if len(c.BrandName) != 0 {
|
||||||
|
c.VendorString = strings.Fields(c.BrandName)[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu")
|
||||||
|
c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") /
|
||||||
|
sysctlGetInt(1, "hw.physicalcpu")
|
||||||
|
c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count")
|
||||||
|
c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily")
|
||||||
|
c.Model = sysctlGetInt(0, "machdep.cpu.model")
|
||||||
|
c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize")
|
||||||
|
c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize")
|
||||||
|
c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize")
|
||||||
|
c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
|
||||||
|
c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")
|
||||||
|
|
||||||
|
// from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_AES", AESARM)
|
||||||
|
setFeature(c, "hw.optional.AdvSIMD", ASIMD)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM)
|
||||||
|
setFeature(c, "hw.optional.FEAT_CRC32", CRC32)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP)
|
||||||
|
// setFeature(c, "", EVTSTRM)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_FP", FP)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512)
|
||||||
|
// setFeature(c, "", SM3)
|
||||||
|
// setFeature(c, "", SM4)
|
||||||
|
setFeature(c, "hw.optional.arm.FEAT_SVE", SVE)
|
||||||
|
|
||||||
|
// from empirical observation
|
||||||
|
setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP)
|
||||||
|
setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS)
|
||||||
|
setFeature(c, "hw.optional.floatingpoint", FP)
|
||||||
|
setFeature(c, "hw.optional.armv8_2_sha3", SHA3)
|
||||||
|
setFeature(c, "hw.optional.armv8_2_sha512", SHA512)
|
||||||
|
setFeature(c, "hw.optional.armv8_3_compnum", FCMA)
|
||||||
|
setFeature(c, "hw.optional.armv8_crc32", CRC32)
|
||||||
|
}
|
||||||
|
5
vendor/github.com/kyokomi/emoji/v2/emoji.go
generated
vendored
5
vendor/github.com/kyokomi/emoji/v2/emoji.go
generated
vendored
@ -49,7 +49,8 @@ func NormalizeShortCode(shortCode string) string {
|
|||||||
// regular expression that matches :flag-[countrycode]:
|
// regular expression that matches :flag-[countrycode]:
|
||||||
var flagRegexp = regexp.MustCompile(":flag-([a-z]{2}):")
|
var flagRegexp = regexp.MustCompile(":flag-([a-z]{2}):")
|
||||||
|
|
||||||
func emojize(x string) string {
|
// Emojize Converts the string passed as an argument to a emoji. For unsupported emoji, the string passed as an argument is returned as is.
|
||||||
|
func Emojize(x string) string {
|
||||||
str, ok := emojiCode()[x]
|
str, ok := emojiCode()[x]
|
||||||
if ok {
|
if ok {
|
||||||
return str + ReplacePadding
|
return str + ReplacePadding
|
||||||
@ -83,7 +84,7 @@ func replaseEmoji(input *bytes.Buffer) string {
|
|||||||
case unicode.IsSpace(i):
|
case unicode.IsSpace(i):
|
||||||
return emoji.String()
|
return emoji.String()
|
||||||
case i == ':':
|
case i == ':':
|
||||||
return emojize(emoji.String())
|
return Emojize(emoji.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
21
vendor/github.com/labstack/echo/v4/.travis.yml
generated
vendored
21
vendor/github.com/labstack/echo/v4/.travis.yml
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
arch:
|
|
||||||
- amd64
|
|
||||||
- ppc64le
|
|
||||||
|
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.14.x
|
|
||||||
- 1.15.x
|
|
||||||
- tip
|
|
||||||
env:
|
|
||||||
- GO111MODULE=on
|
|
||||||
install:
|
|
||||||
- go get -v golang.org/x/lint/golint
|
|
||||||
script:
|
|
||||||
- golint -set_exit_status ./...
|
|
||||||
- go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
|
||||||
after_success:
|
|
||||||
- bash <(curl -s https://codecov.io/bash)
|
|
||||||
matrix:
|
|
||||||
allow_failures:
|
|
||||||
- go: tip
|
|
27
vendor/github.com/labstack/echo/v4/CHANGELOG.md
generated
vendored
27
vendor/github.com/labstack/echo/v4/CHANGELOG.md
generated
vendored
@ -1,5 +1,32 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## v4.10.2 - 2023-02-22
|
||||||
|
|
||||||
|
**Security**
|
||||||
|
|
||||||
|
* `filepath.Clean` behaviour has changed in Go 1.20 - adapt to it [#2406](https://github.com/labstack/echo/pull/2406)
|
||||||
|
* Add `middleware.CORSConfig.UnsafeWildcardOriginWithAllowCredentials` to make UNSAFE usages of wildcard origin + allow cretentials less likely [#2405](https://github.com/labstack/echo/pull/2405)
|
||||||
|
|
||||||
|
**Enhancements**
|
||||||
|
|
||||||
|
* Add more HTTP error values [#2277](https://github.com/labstack/echo/pull/2277)
|
||||||
|
|
||||||
|
|
||||||
|
## v4.10.1 - 2023-02-19
|
||||||
|
|
||||||
|
**Security**
|
||||||
|
|
||||||
|
* Upgrade deps due to the latest golang.org/x/net vulnerability [#2402](https://github.com/labstack/echo/pull/2402)
|
||||||
|
|
||||||
|
|
||||||
|
**Enhancements**
|
||||||
|
|
||||||
|
* Add new JWT repository to the README [#2377](https://github.com/labstack/echo/pull/2377)
|
||||||
|
* Return an empty string for ctx.path if there is no registered path [#2385](https://github.com/labstack/echo/pull/2385)
|
||||||
|
* Add context timeout middleware [#2380](https://github.com/labstack/echo/pull/2380)
|
||||||
|
* Update link to jaegertracing [#2394](https://github.com/labstack/echo/pull/2394)
|
||||||
|
|
||||||
|
|
||||||
## v4.10.0 - 2022-12-27
|
## v4.10.0 - 2022-12-27
|
||||||
|
|
||||||
**Security**
|
**Security**
|
||||||
|
21
vendor/github.com/labstack/echo/v4/README.md
generated
vendored
21
vendor/github.com/labstack/echo/v4/README.md
generated
vendored
@ -11,12 +11,12 @@
|
|||||||
|
|
||||||
## Supported Go versions
|
## Supported Go versions
|
||||||
|
|
||||||
Latest version of Echo supports last four Go major [releases](https://go.dev/doc/devel/release) and might work with older versions.
|
Latest version of Echo supports last four Go major [releases](https://go.dev/doc/devel/release) and might work with
|
||||||
|
older versions.
|
||||||
|
|
||||||
As of version 4.0.0, Echo is available as a [Go module](https://github.com/golang/go/wiki/Modules).
|
As of version 4.0.0, Echo is available as a [Go module](https://github.com/golang/go/wiki/Modules).
|
||||||
Therefore a Go version capable of understanding /vN suffixed imports is required:
|
Therefore a Go version capable of understanding /vN suffixed imports is required:
|
||||||
|
|
||||||
|
|
||||||
Any of these versions will allow you to import Echo as `github.com/labstack/echo/v4` which is the recommended
|
Any of these versions will allow you to import Echo as `github.com/labstack/echo/v4` which is the recommended
|
||||||
way of using Echo going forward.
|
way of using Echo going forward.
|
||||||
|
|
||||||
@ -90,11 +90,22 @@ func hello(c echo.Context) error {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
# Third-party middlewares
|
# Official middleware repositories
|
||||||
|
|
||||||
|
Following list of middleware is maintained by Echo team.
|
||||||
|
|
||||||
| Repository | Description |
|
| Repository | Description |
|
||||||
|------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| [github.com/labstack/echo-contrib](https://github.com/labstack/echo-contrib) | (by Echo team) [casbin](https://github.com/casbin/casbin), [gorilla/sessions](https://github.com/gorilla/sessions), [jaegertracing](github.com/uber/jaeger-client-go), [prometheus](https://github.com/prometheus/client_golang/), [pprof](https://pkg.go.dev/net/http/pprof), [zipkin](https://github.com/openzipkin/zipkin-go) middlewares |
|
| [github.com/labstack/echo-jwt](https://github.com/labstack/echo-jwt) | [JWT](https://github.com/golang-jwt/jwt) middleware |
|
||||||
|
| [github.com/labstack/echo-contrib](https://github.com/labstack/echo-contrib) | [casbin](https://github.com/casbin/casbin), [gorilla/sessions](https://github.com/gorilla/sessions), [jaegertracing](https://github.com/uber/jaeger-client-go), [prometheus](https://github.com/prometheus/client_golang/), [pprof](https://pkg.go.dev/net/http/pprof), [zipkin](https://github.com/openzipkin/zipkin-go) middlewares |
|
||||||
|
|
||||||
|
# Third-party middleware repositories
|
||||||
|
|
||||||
|
Be careful when adding 3rd party middleware. Echo teams does not have time or manpower to guarantee safety and quality
|
||||||
|
of middlewares in this list.
|
||||||
|
|
||||||
|
| Repository | Description |
|
||||||
|
|------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| [deepmap/oapi-codegen](https://github.com/deepmap/oapi-codegen) | Automatically generate RESTful API documentation with [OpenAPI](https://swagger.io/specification/) Client and Server Code Generator |
|
| [deepmap/oapi-codegen](https://github.com/deepmap/oapi-codegen) | Automatically generate RESTful API documentation with [OpenAPI](https://swagger.io/specification/) Client and Server Code Generator |
|
||||||
| [github.com/swaggo/echo-swagger](https://github.com/swaggo/echo-swagger) | Automatically generate RESTful API documentation with [Swagger](https://swagger.io/) 2.0. |
|
| [github.com/swaggo/echo-swagger](https://github.com/swaggo/echo-swagger) | Automatically generate RESTful API documentation with [Swagger](https://swagger.io/) 2.0. |
|
||||||
| [github.com/ziflex/lecho](https://github.com/ziflex/lecho) | [Zerolog](https://github.com/rs/zerolog) logging library wrapper for Echo logger interface. |
|
| [github.com/ziflex/lecho](https://github.com/ziflex/lecho) | [Zerolog](https://github.com/rs/zerolog) logging library wrapper for Echo logger interface. |
|
||||||
|
55
vendor/github.com/labstack/echo/v4/echo.go
generated
vendored
55
vendor/github.com/labstack/echo/v4/echo.go
generated
vendored
@ -258,7 +258,7 @@ const (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Version of Echo
|
// Version of Echo
|
||||||
Version = "4.10.0"
|
Version = "4.10.2"
|
||||||
website = "https://echo.labstack.com"
|
website = "https://echo.labstack.com"
|
||||||
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
|
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
|
||||||
banner = `
|
banner = `
|
||||||
@ -291,18 +291,47 @@ var (
|
|||||||
|
|
||||||
// Errors
|
// Errors
|
||||||
var (
|
var (
|
||||||
ErrUnsupportedMediaType = NewHTTPError(http.StatusUnsupportedMediaType)
|
ErrBadRequest = NewHTTPError(http.StatusBadRequest) // HTTP 400 Bad Request
|
||||||
ErrNotFound = NewHTTPError(http.StatusNotFound)
|
ErrUnauthorized = NewHTTPError(http.StatusUnauthorized) // HTTP 401 Unauthorized
|
||||||
ErrUnauthorized = NewHTTPError(http.StatusUnauthorized)
|
ErrPaymentRequired = NewHTTPError(http.StatusPaymentRequired) // HTTP 402 Payment Required
|
||||||
ErrForbidden = NewHTTPError(http.StatusForbidden)
|
ErrForbidden = NewHTTPError(http.StatusForbidden) // HTTP 403 Forbidden
|
||||||
ErrMethodNotAllowed = NewHTTPError(http.StatusMethodNotAllowed)
|
ErrNotFound = NewHTTPError(http.StatusNotFound) // HTTP 404 Not Found
|
||||||
ErrStatusRequestEntityTooLarge = NewHTTPError(http.StatusRequestEntityTooLarge)
|
ErrMethodNotAllowed = NewHTTPError(http.StatusMethodNotAllowed) // HTTP 405 Method Not Allowed
|
||||||
ErrTooManyRequests = NewHTTPError(http.StatusTooManyRequests)
|
ErrNotAcceptable = NewHTTPError(http.StatusNotAcceptable) // HTTP 406 Not Acceptable
|
||||||
ErrBadRequest = NewHTTPError(http.StatusBadRequest)
|
ErrProxyAuthRequired = NewHTTPError(http.StatusProxyAuthRequired) // HTTP 407 Proxy AuthRequired
|
||||||
ErrBadGateway = NewHTTPError(http.StatusBadGateway)
|
ErrRequestTimeout = NewHTTPError(http.StatusRequestTimeout) // HTTP 408 Request Timeout
|
||||||
ErrInternalServerError = NewHTTPError(http.StatusInternalServerError)
|
ErrConflict = NewHTTPError(http.StatusConflict) // HTTP 409 Conflict
|
||||||
ErrRequestTimeout = NewHTTPError(http.StatusRequestTimeout)
|
ErrGone = NewHTTPError(http.StatusGone) // HTTP 410 Gone
|
||||||
ErrServiceUnavailable = NewHTTPError(http.StatusServiceUnavailable)
|
ErrLengthRequired = NewHTTPError(http.StatusLengthRequired) // HTTP 411 Length Required
|
||||||
|
ErrPreconditionFailed = NewHTTPError(http.StatusPreconditionFailed) // HTTP 412 Precondition Failed
|
||||||
|
ErrStatusRequestEntityTooLarge = NewHTTPError(http.StatusRequestEntityTooLarge) // HTTP 413 Payload Too Large
|
||||||
|
ErrRequestURITooLong = NewHTTPError(http.StatusRequestURITooLong) // HTTP 414 URI Too Long
|
||||||
|
ErrUnsupportedMediaType = NewHTTPError(http.StatusUnsupportedMediaType) // HTTP 415 Unsupported Media Type
|
||||||
|
ErrRequestedRangeNotSatisfiable = NewHTTPError(http.StatusRequestedRangeNotSatisfiable) // HTTP 416 Range Not Satisfiable
|
||||||
|
ErrExpectationFailed = NewHTTPError(http.StatusExpectationFailed) // HTTP 417 Expectation Failed
|
||||||
|
ErrTeapot = NewHTTPError(http.StatusTeapot) // HTTP 418 I'm a teapot
|
||||||
|
ErrMisdirectedRequest = NewHTTPError(http.StatusMisdirectedRequest) // HTTP 421 Misdirected Request
|
||||||
|
ErrUnprocessableEntity = NewHTTPError(http.StatusUnprocessableEntity) // HTTP 422 Unprocessable Entity
|
||||||
|
ErrLocked = NewHTTPError(http.StatusLocked) // HTTP 423 Locked
|
||||||
|
ErrFailedDependency = NewHTTPError(http.StatusFailedDependency) // HTTP 424 Failed Dependency
|
||||||
|
ErrTooEarly = NewHTTPError(http.StatusTooEarly) // HTTP 425 Too Early
|
||||||
|
ErrUpgradeRequired = NewHTTPError(http.StatusUpgradeRequired) // HTTP 426 Upgrade Required
|
||||||
|
ErrPreconditionRequired = NewHTTPError(http.StatusPreconditionRequired) // HTTP 428 Precondition Required
|
||||||
|
ErrTooManyRequests = NewHTTPError(http.StatusTooManyRequests) // HTTP 429 Too Many Requests
|
||||||
|
ErrRequestHeaderFieldsTooLarge = NewHTTPError(http.StatusRequestHeaderFieldsTooLarge) // HTTP 431 Request Header Fields Too Large
|
||||||
|
ErrUnavailableForLegalReasons = NewHTTPError(http.StatusUnavailableForLegalReasons) // HTTP 451 Unavailable For Legal Reasons
|
||||||
|
ErrInternalServerError = NewHTTPError(http.StatusInternalServerError) // HTTP 500 Internal Server Error
|
||||||
|
ErrNotImplemented = NewHTTPError(http.StatusNotImplemented) // HTTP 501 Not Implemented
|
||||||
|
ErrBadGateway = NewHTTPError(http.StatusBadGateway) // HTTP 502 Bad Gateway
|
||||||
|
ErrServiceUnavailable = NewHTTPError(http.StatusServiceUnavailable) // HTTP 503 Service Unavailable
|
||||||
|
ErrGatewayTimeout = NewHTTPError(http.StatusGatewayTimeout) // HTTP 504 Gateway Timeout
|
||||||
|
ErrHTTPVersionNotSupported = NewHTTPError(http.StatusHTTPVersionNotSupported) // HTTP 505 HTTP Version Not Supported
|
||||||
|
ErrVariantAlsoNegotiates = NewHTTPError(http.StatusVariantAlsoNegotiates) // HTTP 506 Variant Also Negotiates
|
||||||
|
ErrInsufficientStorage = NewHTTPError(http.StatusInsufficientStorage) // HTTP 507 Insufficient Storage
|
||||||
|
ErrLoopDetected = NewHTTPError(http.StatusLoopDetected) // HTTP 508 Loop Detected
|
||||||
|
ErrNotExtended = NewHTTPError(http.StatusNotExtended) // HTTP 510 Not Extended
|
||||||
|
ErrNetworkAuthenticationRequired = NewHTTPError(http.StatusNetworkAuthenticationRequired) // HTTP 511 Network Authentication Required
|
||||||
|
|
||||||
ErrValidatorNotRegistered = errors.New("validator not registered")
|
ErrValidatorNotRegistered = errors.New("validator not registered")
|
||||||
ErrRendererNotRegistered = errors.New("renderer not registered")
|
ErrRendererNotRegistered = errors.New("renderer not registered")
|
||||||
ErrInvalidRedirectCode = errors.New("invalid redirect status code")
|
ErrInvalidRedirectCode = errors.New("invalid redirect status code")
|
||||||
|
72
vendor/github.com/labstack/echo/v4/middleware/context_timeout.go
generated
vendored
Normal file
72
vendor/github.com/labstack/echo/v4/middleware/context_timeout.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/labstack/echo/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ContextTimeoutConfig defines the config for ContextTimeout middleware.
|
||||||
|
type ContextTimeoutConfig struct {
|
||||||
|
// Skipper defines a function to skip middleware.
|
||||||
|
Skipper Skipper
|
||||||
|
|
||||||
|
// ErrorHandler is a function when error aries in middeware execution.
|
||||||
|
ErrorHandler func(err error, c echo.Context) error
|
||||||
|
|
||||||
|
// Timeout configures a timeout for the middleware, defaults to 0 for no timeout
|
||||||
|
Timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContextTimeout returns a middleware which returns error (503 Service Unavailable error) to client
|
||||||
|
// when underlying method returns context.DeadlineExceeded error.
|
||||||
|
func ContextTimeout(timeout time.Duration) echo.MiddlewareFunc {
|
||||||
|
return ContextTimeoutWithConfig(ContextTimeoutConfig{Timeout: timeout})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContextTimeoutWithConfig returns a Timeout middleware with config.
|
||||||
|
func ContextTimeoutWithConfig(config ContextTimeoutConfig) echo.MiddlewareFunc {
|
||||||
|
mw, err := config.ToMiddleware()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return mw
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToMiddleware converts Config to middleware.
|
||||||
|
func (config ContextTimeoutConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
|
||||||
|
if config.Timeout == 0 {
|
||||||
|
return nil, errors.New("timeout must be set")
|
||||||
|
}
|
||||||
|
if config.Skipper == nil {
|
||||||
|
config.Skipper = DefaultSkipper
|
||||||
|
}
|
||||||
|
if config.ErrorHandler == nil {
|
||||||
|
config.ErrorHandler = func(err error, c echo.Context) error {
|
||||||
|
if err != nil && errors.Is(err, context.DeadlineExceeded) {
|
||||||
|
return echo.ErrServiceUnavailable.WithInternal(err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||||
|
return func(c echo.Context) error {
|
||||||
|
if config.Skipper(c) {
|
||||||
|
return next(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
timeoutContext, cancel := context.WithTimeout(c.Request().Context(), config.Timeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
c.SetRequest(c.Request().WithContext(timeoutContext))
|
||||||
|
|
||||||
|
if err := next(c); err != nil {
|
||||||
|
return config.ErrorHandler(err, c)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}, nil
|
||||||
|
}
|
11
vendor/github.com/labstack/echo/v4/middleware/cors.go
generated
vendored
11
vendor/github.com/labstack/echo/v4/middleware/cors.go
generated
vendored
@ -79,6 +79,15 @@ type (
|
|||||||
// See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials
|
// See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials
|
||||||
AllowCredentials bool `yaml:"allow_credentials"`
|
AllowCredentials bool `yaml:"allow_credentials"`
|
||||||
|
|
||||||
|
// UnsafeWildcardOriginWithAllowCredentials UNSAFE/INSECURE: allows wildcard '*' origin to be used with AllowCredentials
|
||||||
|
// flag. In that case we consider any origin allowed and send it back to the client with `Access-Control-Allow-Origin` header.
|
||||||
|
//
|
||||||
|
// This is INSECURE and potentially leads to [cross-origin](https://portswigger.net/research/exploiting-cors-misconfigurations-for-bitcoins-and-bounties)
|
||||||
|
// attacks. See: https://github.com/labstack/echo/issues/2400 for discussion on the subject.
|
||||||
|
//
|
||||||
|
// Optional. Default value is false.
|
||||||
|
UnsafeWildcardOriginWithAllowCredentials bool `yaml:"unsafe_wildcard_origin_with_allow_credentials"`
|
||||||
|
|
||||||
// ExposeHeaders determines the value of Access-Control-Expose-Headers, which
|
// ExposeHeaders determines the value of Access-Control-Expose-Headers, which
|
||||||
// defines a list of headers that clients are allowed to access.
|
// defines a list of headers that clients are allowed to access.
|
||||||
//
|
//
|
||||||
@ -203,7 +212,7 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
|
|||||||
} else {
|
} else {
|
||||||
// Check allowed origins
|
// Check allowed origins
|
||||||
for _, o := range config.AllowOrigins {
|
for _, o := range config.AllowOrigins {
|
||||||
if o == "*" && config.AllowCredentials {
|
if o == "*" && config.AllowCredentials && config.UnsafeWildcardOriginWithAllowCredentials {
|
||||||
allowOrigin = origin
|
allowOrigin = origin
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/labstack/echo/v4/middleware/csrf.go
generated
vendored
6
vendor/github.com/labstack/echo/v4/middleware/csrf.go
generated
vendored
@ -119,9 +119,9 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {
|
|||||||
config.CookieSecure = true
|
config.CookieSecure = true
|
||||||
}
|
}
|
||||||
|
|
||||||
extractors, err := CreateExtractors(config.TokenLookup)
|
extractors, cErr := CreateExtractors(config.TokenLookup)
|
||||||
if err != nil {
|
if cErr != nil {
|
||||||
panic(err)
|
panic(cErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||||
|
6
vendor/github.com/labstack/echo/v4/middleware/jwt.go
generated
vendored
6
vendor/github.com/labstack/echo/v4/middleware/jwt.go
generated
vendored
@ -196,9 +196,9 @@ func JWTWithConfig(config JWTConfig) echo.MiddlewareFunc {
|
|||||||
config.ParseTokenFunc = config.defaultParseToken
|
config.ParseTokenFunc = config.defaultParseToken
|
||||||
}
|
}
|
||||||
|
|
||||||
extractors, err := createExtractors(config.TokenLookup, config.AuthScheme)
|
extractors, cErr := createExtractors(config.TokenLookup, config.AuthScheme)
|
||||||
if err != nil {
|
if cErr != nil {
|
||||||
panic(err)
|
panic(cErr)
|
||||||
}
|
}
|
||||||
if len(config.TokenLookupFuncs) > 0 {
|
if len(config.TokenLookupFuncs) > 0 {
|
||||||
extractors = append(config.TokenLookupFuncs, extractors...)
|
extractors = append(config.TokenLookupFuncs, extractors...)
|
||||||
|
6
vendor/github.com/labstack/echo/v4/middleware/key_auth.go
generated
vendored
6
vendor/github.com/labstack/echo/v4/middleware/key_auth.go
generated
vendored
@ -108,9 +108,9 @@ func KeyAuthWithConfig(config KeyAuthConfig) echo.MiddlewareFunc {
|
|||||||
panic("echo: key-auth middleware requires a validator function")
|
panic("echo: key-auth middleware requires a validator function")
|
||||||
}
|
}
|
||||||
|
|
||||||
extractors, err := createExtractors(config.KeyLookup, config.AuthScheme)
|
extractors, cErr := createExtractors(config.KeyLookup, config.AuthScheme)
|
||||||
if err != nil {
|
if cErr != nil {
|
||||||
panic(err)
|
panic(cErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||||
|
26
vendor/github.com/labstack/echo/v4/middleware/static.go
generated
vendored
26
vendor/github.com/labstack/echo/v4/middleware/static.go
generated
vendored
@ -8,7 +8,6 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/labstack/echo/v4"
|
"github.com/labstack/echo/v4"
|
||||||
@ -157,9 +156,9 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Index template
|
// Index template
|
||||||
t, err := template.New("index").Parse(html)
|
t, tErr := template.New("index").Parse(html)
|
||||||
if err != nil {
|
if tErr != nil {
|
||||||
panic(fmt.Sprintf("echo: %v", err))
|
panic(fmt.Errorf("echo: %w", tErr))
|
||||||
}
|
}
|
||||||
|
|
||||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||||
@ -176,7 +175,7 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
name := filepath.Join(config.Root, filepath.Clean("/"+p)) // "/"+ for security
|
name := path.Join(config.Root, path.Clean("/"+p)) // "/"+ for security
|
||||||
|
|
||||||
if config.IgnoreBase {
|
if config.IgnoreBase {
|
||||||
routePath := path.Base(strings.TrimRight(c.Path(), "/*"))
|
routePath := path.Base(strings.TrimRight(c.Path(), "/*"))
|
||||||
@ -187,12 +186,14 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := openFile(config.Filesystem, name)
|
file, err := config.Filesystem.Open(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !os.IsNotExist(err) {
|
if !isIgnorableOpenFileError(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// file with that path did not exist, so we continue down in middleware/handler chain, hoping that we end up in
|
||||||
|
// handler that is meant to handle this request
|
||||||
if err = next(c); err == nil {
|
if err = next(c); err == nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -202,7 +203,7 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err = openFile(config.Filesystem, filepath.Join(config.Root, config.Index))
|
file, err = config.Filesystem.Open(path.Join(config.Root, config.Index))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -216,16 +217,14 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if info.IsDir() {
|
if info.IsDir() {
|
||||||
index, err := openFile(config.Filesystem, filepath.Join(name, config.Index))
|
index, err := config.Filesystem.Open(path.Join(name, config.Index))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if config.Browse {
|
if config.Browse {
|
||||||
return listDir(t, name, file, c.Response())
|
return listDir(t, name, file, c.Response())
|
||||||
}
|
}
|
||||||
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return next(c)
|
return next(c)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
defer index.Close()
|
defer index.Close()
|
||||||
|
|
||||||
@ -242,11 +241,6 @@ func StaticWithConfig(config StaticConfig) echo.MiddlewareFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func openFile(fs http.FileSystem, name string) (http.File, error) {
|
|
||||||
pathWithSlashes := filepath.ToSlash(name)
|
|
||||||
return fs.Open(pathWithSlashes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func serveFile(c echo.Context, file http.File, info os.FileInfo) error {
|
func serveFile(c echo.Context, file http.File, info os.FileInfo) error {
|
||||||
http.ServeContent(c.Response(), c.Request(), info.Name(), info.ModTime(), file)
|
http.ServeContent(c.Response(), c.Request(), info.Name(), info.ModTime(), file)
|
||||||
return nil
|
return nil
|
||||||
|
12
vendor/github.com/labstack/echo/v4/middleware/static_other.go
generated
vendored
Normal file
12
vendor/github.com/labstack/echo/v4/middleware/static_other.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
//go:build !windows
|
||||||
|
|
||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// We ignore these errors as there could be handler that matches request path.
|
||||||
|
func isIgnorableOpenFileError(err error) bool {
|
||||||
|
return os.IsNotExist(err)
|
||||||
|
}
|
23
vendor/github.com/labstack/echo/v4/middleware/static_windows.go
generated
vendored
Normal file
23
vendor/github.com/labstack/echo/v4/middleware/static_windows.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// We ignore these errors as there could be handler that matches request path.
|
||||||
|
//
|
||||||
|
// As of Go 1.20 filepath.Clean has different behaviour on OS related filesystems so we need to use path.Clean
|
||||||
|
// on Windows which has some caveats. The Open methods might return different errors than earlier versions and
|
||||||
|
// as of 1.20 path checks are more strict on the provided path and considers [UNC](https://en.wikipedia.org/wiki/Path_(computing)#UNC)
|
||||||
|
// paths with missing host etc parts as invalid. Previously it would result you `fs.ErrNotExist`.
|
||||||
|
//
|
||||||
|
// For 1.20@Windows we need to treat those errors the same as `fs.ErrNotExists` so we can continue handling
|
||||||
|
// errors in the middleware/handler chain. Otherwise we might end up with status 500 instead of finding a route
|
||||||
|
// or return 404 not found.
|
||||||
|
func isIgnorableOpenFileError(err error) bool {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
errTxt := err.Error()
|
||||||
|
return errTxt == "http: invalid or unsafe file path" || errTxt == "invalid path"
|
||||||
|
}
|
1
vendor/github.com/labstack/echo/v4/router.go
generated
vendored
1
vendor/github.com/labstack/echo/v4/router.go
generated
vendored
@ -524,7 +524,6 @@ func optionsMethodHandler(allowMethods string) func(c Context) error {
|
|||||||
// - Return it `Echo#ReleaseContext()`.
|
// - Return it `Echo#ReleaseContext()`.
|
||||||
func (r *Router) Find(method, path string, c Context) {
|
func (r *Router) Find(method, path string, c Context) {
|
||||||
ctx := c.(*context)
|
ctx := c.(*context)
|
||||||
ctx.path = path
|
|
||||||
currentNode := r.tree // Current node as root
|
currentNode := r.tree // Current node as root
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
37
vendor/github.com/matterbridge/matterclient/channels.go
generated
vendored
37
vendor/github.com/matterbridge/matterclient/channels.go
generated
vendored
@ -80,9 +80,16 @@ func (m *Client) getChannelIDTeam(name string, teamID string) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Fallback if it's not found in the t.Channels or t.MoreChannels cache.
|
||||||
|
// This also let's us join private channels.
|
||||||
|
channel, _, err := m.Client.GetChannelByName(name, teamID, "")
|
||||||
|
if err != nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return channel.Id
|
||||||
|
}
|
||||||
|
|
||||||
func (m *Client) GetChannelName(channelID string) string {
|
func (m *Client) GetChannelName(channelID string) string {
|
||||||
m.RLock()
|
m.RLock()
|
||||||
defer m.RUnlock()
|
defer m.RUnlock()
|
||||||
@ -224,8 +231,13 @@ func (m *Client) UpdateChannelsTeam(teamID string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
idx := 0
|
||||||
|
max := 200
|
||||||
|
|
||||||
|
var moreChannels []*model.Channel
|
||||||
|
|
||||||
for {
|
for {
|
||||||
mmchannels, resp, err = m.Client.GetPublicChannelsForTeam(teamID, 0, 5000, "")
|
mmchannels, resp, err = m.Client.GetPublicChannelsForTeam(teamID, idx, max, "")
|
||||||
if err == nil {
|
if err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -235,10 +247,27 @@ func (m *Client) UpdateChannelsTeam(teamID string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for len(mmchannels) > 0 {
|
||||||
|
moreChannels = append(moreChannels, mmchannels...)
|
||||||
|
|
||||||
|
for {
|
||||||
|
mmchannels, resp, err = m.Client.GetPublicChannelsForTeam(teamID, idx, max, "")
|
||||||
|
if err == nil {
|
||||||
|
idx++
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.HandleRatelimit("GetPublicChannelsForTeam", resp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for idx, t := range m.OtherTeams {
|
for idx, t := range m.OtherTeams {
|
||||||
if t.ID == teamID {
|
if t.ID == teamID {
|
||||||
m.Lock()
|
m.Lock()
|
||||||
m.OtherTeams[idx].MoreChannels = mmchannels
|
m.OtherTeams[idx].MoreChannels = moreChannels
|
||||||
m.Unlock()
|
m.Unlock()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -252,6 +281,10 @@ func (m *Client) UpdateChannels() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, t := range m.OtherTeams {
|
for _, t := range m.OtherTeams {
|
||||||
|
// We've already populated users/channels for team in the above.
|
||||||
|
if t.ID == m.Team.ID {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if err := m.UpdateChannelsTeam(t.ID); err != nil {
|
if err := m.UpdateChannelsTeam(t.ID); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
39
vendor/github.com/matterbridge/matterclient/matterclient.go
generated
vendored
39
vendor/github.com/matterbridge/matterclient/matterclient.go
generated
vendored
@ -144,6 +144,10 @@ func (m *Client) Login() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := m.initUserChannels(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if m.Team == nil {
|
if m.Team == nil {
|
||||||
validTeamNames := make([]string, len(m.OtherTeams))
|
validTeamNames := make([]string, len(m.OtherTeams))
|
||||||
for i, t := range m.OtherTeams {
|
for i, t := range m.OtherTeams {
|
||||||
@ -332,8 +336,11 @@ func (m *Client) initUser() error {
|
|||||||
|
|
||||||
time.Sleep(time.Millisecond * 200)
|
time.Sleep(time.Millisecond * 200)
|
||||||
}
|
}
|
||||||
|
m.logger.Debugf("found %d users in team %s", len(usermap), team.Name)
|
||||||
m.logger.Infof("found %d users in team %s", len(usermap), team.Name)
|
// add all users
|
||||||
|
for k, v := range usermap {
|
||||||
|
m.Users[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
t := &Team{
|
t := &Team{
|
||||||
Team: team,
|
Team: team,
|
||||||
@ -341,29 +348,25 @@ func (m *Client) initUser() error {
|
|||||||
ID: team.Id,
|
ID: team.Id,
|
||||||
}
|
}
|
||||||
|
|
||||||
mmchannels, _, err := m.Client.GetChannelsForTeamForUser(team.Id, m.User.Id, false, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Channels = mmchannels
|
|
||||||
|
|
||||||
mmchannels, _, err = m.Client.GetPublicChannelsForTeam(team.Id, 0, 5000, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
t.MoreChannels = mmchannels
|
|
||||||
m.OtherTeams = append(m.OtherTeams, t)
|
m.OtherTeams = append(m.OtherTeams, t)
|
||||||
|
|
||||||
if team.Name == m.Credentials.Team {
|
if team.Name == m.Credentials.Team {
|
||||||
m.Team = t
|
m.Team = t
|
||||||
m.logger.Debugf("initUser(): found our team %s (id: %s)", team.Name, team.Id)
|
m.logger.Debugf("initUser(): found our team %s (id: %s)", team.Name, team.Id)
|
||||||
}
|
}
|
||||||
// add all users
|
|
||||||
for k, v := range t.Users {
|
|
||||||
m.Users[k] = v
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Client) initUserChannels() error {
|
||||||
|
if err := m.UpdateChannels(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, t := range m.OtherTeams {
|
||||||
|
m.logger.Debugf("found %d channels for user in team %s", len(t.Channels), t.Team.Name)
|
||||||
|
m.logger.Debugf("found %d public channels in team %s", len(t.MoreChannels), t.Team.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
4
vendor/github.com/mattn/go-isatty/isatty_bsd.go
generated
vendored
4
vendor/github.com/mattn/go-isatty/isatty_bsd.go
generated
vendored
@ -1,5 +1,5 @@
|
|||||||
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine
|
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
|
||||||
// +build darwin freebsd openbsd netbsd dragonfly
|
// +build darwin freebsd openbsd netbsd dragonfly hurd
|
||||||
// +build !appengine
|
// +build !appengine
|
||||||
|
|
||||||
package isatty
|
package isatty
|
||||||
|
11
vendor/github.com/remyoudompheng/bigfft/README
generated
vendored
11
vendor/github.com/remyoudompheng/bigfft/README
generated
vendored
@ -1,3 +1,14 @@
|
|||||||
|
This library is a toy proof-of-concept implementation of the
|
||||||
|
well-known Schonhage-Strassen method for multiplying integers.
|
||||||
|
It is not expected to have a real life usecase outside number
|
||||||
|
theory computations, nor is it expected to be used in any production
|
||||||
|
system.
|
||||||
|
|
||||||
|
If you are using it in your project, you may want to carefully
|
||||||
|
examine the actual requirement or problem you are trying to solve.
|
||||||
|
|
||||||
|
# Comparison with the standard library and GMP
|
||||||
|
|
||||||
Benchmarking math/big vs. bigfft
|
Benchmarking math/big vs. bigfft
|
||||||
|
|
||||||
Number size old ns/op new ns/op delta
|
Number size old ns/op new ns/op delta
|
||||||
|
36
vendor/github.com/remyoudompheng/bigfft/arith_386.s
generated
vendored
36
vendor/github.com/remyoudompheng/bigfft/arith_386.s
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addMulVVW(SB)
|
|
||||||
|
|
38
vendor/github.com/remyoudompheng/bigfft/arith_amd64.s
generated
vendored
38
vendor/github.com/remyoudompheng/bigfft/arith_amd64.s
generated
vendored
@ -1,38 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
// (same as addVV except for SBBQ instead of ADCQ and label names)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
// (same as addVW except for SUBQ/SBBQ instead of ADDQ/ADCQ and label names)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addMulVVW(SB)
|
|
||||||
|
|
36
vendor/github.com/remyoudompheng/bigfft/arith_arm.s
generated
vendored
36
vendor/github.com/remyoudompheng/bigfft/arith_arm.s
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
B math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
B math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
B math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
B math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·addMulVVW(SB)
|
|
||||||
|
|
36
vendor/github.com/remyoudompheng/bigfft/arith_arm64.s
generated
vendored
36
vendor/github.com/remyoudompheng/bigfft/arith_arm64.s
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
B math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
B math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
B math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
B math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
B math∕big·addMulVVW(SB)
|
|
||||||
|
|
21
vendor/github.com/remyoudompheng/bigfft/arith_decl.go
generated
vendored
21
vendor/github.com/remyoudompheng/bigfft/arith_decl.go
generated
vendored
@ -4,13 +4,30 @@
|
|||||||
|
|
||||||
package bigfft
|
package bigfft
|
||||||
|
|
||||||
import . "math/big"
|
import (
|
||||||
|
"math/big"
|
||||||
|
_ "unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
// implemented in arith_$GOARCH.s
|
type Word = big.Word
|
||||||
|
|
||||||
|
//go:linkname addVV math/big.addVV
|
||||||
func addVV(z, x, y []Word) (c Word)
|
func addVV(z, x, y []Word) (c Word)
|
||||||
|
|
||||||
|
//go:linkname subVV math/big.subVV
|
||||||
func subVV(z, x, y []Word) (c Word)
|
func subVV(z, x, y []Word) (c Word)
|
||||||
|
|
||||||
|
//go:linkname addVW math/big.addVW
|
||||||
func addVW(z, x []Word, y Word) (c Word)
|
func addVW(z, x []Word, y Word) (c Word)
|
||||||
|
|
||||||
|
//go:linkname subVW math/big.subVW
|
||||||
func subVW(z, x []Word, y Word) (c Word)
|
func subVW(z, x []Word, y Word) (c Word)
|
||||||
|
|
||||||
|
//go:linkname shlVU math/big.shlVU
|
||||||
func shlVU(z, x []Word, s uint) (c Word)
|
func shlVU(z, x []Word, s uint) (c Word)
|
||||||
|
|
||||||
|
//go:linkname mulAddVWW math/big.mulAddVWW
|
||||||
func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
||||||
|
|
||||||
|
//go:linkname addMulVVW math/big.addMulVVW
|
||||||
func addMulVVW(z, x []Word, y Word) (c Word)
|
func addMulVVW(z, x []Word, y Word) (c Word)
|
||||||
|
40
vendor/github.com/remyoudompheng/bigfft/arith_mips64x.s
generated
vendored
40
vendor/github.com/remyoudompheng/bigfft/arith_mips64x.s
generated
vendored
@ -1,40 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
// +build mips64 mips64le
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
// (same as addVV except for SBBQ instead of ADCQ and label names)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
// (same as addVW except for SUBQ/SBBQ instead of ADDQ/ADCQ and label names)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addMulVVW(SB)
|
|
||||||
|
|
40
vendor/github.com/remyoudompheng/bigfft/arith_mipsx.s
generated
vendored
40
vendor/github.com/remyoudompheng/bigfft/arith_mipsx.s
generated
vendored
@ -1,40 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
// +build mips mipsle
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
// (same as addVV except for SBBQ instead of ADCQ and label names)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
// (same as addVW except for SUBQ/SBBQ instead of ADDQ/ADCQ and label names)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
JMP math∕big·addMulVVW(SB)
|
|
||||||
|
|
38
vendor/github.com/remyoudompheng/bigfft/arith_ppc64x.s
generated
vendored
38
vendor/github.com/remyoudompheng/bigfft/arith_ppc64x.s
generated
vendored
@ -1,38 +0,0 @@
|
|||||||
// Trampolines to math/big assembly implementations.
|
|
||||||
|
|
||||||
// +build ppc64 ppc64le
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// func addVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·addVV(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·addVV(SB)
|
|
||||||
|
|
||||||
// func subVV(z, x, y []Word) (c Word)
|
|
||||||
TEXT ·subVV(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·subVV(SB)
|
|
||||||
|
|
||||||
// func addVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addVW(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·addVW(SB)
|
|
||||||
|
|
||||||
// func subVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·subVW(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·subVW(SB)
|
|
||||||
|
|
||||||
// func shlVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shlVU(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·shlVU(SB)
|
|
||||||
|
|
||||||
// func shrVU(z, x []Word, s uint) (c Word)
|
|
||||||
TEXT ·shrVU(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·shrVU(SB)
|
|
||||||
|
|
||||||
// func mulAddVWW(z, x []Word, y, r Word) (c Word)
|
|
||||||
TEXT ·mulAddVWW(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·mulAddVWW(SB)
|
|
||||||
|
|
||||||
// func addMulVVW(z, x []Word, y Word) (c Word)
|
|
||||||
TEXT ·addMulVVW(SB),NOSPLIT,$0
|
|
||||||
BR math∕big·addMulVVW(SB)
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user