Update dependencies (#1841)
1  vendor/github.com/gomarkdown/markdown/fuzz.go  generated  vendored
@@ -1,3 +1,4 @@
//go:build gofuzz
// +build gofuzz

package markdown
90  vendor/github.com/gomarkdown/markdown/html/renderer.go  generated  vendored
@@ -11,6 +11,7 @@ import (
"strings"

"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/internal/valid"
"github.com/gomarkdown/markdown/parser"
)

@@ -211,70 +212,6 @@ func NewRenderer(opts RendererOptions) *Renderer {
}
}

func isHTMLTag(tag []byte, tagname string) bool {
found, _ := findHTMLTagPos(tag, tagname)
return found
}

// Look for a character, but ignore it when it's in any kind of quotes, it
// might be JavaScript
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
inSingleQuote := false
inDoubleQuote := false
inGraveQuote := false
i := start
for i < len(html) {
switch {
case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
return i
case html[i] == '\'':
inSingleQuote = !inSingleQuote
case html[i] == '"':
inDoubleQuote = !inDoubleQuote
case html[i] == '`':
inGraveQuote = !inGraveQuote
}
i++
}
return start
}

func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
i := 0
if i < len(tag) && tag[0] != '<' {
return false, -1
}
i++
i = skipSpace(tag, i)

if i < len(tag) && tag[i] == '/' {
i++
}

i = skipSpace(tag, i)
j := 0
for ; i < len(tag); i, j = i+1, j+1 {
if j >= len(tagname) {
break
}

if strings.ToLower(string(tag[i]))[0] != tagname[j] {
return false, -1
}
}

if i == len(tag) {
return false, -1
}

rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
if rightAngle >= i {
return true, rightAngle
}

return false, -1
}

func isRelativeLink(link []byte) (yes bool) {
// a tag begin with '#'
if link[0] == '#' {
@@ -351,14 +288,6 @@ func needSkipLink(flags Flags, dest []byte) bool {
return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
}

func isSmartypantable(node ast.Node) bool {
switch node.GetParent().(type) {
case *ast.Link, *ast.CodeBlock, *ast.Code:
return false
}
return true
}

func appendLanguageAttr(attrs []string, info []byte) []string {
if len(info) == 0 {
return attrs
@@ -1297,21 +1226,8 @@ func isListItemTerm(node ast.Node) bool {
return ok && data.ListFlags&ast.ListTypeTerm != 0
}

// TODO: move to internal package
func skipSpace(data []byte, i int) int {
n := len(data)
for i < n && isSpace(data[i]) {
i++
}
return i
}

// TODO: move to internal package
var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

func isSafeLink(link []byte) bool {
for _, path := range validPaths {
for _, path := range valid.Paths {
if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
if len(link) == len(path) {
return true
@@ -1321,7 +1237,7 @@ func isSafeLink(link []byte) bool {
}
}

for _, prefix := range validUris {
for _, prefix := range valid.URIs {
// TODO: handle unicode here
// case-insensitive prefix test
if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isAlnum(link[len(prefix)]) {
14  vendor/github.com/gomarkdown/markdown/internal/valid/valid.go  generated  vendored  Normal file
@@ -0,0 +1,14 @@
package valid

var URIs = [][]byte{
[]byte("http://"),
[]byte("https://"),
[]byte("ftp://"),
[]byte("mailto:"),
}

var Paths = [][]byte{
[]byte("/"),
[]byte("./"),
[]byte("../"),
}
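The new internal/valid package centralizes the URI and path prefix tables that html/renderer.go and parser/inline.go previously declared separately (note that the old copies listed "mailto://" while the shared table uses "mailto:"). For reference, here is a simplified, self-contained sketch of the prefix check these tables feed, mirroring the isSafeLink logic in the renderer hunk above; it is an illustration, not part of the commit:

package main

import (
    "bytes"
    "fmt"
)

// Prefix tables copied from internal/valid above.
var uris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto:")}
var paths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

func isAlnum(c byte) bool {
    return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// isSafeLink accepts known relative-path prefixes and allowed schemes
// followed by an alphanumeric character, like the vendored helper.
func isSafeLink(link []byte) bool {
    for _, path := range paths {
        if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
            return true
        }
    }
    for _, prefix := range uris {
        if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isAlnum(link[len(prefix)]) {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(isSafeLink([]byte("https://example.com"))) // true
    fmt.Println(isSafeLink([]byte("javascript:alert(1)"))) // false
}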
4  vendor/github.com/gomarkdown/markdown/parser/block.go  generated  vendored
@@ -24,8 +24,8 @@ const (
)

var (
reBackslashOrAmp = regexp.MustCompile("[\\&]")
reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
reBackslashOrAmp = regexp.MustCompile(`[\&]`)
reEntityOrEscapedChar = regexp.MustCompile(`(?i)\\` + escapable + "|" + charEntity)

// blockTags is a set of tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
10  vendor/github.com/gomarkdown/markdown/parser/inline.go  generated  vendored
@@ -6,6 +6,7 @@ import (
"strconv"

"github.com/gomarkdown/markdown/ast"
"github.com/gomarkdown/markdown/internal/valid"
)

// Parsing of inline elements
@@ -994,12 +995,9 @@ func isEndOfLink(char byte) bool {
return isSpace(char) || char == '<'
}

var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

func isSafeLink(link []byte) bool {
nLink := len(link)
for _, path := range validPaths {
for _, path := range valid.Paths {
nPath := len(path)
linkPrefix := link[:nPath]
if nLink >= nPath && bytes.Equal(linkPrefix, path) {
@@ -1011,7 +1009,7 @@ func isSafeLink(link []byte) bool {
}
}

for _, prefix := range validUris {
for _, prefix := range valid.URIs {
// TODO: handle unicode here
// case-insensitive prefix test
nPrefix := len(prefix)
@@ -1119,7 +1117,7 @@ func isMailtoAutoLink(data []byte) int {
nb++

case '-', '.', '_':
break
// no-op but not default

case '>':
if nb == 1 {
3  vendor/github.com/gomarkdown/markdown/parser/parser.go  generated  vendored
@@ -8,7 +8,6 @@ import (
"fmt"
"strconv"
"strings"
"unicode/utf8"

"github.com/gomarkdown/markdown/ast"
)
@@ -720,6 +719,7 @@ func isAlnum(c byte) bool {
// TODO: this is not used
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// always ends output with a newline
/*
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
// first, check for common cases: no tabs, or only tabs at beginning of line
i, prefix := 0, 0
@@ -775,6 +775,7 @@ func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
i++
}
}
*/

// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
4  vendor/github.com/mattermost/mattermost-server/v6/model/channel.go  generated  vendored
@@ -27,7 +27,7 @@ const (
ChannelGroupMinUsers = 3
DefaultChannelName = "town-square"
ChannelDisplayNameMaxRunes = 64
ChannelNameMinLength = 2
ChannelNameMinLength = 1
ChannelNameMaxLength = 64
ChannelHeaderMaxRunes = 1024
ChannelPurposeMaxRunes = 250
@@ -216,7 +216,7 @@ func (o *Channel) IsValid() *AppError {
}

if !IsValidChannelIdentifier(o.Name) {
return NewAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
return NewAppError("Channel.IsValid", "model.channel.is_valid.1_or_more.app_error", nil, "id="+o.Id, http.StatusBadRequest)
}

if !(o.Type == ChannelTypeOpen || o.Type == ChannelTypePrivate || o.Type == ChannelTypeDirect || o.Type == ChannelTypeGroup) {
13  vendor/github.com/mattermost/mattermost-server/v6/model/channel_stats.go  generated  vendored
@@ -8,4 +8,17 @@ type ChannelStats struct {
MemberCount int64 `json:"member_count"`
GuestCount int64 `json:"guest_count"`
PinnedPostCount int64 `json:"pinnedpost_count"`
FilesCount int64 `json:"files_count"`
}

func (o *ChannelStats) MemberCount_() float64 {
return float64(o.MemberCount)
}

func (o *ChannelStats) GuestCount_() float64 {
return float64(o.GuestCount)
}

func (o *ChannelStats) PinnedPostCount_() float64 {
return float64(o.PinnedPostCount)
}
150  vendor/github.com/mattermost/mattermost-server/v6/model/client4.go  generated  vendored
@@ -2638,6 +2638,30 @@ func (c *Client4) InviteGuestsToTeam(teamId string, userEmails []string, channel
// InviteUsersToTeam invite users by email to the team.
func (c *Client4) InviteUsersToTeamGracefully(teamId string, userEmails []string) ([]*EmailInviteWithError, *Response, error) {
r, err := c.DoAPIPost(c.teamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), ArrayToJSON(userEmails))

if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var list []*EmailInviteWithError
if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
return nil, nil, NewAppError("InviteUsersToTeamGracefully", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return list, BuildResponse(r), nil
}

// InviteUsersToTeam invite users by email to the team.
func (c *Client4) InviteUsersToTeamAndChannelsGracefully(teamId string, userEmails []string, channelIds []string, message string) ([]*EmailInviteWithError, *Response, error) {
memberInvite := MemberInvite{
Emails: userEmails,
ChannelIds: channelIds,
Message: message,
}
buf, err := json.Marshal(memberInvite)
if err != nil {
return nil, nil, NewAppError("InviteMembersToTeamAndChannels", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
}
r, err := c.DoAPIPostBytes(c.teamRoute(teamId)+"/invite/email?graceful="+c.boolString(true), buf)
if err != nil {
return nil, BuildResponse(r), err
}
@@ -3748,6 +3772,49 @@ func (c *Client4) GetPostThread(postId string, etag string, collapsedThreads boo
return &list, BuildResponse(r), nil
}

// GetPostThreadWithOpts gets a post with all the other posts in the same thread.
func (c *Client4) GetPostThreadWithOpts(postID string, etag string, opts GetPostsOptions) (*PostList, *Response, error) {
urlVal := c.postRoute(postID) + "/thread"

values := url.Values{}
if opts.CollapsedThreads {
values.Set("collapsedThreads", "true")
}
if opts.CollapsedThreadsExtended {
values.Set("collapsedThreadsExtended", "true")
}
if opts.SkipFetchThreads {
values.Set("skipFetchThreads", "true")
}
if opts.PerPage != 0 {
values.Set("perPage", strconv.Itoa(opts.PerPage))
}
if opts.FromPost != "" {
values.Set("fromPost", opts.FromPost)
}
if opts.FromCreateAt != 0 {
values.Set("fromCreateAt", strconv.FormatInt(opts.FromCreateAt, 10))
}
if opts.Direction != "" {
values.Set("direction", opts.Direction)
}
urlVal += "?" + values.Encode()

r, err := c.DoAPIGet(urlVal, etag)
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var list PostList
if r.StatusCode == http.StatusNotModified {
return &list, BuildResponse(r), nil
}
if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
return nil, nil, NewAppError("GetPostThread", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return &list, BuildResponse(r), nil
}

// GetPostsForChannel gets a page of posts with an array for ordering for a channel.
func (c *Client4) GetPostsForChannel(channelId string, page, perPage int, etag string, collapsedThreads bool) (*PostList, *Response, error) {
query := fmt.Sprintf("?page=%v&per_page=%v", page, perPage)
@@ -6429,6 +6496,39 @@ func (c *Client4) GetBulkReactions(postIds []string) (map[string][]*Reaction, *R
return reactions, BuildResponse(r), nil
}

func (c *Client4) GetTopReactionsForTeamSince(teamId string, timeRange string, page int, perPage int) (*TopReactionList, *Response, error) {
query := fmt.Sprintf("?time_range=%v&page=%v&per_page=%v", timeRange, page, perPage)
r, err := c.DoAPIGet(c.teamRoute(teamId)+"/top/reactions"+query, "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var topReactions *TopReactionList
if jsonErr := json.NewDecoder(r.Body).Decode(&topReactions); jsonErr != nil {
return nil, nil, NewAppError("GetTopReactionsForTeamSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return topReactions, BuildResponse(r), nil
}

func (c *Client4) GetTopReactionsForUserSince(teamId string, timeRange string, page int, perPage int) (*TopReactionList, *Response, error) {
query := fmt.Sprintf("?time_range=%v&page=%v&per_page=%v", timeRange, page, perPage)

if teamId != "" {
query += fmt.Sprintf("&team_id=%v", teamId)
}

r, err := c.DoAPIGet(c.usersRoute()+"/me/top/reactions"+query, "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var topReactions *TopReactionList
if jsonErr := json.NewDecoder(r.Body).Decode(&topReactions); jsonErr != nil {
return nil, nil, NewAppError("GetTopReactionsForUserSince", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
}
return topReactions, BuildResponse(r), nil
}

// Timezone Section

// GetSupportedTimezone returns a page of supported timezones on the system.
@@ -7658,18 +7758,6 @@ func (c *Client4) GetSubscription() (*Subscription, *Response, error) {
return subscription, BuildResponse(r), nil
}

func (c *Client4) GetSubscriptionStats() (*SubscriptionStats, *Response, error) {
r, err := c.DoAPIGet(c.cloudRoute()+"/subscription/stats", "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)

var stats *SubscriptionStats
json.NewDecoder(r.Body).Decode(&stats)
return stats, BuildResponse(r), nil
}

func (c *Client4) GetInvoicesForSubscription() ([]*Invoice, *Response, error) {
r, err := c.DoAPIGet(c.cloudRoute()+"/subscription/invoices", "")
if err != nil {
@@ -7782,6 +7870,12 @@ func (c *Client4) GetUserThreads(userId, teamId string, options GetUserThreadsOp
if options.Unread {
v.Set("unread", "true")
}
if options.ThreadsOnly {
v.Set("threadsOnly", "true")
}
if options.TotalsOnly {
v.Set("totalsOnly", "true")
}
url := c.userThreadsRoute(userId, teamId)
if len(v) > 0 {
url += "?" + v.Encode()
@@ -7826,6 +7920,18 @@ func (c *Client4) UpdateThreadsReadForUser(userId, teamId string) (*Response, er
return BuildResponse(r), nil
}

func (c *Client4) SetThreadUnreadByPostId(userId, teamId, threadId, postId string) (*ThreadResponse, *Response, error) {
r, err := c.DoAPIPost(fmt.Sprintf("%s/set_unread/%s", c.userThreadRoute(userId, teamId, threadId), postId), "")
if err != nil {
return nil, BuildResponse(r), err
}
defer closeBody(r)
var thread ThreadResponse
json.NewDecoder(r.Body).Decode(&thread)

return &thread, BuildResponse(r), nil
}

func (c *Client4) UpdateThreadReadForUser(userId, teamId, threadId string, timestamp int64) (*ThreadResponse, *Response, error) {
r, err := c.DoAPIPut(fmt.Sprintf("%s/read/%d", c.userThreadRoute(userId, teamId, threadId), timestamp), "")
if err != nil {
@@ -7854,26 +7960,6 @@ func (c *Client4) UpdateThreadFollowForUser(userId, teamId, threadId string, sta
return BuildResponse(r), nil
}

func (c *Client4) SendAdminUpgradeRequestEmail() (*Response, error) {
r, err := c.DoAPIPost(c.cloudRoute()+"/subscription/limitreached/invite", "")
if err != nil {
return BuildResponse(r), err
}
defer closeBody(r)

return BuildResponse(r), nil
}

func (c *Client4) SendAdminUpgradeRequestEmailOnJoin() (*Response, error) {
r, err := c.DoAPIPost(c.cloudRoute()+"/subscription/limitreached/join", "")
if err != nil {
return BuildResponse(r), err
}
defer closeBody(r)

return BuildResponse(r), nil
}

func (c *Client4) GetAllSharedChannels(teamID string, page, perPage int) ([]*SharedChannel, *Response, error) {
url := fmt.Sprintf("%s/%s?page=%d&per_page=%d", c.sharedChannelsRoute(), teamID, page, perPage)
r, err := c.DoAPIGet(url, "")
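A minimal usage sketch of the new Client4 calls added above (GetPostThreadWithOpts plus the top-reactions insights endpoints), assuming a reachable server, a valid token, and placeholder IDs; NewAPIv4Client and SetToken come from the same package but are not shown in this diff:

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/v6/model"
)

func main() {
    client := model.NewAPIv4Client("https://mattermost.example.com") // placeholder URL
    client.SetToken("example-token")                                 // placeholder token

    // Fetch one page of a thread with the new options struct.
    posts, _, err := client.GetPostThreadWithOpts("post-id", "", model.GetPostsOptions{
        CollapsedThreads: true,
        PerPage:          30,
        Direction:        "down",
    })
    if err != nil {
        fmt.Println("thread fetch failed:", err)
        return
    }
    fmt.Println("posts in thread:", len(posts.Posts), "more pages:", posts.HasNext)

    // Top reactions for a team over the last 7 days.
    top, _, err := client.GetTopReactionsForTeamSince("team-id", model.TimeRange7Day, 0, 5)
    if err != nil {
        fmt.Println("insights fetch failed:", err)
        return
    }
    for _, r := range top.Items {
        fmt.Printf("#%d %s (%d)\n", r.Rank, r.EmojiName, r.Count)
    }
}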
8  vendor/github.com/mattermost/mattermost-server/v6/model/cloud.go  generated  vendored
@@ -11,8 +11,6 @@ const (
EventTypeSendAdminWelcomeEmail = "send-admin-welcome-email"
EventTypeTrialWillEnd = "trial-will-end"
EventTypeTrialEnded = "trial-ended"
JoinLimitation = "join"
InviteLimitation = "invite"
)

var MockCWS string
@@ -180,12 +178,6 @@ type FailedPayment struct {
type CloudWorkspaceOwner struct {
UserName string `json:"username"`
}
type SubscriptionStats struct {
RemainingSeats int `json:"remaining_seats"`
IsPaidTier string `json:"is_paid_tier"`
IsFreeTrial string `json:"is_free_trial"`
}

type SubscriptionChange struct {
ProductID string `json:"product_id"`
}
1  vendor/github.com/mattermost/mattermost-server/v6/model/cluster_message.go  generated  vendored
@@ -26,6 +26,7 @@ const (
ClusterEventInvalidateCacheForWebhooks ClusterEvent = "inv_webhooks"
ClusterEventInvalidateCacheForEmojisById ClusterEvent = "inv_emojis_by_id"
ClusterEventInvalidateCacheForEmojisIdByName ClusterEvent = "inv_emojis_id_by_name"
ClusterEventInvalidateCacheForChannelFileCount ClusterEvent = "inv_channel_file_count"
ClusterEventInvalidateCacheForChannelPinnedpostsCounts ClusterEvent = "inv_channel_pinnedposts_counts"
ClusterEventInvalidateCacheForChannelMemberCounts ClusterEvent = "inv_channel_member_counts"
ClusterEventInvalidateCacheForLastPosts ClusterEvent = "inv_last_posts"
126  vendor/github.com/mattermost/mattermost-server/v6/model/config.go  generated  vendored
@@ -184,24 +184,24 @@ const (

TeamSettingsDefaultTeamText = "default"

ElasticsearchSettingsDefaultConnectionURL = "http://localhost:9200"
ElasticsearchSettingsDefaultUsername = "elastic"
ElasticsearchSettingsDefaultPassword = "changeme"
ElasticsearchSettingsDefaultPostIndexReplicas = 1
ElasticsearchSettingsDefaultPostIndexShards = 1
ElasticsearchSettingsDefaultChannelIndexReplicas = 1
ElasticsearchSettingsDefaultChannelIndexShards = 1
ElasticsearchSettingsDefaultUserIndexReplicas = 1
ElasticsearchSettingsDefaultUserIndexShards = 1
ElasticsearchSettingsDefaultAggregatePostsAfterDays = 365
ElasticsearchSettingsDefaultPostsAggregatorJobStartTime = "03:00"
ElasticsearchSettingsDefaultIndexPrefix = ""
ElasticsearchSettingsDefaultLiveIndexingBatchSize = 1
ElasticsearchSettingsDefaultBulkIndexingTimeWindowSeconds = 3600
ElasticsearchSettingsDefaultRequestTimeoutSeconds = 30
ElasticsearchSettingsDefaultConnectionURL = "http://localhost:9200"
ElasticsearchSettingsDefaultUsername = "elastic"
ElasticsearchSettingsDefaultPassword = "changeme"
ElasticsearchSettingsDefaultPostIndexReplicas = 1
ElasticsearchSettingsDefaultPostIndexShards = 1
ElasticsearchSettingsDefaultChannelIndexReplicas = 1
ElasticsearchSettingsDefaultChannelIndexShards = 1
ElasticsearchSettingsDefaultUserIndexReplicas = 1
ElasticsearchSettingsDefaultUserIndexShards = 1
ElasticsearchSettingsDefaultAggregatePostsAfterDays = 365
ElasticsearchSettingsDefaultPostsAggregatorJobStartTime = "03:00"
ElasticsearchSettingsDefaultIndexPrefix = ""
ElasticsearchSettingsDefaultLiveIndexingBatchSize = 1
ElasticsearchSettingsDefaultRequestTimeoutSeconds = 30
ElasticsearchSettingsDefaultBatchSize = 10000

BleveSettingsDefaultIndexDir = ""
BleveSettingsDefaultBulkIndexingTimeWindowSeconds = 3600
BleveSettingsDefaultIndexDir = ""
BleveSettingsDefaultBatchSize = 10000

DataRetentionSettingsDefaultMessageRetentionDays = 365
DataRetentionSettingsDefaultFileRetentionDays = 365
@@ -275,15 +275,16 @@ var ServerTLSSupportedCiphers = map[string]uint16{
}

type ServiceSettings struct {
SiteURL *string `access:"environment_web_server,authentication_saml,write_restrictable"`
WebsocketURL *string `access:"write_restrictable,cloud_restrictable"`
LicenseFileLocation *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
ListenAddress *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` // telemetry: none
ConnectionSecurity *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
TLSCertFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
TLSKeyFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
TLSMinVer *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
TLSStrictTransport *bool `access:"write_restrictable,cloud_restrictable"`
SiteURL *string `access:"environment_web_server,authentication_saml,write_restrictable"`
WebsocketURL *string `access:"write_restrictable,cloud_restrictable"`
LicenseFileLocation *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
ListenAddress *string `access:"environment_web_server,write_restrictable,cloud_restrictable"` // telemetry: none
ConnectionSecurity *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
TLSCertFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
TLSKeyFile *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
TLSMinVer *string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
TLSStrictTransport *bool `access:"write_restrictable,cloud_restrictable"`
// In seconds.
TLSStrictTransportMaxAge *int64 `access:"write_restrictable,cloud_restrictable"` // telemetry: none
TLSOverwriteCiphers []string `access:"write_restrictable,cloud_restrictable"` // telemetry: none
UseLetsEncrypt *bool `access:"environment_web_server,write_restrictable,cloud_restrictable"`
@@ -904,7 +905,6 @@ type ExperimentalSettings struct {
LinkMetadataTimeoutMilliseconds *int64 `access:"experimental_features,write_restrictable,cloud_restrictable"`
RestrictSystemAdmin *bool `access:"experimental_features,write_restrictable"`
UseNewSAMLLibrary *bool `access:"experimental_features,cloud_restrictable"`
CloudUserLimit *int64 `access:"experimental_features,write_restrictable"`
CloudBilling *bool `access:"experimental_features,write_restrictable"`
EnableSharedChannels *bool `access:"experimental_features"`
EnableRemoteClusterService *bool `access:"experimental_features"`
@@ -931,11 +931,6 @@ func (s *ExperimentalSettings) SetDefaults() {
s.RestrictSystemAdmin = NewBool(false)
}

if s.CloudUserLimit == nil {
// User limit 0 is treated as no limit
s.CloudUserLimit = NewInt64(0)
}

if s.CloudBilling == nil {
s.CloudBilling = NewBool(false)
}
@@ -1541,6 +1536,7 @@ type EmailSettings struct {
LoginButtonColor *string `access:"experimental_features"`
LoginButtonBorderColor *string `access:"experimental_features"`
LoginButtonTextColor *string `access:"experimental_features"`
EnableInactivityEmail *bool
}

func (s *EmailSettings) SetDefaults(isUpdate bool) {
@@ -1683,6 +1679,10 @@ func (s *EmailSettings) SetDefaults(isUpdate bool) {
if s.LoginButtonTextColor == nil {
s.LoginButtonTextColor = NewString("#2389D7")
}

if s.EnableInactivityEmail == nil {
s.EnableInactivityEmail = NewBool(true)
}
}

type RateLimitSettings struct {
@@ -1885,17 +1885,18 @@ func (s *ThemeSettings) SetDefaults() {
}

type TeamSettings struct {
SiteName *string `access:"site_customization"`
MaxUsersPerTeam *int `access:"site_users_and_teams"`
EnableUserCreation *bool `access:"authentication_signup"`
EnableOpenServer *bool `access:"authentication_signup"`
EnableUserDeactivation *bool `access:"experimental_features"`
RestrictCreationToDomains *string `access:"authentication_signup"` // telemetry: none
EnableCustomUserStatuses *bool `access:"site_users_and_teams"`
EnableCustomBrand *bool `access:"site_customization"`
CustomBrandText *string `access:"site_customization"`
CustomDescriptionText *string `access:"site_customization"`
RestrictDirectMessage *string `access:"site_users_and_teams"`
SiteName *string `access:"site_customization"`
MaxUsersPerTeam *int `access:"site_users_and_teams"`
EnableUserCreation *bool `access:"authentication_signup"`
EnableOpenServer *bool `access:"authentication_signup"`
EnableUserDeactivation *bool `access:"experimental_features"`
RestrictCreationToDomains *string `access:"authentication_signup"` // telemetry: none
EnableCustomUserStatuses *bool `access:"site_users_and_teams"`
EnableCustomBrand *bool `access:"site_customization"`
CustomBrandText *string `access:"site_customization"`
CustomDescriptionText *string `access:"site_customization"`
RestrictDirectMessage *string `access:"site_users_and_teams"`
// In seconds.
UserStatusAwayTimeout *int64 `access:"experimental_features"`
MaxChannelsPerTeam *int64 `access:"site_users_and_teams"`
MaxNotificationsPerChannel *int64 `access:"environment_push_notification_server"`
@@ -2475,7 +2476,8 @@ type ElasticsearchSettings struct {
PostsAggregatorJobStartTime *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"` // telemetry: none
IndexPrefix *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
LiveIndexingBatchSize *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
BulkIndexingTimeWindowSeconds *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
BulkIndexingTimeWindowSeconds *int `json:",omitempty"` // telemetry: none
BatchSize *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
RequestTimeoutSeconds *int `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
SkipTLSVerification *bool `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
Trace *string `access:"environment_elasticsearch,write_restrictable,cloud_restrictable"`
@@ -2550,8 +2552,8 @@ func (s *ElasticsearchSettings) SetDefaults() {
s.LiveIndexingBatchSize = NewInt(ElasticsearchSettingsDefaultLiveIndexingBatchSize)
}

if s.BulkIndexingTimeWindowSeconds == nil {
s.BulkIndexingTimeWindowSeconds = NewInt(ElasticsearchSettingsDefaultBulkIndexingTimeWindowSeconds)
if s.BatchSize == nil {
s.BatchSize = NewInt(ElasticsearchSettingsDefaultBatchSize)
}

if s.RequestTimeoutSeconds == nil {
@@ -2572,7 +2574,8 @@ type BleveSettings struct {
EnableIndexing *bool `access:"experimental_bleve"`
EnableSearching *bool `access:"experimental_bleve"`
EnableAutocomplete *bool `access:"experimental_bleve"`
BulkIndexingTimeWindowSeconds *int `access:"experimental_bleve"`
BulkIndexingTimeWindowSeconds *int `json:",omitempty"` // telemetry: none
BatchSize *int `access:"experimental_bleve"`
}

func (bs *BleveSettings) SetDefaults() {
@@ -2592,8 +2595,8 @@ func (bs *BleveSettings) SetDefaults() {
bs.EnableAutocomplete = NewBool(false)
}

if bs.BulkIndexingTimeWindowSeconds == nil {
bs.BulkIndexingTimeWindowSeconds = NewInt(BleveSettingsDefaultBulkIndexingTimeWindowSeconds)
if bs.BatchSize == nil {
bs.BatchSize = NewInt(BleveSettingsDefaultBatchSize)
}
}

@@ -2643,9 +2646,10 @@ func (s *DataRetentionSettings) SetDefaults() {
}

type JobSettings struct {
RunJobs *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
CleanupJobsThresholdDays *int `access:"write_restrictable,cloud_restrictable"`
RunJobs *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
RunScheduler *bool `access:"write_restrictable,cloud_restrictable"` // telemetry: none
CleanupJobsThresholdDays *int `access:"write_restrictable,cloud_restrictable"`
CleanupConfigThresholdDays *int `access:"write_restrictable,cloud_restrictable"`
}

func (s *JobSettings) SetDefaults() {
@@ -2660,6 +2664,10 @@ func (s *JobSettings) SetDefaults() {
if s.CleanupJobsThresholdDays == nil {
s.CleanupJobsThresholdDays = NewInt(-1)
}

if s.CleanupConfigThresholdDays == nil {
s.CleanupConfigThresholdDays = NewInt(-1)
}
}

type CloudSettings struct {
@@ -3564,13 +3572,13 @@ func (s *ServiceSettings) isValid() *AppError {

if *s.SiteURL != "" {
if _, err := url.ParseRequestURI(*s.SiteURL); err != nil {
return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, "", http.StatusBadRequest)
return NewAppError("Config.IsValid", "model.config.is_valid.site_url.app_error", nil, err.Error(), http.StatusBadRequest)
}
}

if *s.WebsocketURL != "" {
if _, err := url.ParseRequestURI(*s.WebsocketURL); err != nil {
return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, "", http.StatusBadRequest)
return NewAppError("Config.IsValid", "model.config.is_valid.websocket_url.app_error", nil, err.Error(), http.StatusBadRequest)
}
}

@@ -3632,8 +3640,9 @@ func (s *ElasticsearchSettings) isValid() *AppError {
return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error", nil, "", http.StatusBadRequest)
}

if *s.BulkIndexingTimeWindowSeconds < 1 {
return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest)
minBatchSize := 1
if *s.BatchSize < minBatchSize {
return NewAppError("Config.IsValid", "model.config.is_valid.elastic_search.bulk_indexing_batch_size.app_error", map[string]interface{}{"BatchSize": minBatchSize}, "", http.StatusBadRequest)
}

if *s.RequestTimeoutSeconds < 1 {
@@ -3656,8 +3665,9 @@ func (bs *BleveSettings) isValid() *AppError {
return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.enable_autocomplete.app_error", nil, "", http.StatusBadRequest)
}
}
if *bs.BulkIndexingTimeWindowSeconds < 1 {
return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_time_window_seconds.app_error", nil, "", http.StatusBadRequest)
minBatchSize := 1
if *bs.BatchSize < minBatchSize {
return NewAppError("Config.IsValid", "model.config.is_valid.bleve_search.bulk_indexing_batch_size.app_error", map[string]interface{}{"BatchSize": minBatchSize}, "", http.StatusBadRequest)
}

return nil
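The Elasticsearch and Bleve hunks above move bulk indexing from a time-window knob to a batch-size knob: BulkIndexingTimeWindowSeconds survives only as a json:",omitempty" field so existing config files still decode, while defaulting and validation now run on BatchSize (default 10000). A small sketch of the resulting defaults, using only the SetDefaults methods shown above:

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/v6/model"
)

func main() {
    var es model.ElasticsearchSettings
    es.SetDefaults()
    fmt.Println("elasticsearch batch size:", *es.BatchSize) // 10000

    var bs model.BleveSettings
    bs.SetDefaults()
    fmt.Println("bleve batch size:", *bs.BatchSize) // 10000
}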
14  vendor/github.com/mattermost/mattermost-server/v6/model/data_retention_policy.go  generated  vendored
@@ -13,9 +13,9 @@ type GlobalRetentionPolicy struct {
}

type RetentionPolicy struct {
ID string `db:"Id" json:"id"`
DisplayName string `json:"display_name"`
PostDuration *int64 `json:"post_duration"`
ID string `db:"Id" json:"id"`
DisplayName string `json:"display_name"`
PostDurationDays *int64 `db:"PostDuration" json:"post_duration"`
}

type RetentionPolicyWithTeamAndChannelIDs struct {
@@ -46,8 +46,8 @@ type RetentionPolicyWithTeamAndChannelCountsList struct {
}

type RetentionPolicyForTeam struct {
TeamID string `db:"Id" json:"team_id"`
PostDuration int64 `json:"post_duration"`
TeamID string `db:"Id" json:"team_id"`
PostDurationDays int64 `db:"PostDuration" json:"post_duration"`
}

type RetentionPolicyForTeamList struct {
@@ -56,8 +56,8 @@ type RetentionPolicyForTeamList struct {
}

type RetentionPolicyForChannel struct {
ChannelID string `db:"Id" json:"channel_id"`
PostDuration int64 `json:"post_duration"`
ChannelID string `db:"Id" json:"channel_id"`
PostDurationDays int64 `db:"PostDuration" json:"post_duration"`
}

type RetentionPolicyForChannelList struct {
18  vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go  generated  vendored
@@ -16,9 +16,6 @@ type FeatureFlags struct {
// all other values as false.
TestBoolFeature bool

// Toggle on and off scheduled jobs for cloud user limit emails see MM-29999
CloudDelinquentEmailJobsEnabled bool

// Toggle on and off support for Collapsed Threads
CollapsedThreads bool

@@ -38,18 +35,12 @@ type FeatureFlags struct {

PermalinkPreviews bool

// Determine whether when a user gets created, they'll have noisy notifications e.g. Send desktop notifications for all activity
NewAccountNoisy bool

// Enable Calls plugin support in the mobile app
CallsMobile bool

// A dash separated list for feature flags to turn on for Boards
BoardsFeatureFlags string

// A/B test for the add members to channel button, possible values = ("top", "bottom")
AddMembersToChannel string

// Enable Create First Channel
GuidedChannelCreation bool

@@ -70,12 +61,15 @@ type FeatureFlags struct {

// Enable GraphQL feature
GraphQL bool

InsightsEnabled bool

CommandPalette bool
}

func (f *FeatureFlags) SetDefaults() {
f.TestFeature = "off"
f.TestBoolFeature = false
f.CloudDelinquentEmailJobsEnabled = false
f.CollapsedThreads = true
f.EnableRemoteClusterService = false
f.AppsEnabled = true
@@ -83,10 +77,8 @@ func (f *FeatureFlags) SetDefaults() {
f.PluginApps = ""
f.PluginFocalboard = ""
f.PermalinkPreviews = true
f.NewAccountNoisy = false
f.CallsMobile = false
f.BoardsFeatureFlags = ""
f.AddMembersToChannel = "top"
f.GuidedChannelCreation = false
f.InviteToTeam = "none"
f.CustomGroups = true
@@ -95,6 +87,8 @@ func (f *FeatureFlags) SetDefaults() {
f.EnableInactivityCheckJob = true
f.UseCaseOnboarding = true
f.GraphQL = false
f.InsightsEnabled = false
f.CommandPalette = false
}
func (f *FeatureFlags) Plugins() map[string]string {
rFFVal := reflect.ValueOf(f).Elem()
76  vendor/github.com/mattermost/mattermost-server/v6/model/insights.go  generated  vendored  Normal file
@@ -0,0 +1,76 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package model

import (
"net/http"
"time"
)

const (
TimeRangeToday string = "today"
TimeRange7Day string = "7_day"
TimeRange28Day string = "28_day"
)

type InsightsOpts struct {
StartUnixMilli int64
Page int
PerPage int
}

type InsightsListData struct {
HasNext bool `json:"has_next"`
}

type InsightsData struct {
Rank int `json:"rank"`
}

type TopReactionList struct {
InsightsListData
Items []*TopReaction `json:"items"`
}

type TopReaction struct {
InsightsData
EmojiName string `json:"emoji_name"`
Count int64 `json:"count"`
}

// GetStartUnixMilliForTimeRange gets the unix start time in milliseconds from the given time range.
// Time range can be one of: "1_day", "7_day", or "28_day".
func GetStartUnixMilliForTimeRange(timeRange string) (int64, *AppError) {
now := time.Now()
_, offset := now.Zone()
switch timeRange {
case TimeRangeToday:
return GetStartOfDayMillis(now, offset), nil
case TimeRange7Day:
return GetStartOfDayMillis(now.Add(time.Hour*time.Duration(-168)), offset), nil
case TimeRange28Day:
return GetStartOfDayMillis(now.Add(time.Hour*time.Duration(-672)), offset), nil
}

return GetStartOfDayMillis(now, offset), NewAppError("Insights.IsValidRequest", "model.insights.time_range.app_error", nil, "", http.StatusBadRequest)
}

// GetTopReactionListWithRankAndPagination adds a rank to each item in the given list of TopReaction and checks if there is
// another page that can be fetched based on the given limit and offset. The given list of TopReaction is assumed to be
// sorted by Count. Returns a TopReactionList.
func GetTopReactionListWithRankAndPagination(reactions []*TopReaction, limit int, offset int) *TopReactionList {
// Add pagination support
var hasNext bool
if (limit != 0) && (len(reactions) == limit+1) {
hasNext = true
reactions = reactions[:len(reactions)-1]
}

// Assign rank to each reaction
for i, reaction := range reactions {
reaction.Rank = offset + i + 1
}

return &TopReactionList{InsightsListData: InsightsListData{HasNext: hasNext}, Items: reactions}
}
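A short sketch of how the two helpers in the new insights.go compose, with made-up reaction counts: GetStartUnixMilliForTimeRange resolves the window start, and GetTopReactionListWithRankAndPagination trims the limit+1 sentinel row, sets HasNext, and assigns 1-based ranks:

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/v6/model"
)

func main() {
    since, appErr := model.GetStartUnixMilliForTimeRange(model.TimeRange7Day)
    if appErr != nil {
        fmt.Println("bad time range:", appErr.Error())
        return
    }
    fmt.Println("window starts at (unix ms):", since)

    // Three rows with limit 2: the extra row only signals another page.
    reactions := []*model.TopReaction{
        {EmojiName: "tada", Count: 12},
        {EmojiName: "+1", Count: 9},
        {EmojiName: "eyes", Count: 3},
    }
    list := model.GetTopReactionListWithRankAndPagination(reactions, 2, 0)
    fmt.Println("has next page:", list.HasNext) // true
    for _, r := range list.Items {
        fmt.Printf("rank %d: %s (%d)\n", r.Rank, r.EmojiName, r.Count)
    }
}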
7  vendor/github.com/mattermost/mattermost-server/v6/model/license.go  generated  vendored
@@ -11,9 +11,12 @@ import (
)

const (
DayInSeconds = 24 * 60 * 60
DayInMilliseconds = DayInSeconds * 1000

ExpiredLicenseError = "api.license.add_license.expired.app_error"
InvalidLicenseError = "api.license.add_license.invalid.app_error"
LicenseGracePeriod = 1000 * 60 * 60 * 24 * 10 //10 days
LicenseGracePeriod = DayInMilliseconds * 10 //10 days
LicenseRenewalLink = "https://mattermost.com/renew/"

LicenseShortSkuE10 = "E10"
@@ -307,7 +310,7 @@ func (l *License) HasEnterpriseMarketplacePlugins() bool {
// NewTestLicense returns a license that expires in the future and has the given features.
func NewTestLicense(features ...string) *License {
ret := &License{
ExpiresAt: GetMillis() + 90*24*60*60*1000,
ExpiresAt: GetMillis() + 90*DayInMilliseconds,
Customer: &Customer{},
Features: &Features{},
}
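Both spellings are the same values: DayInMilliseconds = 24 * 60 * 60 * 1000 = 86,400,000, so LicenseGracePeriod stays 10 * 86,400,000 = 864,000,000 ms and the test-license expiry offset stays 90 * 86,400,000 = 7,776,000,000 ms; the refactor only names the unit.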
49  vendor/github.com/mattermost/mattermost-server/v6/model/member_invite.go  generated  vendored  Normal file
@@ -0,0 +1,49 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package model

import (
"encoding/json"
"net/http"
)

type MemberInvite struct {
Emails []string `json:"emails"`
ChannelIds []string `json:"channelIds,omitempty"`
Message string `json:"message"`
}

// IsValid validates that the invitation info is loaded correctly and with the correct structure
func (i *MemberInvite) IsValid() *AppError {
if len(i.Emails) == 0 {
return NewAppError("MemberInvite.IsValid", "model.member.is_valid.emails.app_error", nil, "", http.StatusBadRequest)
}

if len(i.ChannelIds) > 0 {
for _, channel := range i.ChannelIds {
if len(channel) != 26 {
return NewAppError("MemberInvite.IsValid", "model.member.is_valid.channel.app_error", nil, "channel="+channel, http.StatusBadRequest)
}
}
}

return nil
}

func (i *MemberInvite) UnmarshalJSON(b []byte) error {
var emails []string
if err := json.Unmarshal(b, &emails); err == nil {
*i = MemberInvite{}
i.Emails = emails
return nil
}

type TempMemberInvite MemberInvite
var o2 TempMemberInvite
if err := json.Unmarshal(b, &o2); err != nil {
return err
}
*i = MemberInvite(o2)
return nil
}
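The custom UnmarshalJSON above keeps the older invite payload (a bare JSON array of emails) decoding while also accepting the new object form with channel IDs and a message. A small sketch of both shapes; the addresses and the 26-character channel ID are placeholders:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/mattermost/mattermost-server/v6/model"
)

func main() {
    // Legacy payload: a bare array of emails.
    var legacy model.MemberInvite
    if err := json.Unmarshal([]byte(`["a@example.com","b@example.com"]`), &legacy); err != nil {
        panic(err)
    }
    fmt.Println(legacy.Emails, legacy.ChannelIds) // [a@example.com b@example.com] []

    // New payload: emails plus target channels and a message.
    var invite model.MemberInvite
    raw := `{"emails":["a@example.com"],"channelIds":["4xp9fdt77pncbef59f4k1qe83o"],"message":"welcome"}`
    if err := json.Unmarshal([]byte(raw), &invite); err != nil {
        panic(err)
    }
    if appErr := invite.IsValid(); appErr != nil {
        fmt.Println("invalid invite:", appErr.Error())
        return
    }
    fmt.Println(invite.Emails, invite.ChannelIds, invite.Message)
}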
12  vendor/github.com/mattermost/mattermost-server/v6/model/permalink.go  generated  vendored
@@ -8,10 +8,12 @@ type Permalink struct {
}

type PreviewPost struct {
PostID string `json:"post_id"`
Post *Post `json:"post"`
TeamName string `json:"team_name"`
ChannelDisplayName string `json:"channel_display_name"`
PostID string `json:"post_id"`
Post *Post `json:"post"`
TeamName string `json:"team_name"`
ChannelDisplayName string `json:"channel_display_name"`
ChannelType ChannelType `json:"channel_type"`
ChannelID string `json:"channel_id"`
}

func NewPreviewPost(post *Post, team *Team, channel *Channel) *PreviewPost {
@@ -23,5 +25,7 @@ func NewPreviewPost(post *Post, team *Team, channel *Channel) *PreviewPost {
Post: post,
TeamName: team.Name,
ChannelDisplayName: channel.DisplayName,
ChannelType: channel.Type,
ChannelID: channel.Id,
}
}
3  vendor/github.com/mattermost/mattermost-server/v6/model/post.go  generated  vendored
@@ -263,6 +263,9 @@ type GetPostsOptions struct {
SkipFetchThreads bool
CollapsedThreads bool
CollapsedThreadsExtended bool
FromPost string // PostId after which to send the items
FromCreateAt int64 // CreateAt after which to send the items
Direction string // Only accepts up|down. Indicates the order in which to send the items.
}

func (o *Post) Etag() string {
3  vendor/github.com/mattermost/mattermost-server/v6/model/post_list.go  generated  vendored
@@ -14,6 +14,8 @@ type PostList struct {
Posts map[string]*Post `json:"posts"`
NextPostId string `json:"next_post_id"`
PrevPostId string `json:"prev_post_id"`
// HasNext indicates whether there are more items to be fetched or not.
HasNext bool `json:"has_next"`
}

func NewPostList() *PostList {
@@ -39,6 +41,7 @@ func (o *PostList) Clone() *PostList {
Posts: postsCopy,
NextPostId: o.NextPostId,
PrevPostId: o.PrevPostId,
HasNext: o.HasNext,
}
}
2  vendor/github.com/mattermost/mattermost-server/v6/model/shared_channel.go  generated  vendored
@@ -51,7 +51,7 @@ func (sc *SharedChannel) IsValid() *AppError {
}

if !IsValidChannelIdentifier(sc.ShareName) {
return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest)
return NewAppError("SharedChannel.IsValid", "model.channel.is_valid.1_or_more.app_error", nil, "id="+sc.ChannelId, http.StatusBadRequest)
}

if utf8.RuneCountInString(sc.ShareHeader) > ChannelHeaderMaxRunes {
4  vendor/github.com/mattermost/mattermost-server/v6/model/system.go  generated  vendored
@@ -12,7 +12,6 @@ const (
SystemRanUnitTests = "RanUnitTests"
SystemLastSecurityTime = "LastSecurityTime"
SystemActiveLicenseId = "ActiveLicenseId"
SystemLicenseRenewalToken = "LicenseRenewalToken"
SystemLastComplianceTime = "LastComplianceTime"
SystemAsymmetricSigningKeyKey = "AsymmetricSigningKey"
SystemPostActionCookieSecretKey = "PostActionCookieSecret"
@@ -34,9 +33,6 @@ const (
SystemFirstAdminSetupComplete = "FirstAdminSetupComplete"
AwsMeteringReportInterval = 1
AwsMeteringDimensionUsageHrs = "UsageHrs"
UserLimitOverageCycleEndDate = "UserLimitOverageCycleEndDate"
OverUserLimitForgivenCount = "OverUserLimitForgivenCount"
OverUserLimitLastEmailSent = "OverUserLimitLastEmailSent"
)

const (
6  vendor/github.com/mattermost/mattermost-server/v6/model/team.go  generated  vendored
@@ -252,6 +252,12 @@ func (o *Team) IsGroupConstrained() bool {
return o.GroupConstrained != nil && *o.GroupConstrained
}

// ShallowCopy returns a shallow copy of team.
func (o *Team) ShallowCopy() *Team {
c := *o
return &c
}

// The following are some GraphQL methods necessary to return the
// data in float64 type. The spec doesn't support 64 bit integers,
// so we have to pass the data in float64. The _ at the end is
3  vendor/github.com/mattermost/mattermost-server/v6/model/thread.go  generated  vendored
@@ -67,6 +67,9 @@ type GetUserThreadsOpts struct {
// TotalsOnly will not fetch any threads and just fetch the total counts
TotalsOnly bool

// ThreadsOnly will fetch threads but not calculate totals and will return 0
ThreadsOnly bool

// TeamOnly will only fetch threads and unreads for the specified team and excludes DMs/GMs
TeamOnly bool
}
26  vendor/github.com/mattermost/mattermost-server/v6/model/utils.go  generated  vendored
@@ -33,6 +33,7 @@ const (
UppercaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NUMBERS = "0123456789"
SYMBOLS = " !\"\\#$%&'()*+,-./:;<=>?@[]^_`|~"
BinaryParamKey = "MM_BINARY_PARAMETERS"
)

type StringInterface map[string]interface{}
@@ -124,12 +125,19 @@ func (m *StringMap) Scan(value interface{}) error {

// Value converts StringMap to database value
func (m StringMap) Value() (driver.Value, error) {
j, err := json.Marshal(m)
ok := m[BinaryParamKey]
delete(m, BinaryParamKey)
buf, err := json.Marshal(m)
if err != nil {
return nil, err
}
// non utf8 characters are not supported https://mattermost.atlassian.net/browse/MM-41066
return string(j), err
if ok == "true" {
return append([]byte{0x01}, buf...), nil
} else if ok == "false" {
return buf, nil
}
// Key wasn't found. We fall back to the default case.
return string(buf), nil
}

func (StringMap) ImplementsGraphQLType(name string) bool {
@@ -502,21 +510,13 @@ var reservedName = []string{
}

func IsValidChannelIdentifier(s string) bool {

if !IsValidAlphaNumHyphenUnderscore(s, true) {
return false
}

if len(s) < ChannelNameMinLength {
return false
}

return true
return validSimpleAlphaNum.MatchString(s) && len(s) >= ChannelNameMinLength
}

var (
validAlphaNum = regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`)
validAlphaNumHyphenUnderscore = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`)
validSimpleAlphaNum = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]*$`)
validSimpleAlphaNumHyphenUnderscore = regexp.MustCompile(`^[a-zA-Z0-9\-_]+$`)
validSimpleAlphaNumHyphenUnderscorePlus = regexp.MustCompile(`^[a-zA-Z0-9+_-]+$`)
)
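The StringMap.Value change above switches the SQL driver encoding on the reserved MM_BINARY_PARAMETERS key: when it is "true" the marshaled JSON is prefixed with a 0x01 byte, otherwise a plain JSON string is returned as before. A small illustration with placeholder values:

package main

import (
    "fmt"

    "github.com/mattermost/mattermost-server/v6/model"
)

func main() {
    props := model.StringMap{
        "color":              "blue",
        model.BinaryParamKey: "true", // reserved key, removed before marshaling
    }
    v, err := props.Value()
    if err != nil {
        panic(err)
    }
    b := v.([]byte) // binary form: 0x01 followed by the JSON of the remaining keys
    fmt.Printf("prefix=0x%02x json=%s\n", b[0], b[1:])

    plain := model.StringMap{"color": "blue"}
    v, _ = plain.Value()
    fmt.Println(v.(string)) // no reserved key: plain JSON string
}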
2  vendor/github.com/mattermost/mattermost-server/v6/model/version.go  generated  vendored
@@ -13,7 +13,7 @@ import (
// It should be maintained in chronological order with most current
// release at the front of the list.
var versions = []string{
"6.6.1",
"6.7.0",
"6.6.0",
"6.5.0",
"6.4.0",
2  vendor/github.com/mattermost/mattermost-server/v6/shared/filestore/s3store.go  generated  vendored
@@ -297,7 +297,7 @@ func (b *S3FileBackend) MoveFile(oldPath, newPath string) error {
}

if _, err := b.client.CopyObject(context.Background(), dstOpts, srcOpts); err != nil {
return errors.Wrapf(err, "unable to copy the file to %s to the new destionation", newPath)
return errors.Wrapf(err, "unable to copy the file to %s to the new destination", newPath)
}

if err := b.client.RemoveObject(context.Background(), b.bucket, oldPath, s3.RemoveObjectOptions{}); err != nil {
2  vendor/github.com/minio/minio-go/v7/Makefile  generated  vendored
@@ -9,7 +9,7 @@ checks: lint vet test examples functional-test

lint:
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
3  vendor/github.com/minio/minio-go/v7/api-bucket-notification.go  generated  vendored
@@ -103,7 +103,6 @@ func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (
return notification.Configuration{}, err
}
return processBucketNotificationResponse(bucketName, resp)

}

// processes the GetNotification http response from the server.
@@ -207,7 +206,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
// Use a higher buffer to support unexpected
// caching done by proxies
bio.Buffer(notificationEventBuffer, notificationCapacity)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary

// Unmarshal each line, returns marshaled values.
for bio.Scan() {
14  vendor/github.com/minio/minio-go/v7/api-compose-object.go  generated  vendored
@@ -202,8 +202,8 @@ func (opts CopySrcOptions) validate() (err error) {

// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {

metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
// Build headers.
headers := make(http.Header)

@@ -285,8 +285,8 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
}

func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {

partID int, startOffset int64, length int64, metadata map[string]string,
) (p CompletePart, err error) {
headers := make(http.Header)

// Set source
@@ -338,8 +338,8 @@ func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, des
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header) (p CompletePart, err error) {

headers http.Header,
) (p CompletePart, err error) {
// Build query parameters
urlValues := make(url.Values)
urlValues.Set("partNumber", strconv.Itoa(partNumber))
@@ -492,7 +492,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
var h = make(http.Header)
h := make(http.Header)
src.Marshal(h)
if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
dst.Encryption.Marshal(h)
4
vendor/github.com/minio/minio-go/v7/api-get-object-file.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/api-get-object-file.go
generated
vendored
@@ -57,7 +57,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
|
||||
objectDir, _ := filepath.Split(filePath)
|
||||
if objectDir != "" {
|
||||
// Create any missing top level directories.
|
||||
if err := os.MkdirAll(objectDir, 0700); err != nil {
|
||||
if err := os.MkdirAll(objectDir, 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -72,7 +72,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
|
||||
filePartPath := filePath + objectStat.ETag + ".part.minio"
|
||||
|
||||
// If exists, open in append mode. If not create it as a part file.
|
||||
filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
|
||||
filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/api-list.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/api-list.go
generated
vendored
@@ -774,7 +774,6 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
|
||||
}(objectMultipartStatCh)
|
||||
// return.
|
||||
return objectMultipartStatCh
|
||||
|
||||
}
|
||||
|
||||
// listMultipartUploadsQuery - (List Multipart Uploads).
|
||||
|
||||
10
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
generated
vendored
10
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
generated
vendored
@@ -38,7 +38,8 @@ import (
|
||||
)
|
||||
|
||||
func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
|
||||
opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
opts PutObjectOptions,
|
||||
) (info UploadInfo, err error) {
|
||||
info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
|
||||
if err != nil {
|
||||
errResp := ToErrorResponse(err)
|
||||
@@ -240,7 +241,8 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object
|
||||
|
||||
// uploadPart - Uploads a part in a multipart upload.
|
||||
func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
|
||||
partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
|
||||
partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide,
|
||||
) (ObjectPart, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return ObjectPart{}, err
|
||||
@@ -311,7 +313,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadI
|
||||
|
||||
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
|
||||
func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
|
||||
complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
|
||||
complete completeMultipartUpload, opts PutObjectOptions,
|
||||
) (UploadInfo, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -392,5 +395,4 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
|
||||
Expiration: expTime,
|
||||
ExpirationRuleID: ruleID,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
14
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
14
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
@@ -42,8 +42,8 @@ import (
|
||||
// - Any reader which has a method 'ReadAt()'
|
||||
//
|
||||
func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
|
||||
reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
|
||||
reader io.Reader, size int64, opts PutObjectOptions,
|
||||
) (info UploadInfo, err error) {
|
||||
if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
|
||||
// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
|
||||
info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
|
||||
@@ -91,7 +91,8 @@ type uploadPartReq struct {
|
||||
// cleaned automatically when the caller i.e http client closes the
|
||||
// stream after uploading all the contents successfully.
|
||||
func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
|
||||
reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
reader io.ReaderAt, size int64, opts PutObjectOptions,
|
||||
) (info UploadInfo, err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -147,7 +148,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
}
|
||||
close(uploadPartsCh)
|
||||
|
||||
var partsBuf = make([][]byte, opts.getNumThreads())
|
||||
partsBuf := make([][]byte, opts.getNumThreads())
|
||||
for i := range partsBuf {
|
||||
partsBuf[i] = make([]byte, 0, partSize)
|
||||
}
|
||||
@@ -171,7 +172,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
}
|
||||
|
||||
n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
|
||||
if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
|
||||
if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
|
||||
uploadedPartsCh <- uploadedPartRes{
|
||||
Error: rerr,
|
||||
}
|
||||
@@ -241,7 +242,8 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
}
|
||||
|
||||
func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
|
||||
reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
reader io.Reader, size int64, opts PutObjectOptions,
|
||||
) (info UploadInfo, err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
|
||||
3
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
3
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
@@ -229,7 +229,8 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
|
||||
//
|
||||
// NOTE: Upon errors during upload multipart operation is entirely aborted.
|
||||
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
|
||||
opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
opts PutObjectOptions,
|
||||
) (info UploadInfo, err error) {
|
||||
if objectSize < 0 && opts.DisableMultipart {
|
||||
return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
|
||||
}
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
generated
vendored
@@ -133,7 +133,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
|
||||
return f, st.Size(), nil
|
||||
}
|
||||
}
|
||||
var flush = func() error { return nil }
|
||||
flush := func() error { return nil }
|
||||
if !opts.Compress {
|
||||
if !opts.InMemory {
|
||||
// Insert buffer for writes.
|
||||
|
||||
5
vendor/github.com/minio/minio-go/v7/api-select.go
generated
vendored
5
vendor/github.com/minio/minio-go/v7/api-select.go
generated
vendored
@@ -519,7 +519,7 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
|
||||
go func() {
|
||||
for {
|
||||
var prelude preludeInfo
|
||||
var headers = make(http.Header)
|
||||
headers := make(http.Header)
|
||||
var err error
|
||||
|
||||
// Create CRC code
|
||||
@@ -624,7 +624,7 @@ func (p preludeInfo) PayloadLen() int64 {
|
||||
// the struct,
|
||||
func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
|
||||
var err error
|
||||
var pInfo = preludeInfo{}
|
||||
pInfo := preludeInfo{}
|
||||
|
||||
// reads total length of the message (first 4 bytes)
|
||||
pInfo.totalLen, err = extractUint32(prelude)
|
||||
@@ -752,7 +752,6 @@ func checkCRC(r io.Reader, expect uint32) error {
|
||||
|
||||
if msgCRC != expect {
|
||||
return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
4
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
@@ -111,7 +111,7 @@ type Options struct {
|
||||
// Global constants.
|
||||
const (
|
||||
libraryName = "minio-go"
|
||||
libraryVersion = "v7.0.23"
|
||||
libraryVersion = "v7.0.24"
|
||||
)
|
||||
|
||||
// User Agent should always following the below style.
|
||||
@@ -537,7 +537,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
|
||||
|
||||
var retryable bool // Indicates if request can be retried.
|
||||
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
|
||||
var reqRetry = MaxRetry // Indicates how many times we can retry the request
|
||||
reqRetry := MaxRetry // Indicates how many times we can retry the request
|
||||
|
||||
if metadata.contentBody != nil {
|
||||
// Check if body is seekable then it is retryable.
|
||||
|
||||
3
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
3
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
@@ -181,6 +181,9 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
|
||||
if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
|
||||
if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
|
||||
targetURL.Host = h
|
||||
if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
|
||||
targetURL.Host = "[" + h + "]"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
4
vendor/github.com/minio/minio-go/v7/core.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/core.go
generated
vendored
@@ -63,8 +63,8 @@ func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBu
|
||||
// CopyObjectPart - creates a part in a multipart upload by copying (a
|
||||
// part of) an existing object.
|
||||
func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
|
||||
partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
|
||||
|
||||
partID int, startOffset, length int64, metadata map[string]string,
|
||||
) (p CompletePart, err error) {
|
||||
return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
|
||||
partID, startOffset, length, metadata)
|
||||
}
|
||||
|
||||
127
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
127
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
@@ -61,6 +61,7 @@ const (
|
||||
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
|
||||
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
|
||||
)
|
||||
|
||||
const (
|
||||
serverEndpoint = "SERVER_ENDPOINT"
|
||||
accessKey = "ACCESS_KEY"
|
||||
@@ -69,8 +70,7 @@ const (
|
||||
enableKMS = "ENABLE_KMS"
|
||||
)
|
||||
|
||||
type mintJSONFormatter struct {
|
||||
}
|
||||
type mintJSONFormatter struct{}
|
||||
|
||||
func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
|
||||
data := make(log.Fields, len(entry.Data))
|
||||
@@ -84,7 +84,7 @@ func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
serialized, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
@@ -168,11 +168,15 @@ func failureLog(testName string, function string, args map[string]interface{}, s
|
||||
var fields log.Fields
|
||||
// log with the fields as per mint
|
||||
if err != nil {
|
||||
fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err}
|
||||
fields = log.Fields{
|
||||
"name": "minio-go: " + testName, "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err,
|
||||
}
|
||||
} else {
|
||||
fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message}
|
||||
fields = log.Fields{
|
||||
"name": "minio-go: " + testName, "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message,
|
||||
}
|
||||
}
|
||||
return log.WithFields(cleanEmptyEntries(fields))
|
||||
}
|
||||
@@ -182,8 +186,10 @@ func ignoredLog(testName string, function string, args map[string]interface{}, s
|
||||
// calculate the test case duration
|
||||
duration := time.Since(startTime)
|
||||
// log with the fields as per mint
|
||||
fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented"}
|
||||
fields := log.Fields{
|
||||
"name": "minio-go: " + testName, "function": function, "args": args,
|
||||
"duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented",
|
||||
}
|
||||
return log.WithFields(cleanEmptyEntries(fields))
|
||||
}
|
||||
|
||||
@@ -632,7 +638,7 @@ func testPutObjectReadAt() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
// Save the data
|
||||
@@ -738,7 +744,7 @@ func testListObjectVersions() {
|
||||
args["objectName"] = objectName
|
||||
|
||||
bufSize := dataFileMap["datafile-10-kB"]
|
||||
var reader = getDataReader("datafile-10-kB")
|
||||
reader := getDataReader("datafile-10-kB")
|
||||
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||
if err != nil {
|
||||
@@ -857,7 +863,7 @@ func testStatObjectWithVersioning() {
|
||||
args["objectName"] = objectName
|
||||
|
||||
bufSize := dataFileMap["datafile-10-kB"]
|
||||
var reader = getDataReader("datafile-10-kB")
|
||||
reader := getDataReader("datafile-10-kB")
|
||||
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||
if err != nil {
|
||||
@@ -975,7 +981,7 @@ func testGetObjectWithVersioning() {
|
||||
|
||||
// Save the contents of datafiles to check with GetObject() reader output later
|
||||
var buffers [][]byte
|
||||
var testFiles = []string{"datafile-1-b", "datafile-10-kB"}
|
||||
testFiles := []string{"datafile-1-b", "datafile-10-kB"}
|
||||
|
||||
for _, testFile := range testFiles {
|
||||
r := getDataReader(testFile)
|
||||
@@ -1117,7 +1123,7 @@ func testPutObjectWithVersioning() {
|
||||
// Save the data concurrently.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(n)
|
||||
var buffers = make([][]byte, n)
|
||||
buffers := make([][]byte, n)
|
||||
var errs [n]error
|
||||
for i := 0; i < n; i++ {
|
||||
r := newRandomReader(int64((1<<20)*i+i), int64(i))
|
||||
@@ -1258,7 +1264,7 @@ func testCopyObjectWithVersioning() {
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
args["objectName"] = objectName
|
||||
|
||||
var testFiles = []string{"datafile-1-b", "datafile-10-kB"}
|
||||
testFiles := []string{"datafile-1-b", "datafile-10-kB"}
|
||||
for _, testFile := range testFiles {
|
||||
r := getDataReader(testFile)
|
||||
buf, err := ioutil.ReadAll(r)
|
||||
@@ -1395,7 +1401,7 @@ func testConcurrentCopyObjectWithVersioning() {
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
args["objectName"] = objectName
|
||||
|
||||
var testFiles = []string{"datafile-10-kB"}
|
||||
testFiles := []string{"datafile-10-kB"}
|
||||
for _, testFile := range testFiles {
|
||||
r := getDataReader(testFile)
|
||||
buf, err := ioutil.ReadAll(r)
|
||||
@@ -1556,7 +1562,7 @@ func testComposeObjectWithVersioning() {
|
||||
args["objectName"] = objectName
|
||||
|
||||
// var testFiles = []string{"datafile-5-MB", "datafile-10-kB"}
|
||||
var testFiles = []string{"datafile-5-MB", "datafile-10-kB"}
|
||||
testFiles := []string{"datafile-5-MB", "datafile-10-kB"}
|
||||
var testFilesBytes [][]byte
|
||||
|
||||
for _, testFile := range testFiles {
|
||||
@@ -2036,7 +2042,7 @@ func testPutObjectWithMetadata() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
// Save the data
|
||||
@@ -2052,7 +2058,8 @@ func testPutObjectWithMetadata() {
|
||||
}
|
||||
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
|
||||
ContentType: customContentType})
|
||||
ContentType: customContentType,
|
||||
})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
||||
return
|
||||
@@ -2282,7 +2289,7 @@ func testGetObjectSeekEnd() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
// Save the data
|
||||
@@ -2404,7 +2411,7 @@ func testGetObjectClosedTwice() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
// Save the data
|
||||
@@ -2807,7 +2814,7 @@ func testFPutObjectMultipart() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
|
||||
var fileName = getMintDataDirFilePath("datafile-129-MB")
|
||||
fileName := getMintDataDirFilePath("datafile-129-MB")
|
||||
if fileName == "" {
|
||||
// Make a temp file with minPartSize bytes of data.
|
||||
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
|
||||
@@ -2916,7 +2923,7 @@ func testFPutObject() {
|
||||
|
||||
// Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
|
||||
// Use different data in part for multipart tests to check parts are uploaded in correct order.
|
||||
var fName = getMintDataDirFilePath("datafile-129-MB")
|
||||
fName := getMintDataDirFilePath("datafile-129-MB")
|
||||
if fName == "" {
|
||||
// Make a temp file with minPartSize bytes of data.
|
||||
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
|
||||
@@ -3082,7 +3089,7 @@ func testFPutObjectContext() {
|
||||
|
||||
// Upload 1 parts worth of data to use multipart upload.
|
||||
// Use different data in part for multipart tests to check parts are uploaded in correct order.
|
||||
var fName = getMintDataDirFilePath("datafile-1-MB")
|
||||
fName := getMintDataDirFilePath("datafile-1-MB")
|
||||
if fName == "" {
|
||||
// Make a temp file with 1 MiB bytes of data.
|
||||
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
|
||||
@@ -3134,7 +3141,6 @@ func testFPutObjectContext() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Tests FPutObject request when context cancels after timeout
|
||||
@@ -3183,7 +3189,7 @@ func testFPutObjectContextV2() {
|
||||
|
||||
// Upload 1 parts worth of data to use multipart upload.
|
||||
// Use different data in part for multipart tests to check parts are uploaded in correct order.
|
||||
var fName = getMintDataDirFilePath("datafile-1-MB")
|
||||
fName := getMintDataDirFilePath("datafile-1-MB")
|
||||
if fName == "" {
|
||||
// Make a temp file with 1 MiB bytes of data.
|
||||
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
|
||||
@@ -3237,7 +3243,6 @@ func testFPutObjectContextV2() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Test validates putObject with context to see if request cancellation is honored.
|
||||
@@ -3283,7 +3288,7 @@ func testPutObjectContext() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
|
||||
args["objectName"] = objectName
|
||||
@@ -3312,7 +3317,6 @@ func testPutObjectContext() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Tests get object with s3zip extensions.
|
||||
@@ -3428,7 +3432,7 @@ func testGetObjectS3Zip() {
|
||||
lOpts.Prefix = objectName + "/"
|
||||
lOpts.Recursive = true
|
||||
list := c.ListObjects(context.Background(), bucketName, lOpts)
|
||||
var listed = map[string]minio.ObjectInfo{}
|
||||
listed := map[string]minio.ObjectInfo{}
|
||||
for item := range list {
|
||||
if item.Err != nil {
|
||||
break
|
||||
@@ -3547,7 +3551,7 @@ func testGetObjectReadSeekFunctional() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -3710,7 +3714,7 @@ func testGetObjectReadAtFunctional() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -3887,7 +3891,7 @@ func testGetObjectReadAtWhenEOFWasReached() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -4004,7 +4008,7 @@ func testPresignedPostPolicy() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
// Generate 33K of data.
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -4081,7 +4085,7 @@ func testPresignedPostPolicy() {
|
||||
}
|
||||
|
||||
// Get a 33KB file to upload and test if set post policy works
|
||||
var filePath = getMintDataDirFilePath("datafile-33-kB")
|
||||
filePath := getMintDataDirFilePath("datafile-33-kB")
|
||||
if filePath == "" {
|
||||
// Make a temp file with 33 KB data.
|
||||
file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
|
||||
@@ -4228,7 +4232,7 @@ func testCopyObject() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
|
||||
// Save the data
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -4421,7 +4425,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() {
|
||||
|
||||
// Generate 129MiB of data.
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -4603,7 +4607,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() {
|
||||
|
||||
// Generate 129MiB of data.
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -4777,7 +4781,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() {
|
||||
|
||||
// Generate 129MiB of data.
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -4960,7 +4964,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() {
|
||||
|
||||
// Generate 129MiB of data.
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -5972,7 +5976,6 @@ func testFunctional() {
|
||||
"objectName": objectName,
|
||||
}
|
||||
newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
|
||||
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "GetObject failed", err)
|
||||
return
|
||||
@@ -6025,7 +6028,6 @@ func testFunctional() {
|
||||
"expires": 3600 * time.Second,
|
||||
}
|
||||
presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
|
||||
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
|
||||
return
|
||||
@@ -6089,7 +6091,6 @@ func testFunctional() {
|
||||
"expires": 3600 * time.Second,
|
||||
}
|
||||
presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
|
||||
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
|
||||
return
|
||||
@@ -6189,7 +6190,6 @@ func testFunctional() {
|
||||
"expires": 3600 * time.Second,
|
||||
}
|
||||
presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
|
||||
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
|
||||
return
|
||||
@@ -6513,7 +6513,7 @@ func testPutObjectUploadSeekedObject() {
|
||||
// Seek back to the beginning of the file.
|
||||
tempfile.Seek(0, 0)
|
||||
}
|
||||
var length = 100 * humanize.KiByte
|
||||
length := 100 * humanize.KiByte
|
||||
objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
|
||||
args["objectName"] = objectName
|
||||
|
||||
@@ -6670,7 +6670,7 @@ func testGetObjectClosedTwiceV2() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
// Save the data
|
||||
@@ -6982,7 +6982,7 @@ func testGetObjectReadSeekFunctionalV2() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -7136,7 +7136,7 @@ func testGetObjectReadAtFunctionalV2() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -7303,7 +7303,7 @@ func testCopyObjectV2() {
|
||||
|
||||
// Generate 33K of data.
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
// Save the data
|
||||
@@ -7412,7 +7412,6 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) {
|
||||
|
||||
// Make a new bucket in 'us-east-1' (source bucket).
|
||||
err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
|
||||
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
|
||||
return
|
||||
@@ -9935,6 +9934,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() {
|
||||
|
||||
// Do not need to remove destBucketName its same as bucketName.
|
||||
}
|
||||
|
||||
func testUserMetadataCopying() {
|
||||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
@@ -10432,7 +10432,7 @@ func testPutObjectNoLengthV2() {
|
||||
args["objectName"] = objectName
|
||||
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
args["size"] = bufSize
|
||||
|
||||
@@ -11162,7 +11162,7 @@ func testGetObjectContext() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
// Save the data
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -11216,7 +11216,6 @@ func testGetObjectContext() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Test get object with FGetObject with a user provided context
|
||||
@@ -11265,7 +11264,7 @@ func testFGetObjectContext() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-1-MB"]
|
||||
var reader = getDataReader("datafile-1-MB")
|
||||
reader := getDataReader("datafile-1-MB")
|
||||
defer reader.Close()
|
||||
// Save the data
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -11304,7 +11303,6 @@ func testFGetObjectContext() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Test get object with GetObject with a user provided context
|
||||
@@ -11354,7 +11352,7 @@ func testGetObjectRanges() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
// Save the data
|
||||
objectName := randString(60, rng, "")
|
||||
@@ -11463,7 +11461,7 @@ func testGetObjectACLContext() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-1-MB"]
|
||||
var reader = getDataReader("datafile-1-MB")
|
||||
reader := getDataReader("datafile-1-MB")
|
||||
defer reader.Close()
|
||||
// Save the data
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -11525,7 +11523,7 @@ func testGetObjectACLContext() {
|
||||
}
|
||||
|
||||
bufSize = dataFileMap["datafile-1-MB"]
|
||||
var reader2 = getDataReader("datafile-1-MB")
|
||||
reader2 := getDataReader("datafile-1-MB")
|
||||
defer reader2.Close()
|
||||
// Save the data
|
||||
objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -11635,7 +11633,7 @@ func testPutObjectContextV2() {
|
||||
}
|
||||
defer cleanupBucket(bucketName, c)
|
||||
bufSize := dataFileMap["datatfile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
|
||||
@@ -11665,7 +11663,6 @@ func testPutObjectContextV2() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Test get object with GetObject with custom context
|
||||
@@ -11713,7 +11710,7 @@ func testGetObjectContextV2() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
// Save the data
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -11765,7 +11762,6 @@ func testGetObjectContextV2() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Test get object with FGetObject with custom context
|
||||
@@ -11814,7 +11810,7 @@ func testFGetObjectContextV2() {
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
bufSize := dataFileMap["datatfile-1-MB"]
|
||||
var reader = getDataReader("datafile-1-MB")
|
||||
reader := getDataReader("datafile-1-MB")
|
||||
defer reader.Close()
|
||||
// Save the data
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
@@ -11855,7 +11851,6 @@ func testFGetObjectContextV2() {
|
||||
}
|
||||
|
||||
successLogger(testName, function, args, startTime).Info()
|
||||
|
||||
}
|
||||
|
||||
// Test list object v1 and V2
|
||||
@@ -11915,7 +11910,7 @@ func testListObjects() {
|
||||
|
||||
for i, object := range testObjects {
|
||||
bufSize := dataFileMap["datafile-33-kB"]
|
||||
var reader = getDataReader("datafile-33-kB")
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
|
||||
minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
|
||||
@@ -12003,7 +11998,7 @@ func testRemoveObjects() {
|
||||
}
|
||||
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
var reader = getDataReader("datafile-129-MB")
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
generated
vendored
@@ -122,7 +122,7 @@ type config struct {
|
||||
// returned if it fails to read from the file.
|
||||
func loadAlias(filename, alias string) (hostConfig, error) {
|
||||
cfg := &config{}
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
configBytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
|
||||
6
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
6
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
@@ -19,6 +19,7 @@ package credentials
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@@ -254,7 +255,10 @@ func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (
|
||||
}
|
||||
|
||||
func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
|
||||
req, err := http.NewRequest(http.MethodPut, endpoint+tokenPath, nil)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
5
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
generated
vendored
5
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
generated
vendored
@@ -105,8 +105,8 @@ func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*
|
||||
}
|
||||
|
||||
func getClientGrantsCredentials(clnt *http.Client, endpoint string,
|
||||
getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) {
|
||||
|
||||
getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
|
||||
) (AssumeRoleWithClientGrantsResponse, error) {
|
||||
accessToken, err := getClientGrantsTokenExpiry()
|
||||
if err != nil {
|
||||
return AssumeRoleWithClientGrantsResponse{}, err
|
||||
@@ -138,7 +138,6 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
|
||||
buf, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return AssumeRoleWithClientGrantsResponse{}, err
|
||||
|
||||
}
|
||||
_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
|
||||
if err != nil {
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
generated
vendored
@@ -174,7 +174,6 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
buf, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return value, err
|
||||
|
||||
}
|
||||
_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
|
||||
if err != nil {
|
||||
|
||||
5
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
5
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
@@ -94,7 +94,7 @@ func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, opt
|
||||
if _, err := url.Parse(endpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var identity = &STSCertificateIdentity{
|
||||
identity := &STSCertificateIdentity{
|
||||
STSEndpoint: endpoint,
|
||||
Client: http.Client{
|
||||
Transport: &http.Transport{
|
||||
@@ -127,7 +127,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
var livetime = i.S3CredentialLivetime
|
||||
livetime := i.S3CredentialLivetime
|
||||
if livetime == 0 {
|
||||
livetime = 1 * time.Hour
|
||||
}
|
||||
@@ -155,7 +155,6 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
buf, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
|
||||
}
|
||||
_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
|
||||
if err != nil {
|
||||
|
||||
4
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
@@ -107,7 +107,8 @@ func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdent
|
||||
}
|
||||
|
||||
func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
|
||||
getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) {
|
||||
getWebIDTokenExpiry func() (*WebIdentityToken, error),
|
||||
) (AssumeRoleWithWebIdentityResponse, error) {
|
||||
idToken, err := getWebIDTokenExpiry()
|
||||
if err != nil {
|
||||
return AssumeRoleWithWebIdentityResponse{}, err
|
||||
@@ -156,7 +157,6 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
|
||||
buf, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return AssumeRoleWithWebIdentityResponse{}, err
|
||||
|
||||
}
|
||||
_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
|
||||
if err != nil {
|
||||
|
||||
2
vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
generated
vendored
2
vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
generated
vendored
@@ -101,7 +101,7 @@ func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
|
||||
if context == nil {
|
||||
return kms{key: keyID, hasContext: false}, nil
|
||||
}
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
serializedContext, err := json.Marshal(context)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
6
vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
generated
vendored
6
vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
generated
vendored
@@ -78,11 +78,13 @@ type Arn struct {
|
||||
|
||||
// NewArn creates new ARN based on the given partition, service, region, account id and resource
|
||||
func NewArn(partition, service, region, accountID, resource string) Arn {
|
||||
return Arn{Partition: partition,
|
||||
return Arn{
|
||||
Partition: partition,
|
||||
Service: service,
|
||||
Region: region,
|
||||
AccountID: accountID,
|
||||
Resource: resource}
|
||||
Resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the string format of the ARN
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
generated
vendored
@@ -432,7 +432,6 @@ func (c *Config) RemoveRule(opts Options) error {
|
||||
}
|
||||
c.Rules = newRules
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Rule - a rule for replication configuration.
|
||||
|
||||
8
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
generated
vendored
8
vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
generated
vendored
@@ -114,8 +114,8 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
|
||||
|
||||
// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
|
||||
func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
|
||||
previousSignature, secretAccessKey string) string {
|
||||
|
||||
previousSignature, secretAccessKey string,
|
||||
) string {
|
||||
chunkStringToSign := buildChunkStringToSign(reqTime, region,
|
||||
previousSignature, chunkData)
|
||||
signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
|
||||
@@ -200,8 +200,8 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
|
||||
// StreamingSignV4 - provides chunked upload signatureV4 support by
|
||||
// implementing io.Reader.
|
||||
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
|
||||
region string, dataLen int64, reqTime time.Time) *http.Request {
|
||||
|
||||
region string, dataLen int64, reqTime time.Time,
|
||||
) *http.Request {
|
||||
// Set headers needed for streaming signature.
|
||||
prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
|
||||
|
||||
|
||||
1
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
1
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
@@ -110,6 +110,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) {
|
||||
// List of HTTP status codes which are retryable.
|
||||
var retryableHTTPStatusCodes = map[int]struct{}{
|
||||
429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
|
||||
499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
|
||||
http.StatusInternalServerError: {},
|
||||
http.StatusBadGateway: {},
|
||||
http.StatusServiceUnavailable: {},
|
||||
|
||||
31
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
31
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
@@ -105,21 +105,6 @@ func sumMD5Base64(data []byte) string {
|
||||
|
||||
// getEndpointURL - construct a new endpoint.
|
||||
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
|
||||
if strings.Contains(endpoint, ":") {
|
||||
host, _, err := net.SplitHostPort(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
|
||||
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
|
||||
return nil, errInvalidArgument(msg)
|
||||
}
|
||||
} else {
|
||||
if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
|
||||
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
|
||||
return nil, errInvalidArgument(msg)
|
||||
}
|
||||
}
|
||||
// If secure is false, use 'http' scheme.
|
||||
scheme := "https"
|
||||
if !secure {
|
||||
@@ -176,12 +161,18 @@ func isValidEndpointURL(endpointURL url.URL) error {
|
||||
if endpointURL.Path != "/" && endpointURL.Path != "" {
|
||||
return errInvalidArgument("Endpoint url cannot have fully qualified paths.")
|
||||
}
|
||||
if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
|
||||
host := endpointURL.Hostname()
|
||||
if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
|
||||
msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards."
|
||||
return errInvalidArgument(msg)
|
||||
}
|
||||
|
||||
if strings.Contains(host, ".s3.amazonaws.com") {
|
||||
if !s3utils.IsAmazonEndpoint(endpointURL) {
|
||||
return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
|
||||
}
|
||||
}
|
||||
if strings.Contains(endpointURL.Host, ".googleapis.com") {
|
||||
if strings.Contains(host, ".googleapis.com") {
|
||||
if !s3utils.IsGoogleEndpoint(endpointURL) {
|
||||
return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
|
||||
}
|
||||
@@ -513,8 +504,10 @@ func isAmzHeader(headerKey string) bool {
|
||||
return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
|
||||
}
|
||||
|
||||
var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
|
||||
var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
|
||||
var (
|
||||
md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
|
||||
sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
|
||||
)
|
||||
|
||||
func newMd5Hasher() md5simd.Hasher {
|
||||
return hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
|
||||
|
||||
13
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
generated
vendored
13
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
generated
vendored
@@ -1,3 +1,16 @@
|
||||
## 1.5.0
|
||||
|
||||
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
|
||||
without `mapstructure` (or the configured tag name) set [GH-277]
|
||||
* New option `ErrorUnset` which makes it an error if any fields
|
||||
in a target struct are not set by the decoding process. [GH-225]
|
||||
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
|
||||
* Decoding to slice from array no longer crashes [GH-265]
|
||||
* Decode nested struct pointers to map [GH-271]
|
||||
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
|
||||
* Fix issue where fields with `,omitempty` would sometimes decode
|
||||
into a map with an empty string key [GH-281]
|
||||
|
||||
## 1.4.3
|
||||
|
||||
* Fix cases where `json.Number` didn't decode properly [GH-261]
|
||||
|
||||
22
vendor/github.com/mitchellh/mapstructure/decode_hooks.go
generated
vendored
22
vendor/github.com/mitchellh/mapstructure/decode_hooks.go
generated
vendored
@@ -77,6 +77,28 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
|
||||
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
|
||||
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(a, b reflect.Value) (interface{}, error) {
|
||||
var allErrs string
|
||||
var out interface{}
|
||||
var err error
|
||||
|
||||
for _, f := range ff {
|
||||
out, err = DecodeHookExec(f, a, b)
|
||||
if err != nil {
|
||||
allErrs += err.Error() + "\n"
|
||||
continue
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
return nil, errors.New(allErrs)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToSliceHookFunc returns a DecodeHookFunc that converts
|
||||
// string to []string by splitting on the given sep.
|
||||
func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||
|
||||
83
vendor/github.com/mitchellh/mapstructure/mapstructure.go
generated
vendored
83
vendor/github.com/mitchellh/mapstructure/mapstructure.go
generated
vendored
@@ -122,7 +122,7 @@
|
||||
// field value is zero and a numeric type, the field is empty, and it won't
|
||||
// be encoded into the destination type.
|
||||
//
|
||||
// type Source {
|
||||
// type Source struct {
|
||||
// Age int `mapstructure:",omitempty"`
|
||||
// }
|
||||
//
|
||||
@@ -215,6 +215,12 @@ type DecoderConfig struct {
|
||||
// (extra keys).
|
||||
ErrorUnused bool
|
||||
|
||||
// If ErrorUnset is true, then it is an error for there to exist
|
||||
// fields in the result that were not set in the decoding process
|
||||
// (extra fields). This only applies to decoding to a struct. This
|
||||
// will affect all nested structs as well.
|
||||
ErrorUnset bool
|
||||
|
||||
// ZeroFields, if set to true, will zero fields before writing them.
|
||||
// For example, a map will be emptied before decoded values are put in
|
||||
// it. If this is false, a map will be merged.
|
||||
@@ -259,6 +265,10 @@ type DecoderConfig struct {
|
||||
// defaults to "mapstructure"
|
||||
TagName string
|
||||
|
||||
// IgnoreUntaggedFields ignores all struct fields without explicit
|
||||
// TagName, comparable to `mapstructure:"-"` as default behaviour.
|
||||
IgnoreUntaggedFields bool
|
||||
|
||||
// MatchName is the function used to match the map key to the struct
|
||||
// field name or tag. Defaults to `strings.EqualFold`. This can be used
|
||||
// to implement case-sensitive tag values, support snake casing, etc.
|
||||
@@ -284,6 +294,11 @@ type Metadata struct {
|
||||
// Unused is a slice of keys that were found in the raw value but
|
||||
// weren't decoded since there was no matching field in the result interface
|
||||
Unused []string
|
||||
|
||||
// Unset is a slice of field names that were found in the result interface
|
||||
// but weren't set in the decoding process since there was no matching value
|
||||
// in the input
|
||||
Unset []string
|
||||
}
|
||||
|
||||
// Decode takes an input structure and uses reflection to translate it to
|
||||
@@ -375,6 +390,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
|
||||
if config.Metadata.Unused == nil {
|
||||
config.Metadata.Unused = make([]string, 0)
|
||||
}
|
||||
|
||||
if config.Metadata.Unset == nil {
|
||||
config.Metadata.Unset = make([]string, 0)
|
||||
}
|
||||
}
|
||||
|
||||
if config.TagName == "" {
|
||||
@@ -906,9 +925,15 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
|
||||
tagValue := f.Tag.Get(d.config.TagName)
|
||||
keyName := f.Name
|
||||
|
||||
if tagValue == "" && d.config.IgnoreUntaggedFields {
|
||||
continue
|
||||
}
|
||||
|
||||
// If Squash is set in the config, we squash the field down.
|
||||
squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
|
||||
|
||||
v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
|
||||
|
||||
// Determine the name of the key in the map
|
||||
if index := strings.Index(tagValue, ","); index != -1 {
|
||||
if tagValue[:index] == "-" {
|
||||
@@ -920,7 +945,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
|
||||
}
|
||||
|
||||
// If "squash" is specified in the tag, we squash the field down.
|
||||
squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
|
||||
squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
|
||||
if squash {
|
||||
// When squashing, the embedded type can be a pointer to a struct.
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
|
||||
@@ -932,7 +957,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
|
||||
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
|
||||
}
|
||||
}
|
||||
keyName = tagValue[:index]
|
||||
if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
|
||||
keyName = keyNameTagValue
|
||||
}
|
||||
} else if len(tagValue) > 0 {
|
||||
if tagValue == "-" {
|
||||
continue
|
||||
@@ -1088,7 +1115,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
||||
}
|
||||
|
||||
// If the input value is nil, then don't allocate since empty != nil
|
||||
if dataVal.IsNil() {
|
||||
if dataValKind != reflect.Array && dataVal.IsNil() {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1250,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
|
||||
dataValKeysUnused[dataValKey.Interface()] = struct{}{}
|
||||
}
|
||||
|
||||
targetValKeysUnused := make(map[interface{}]struct{})
|
||||
errors := make([]string, 0)
|
||||
|
||||
// This slice will keep track of all the structs we'll be decoding.
|
||||
@@ -1354,7 +1382,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
|
||||
|
||||
if !rawMapVal.IsValid() {
|
||||
// There was no matching key in the map for the value in
|
||||
// the struct. Just ignore.
|
||||
// the struct. Remember it for potential errors and metadata.
|
||||
targetValKeysUnused[fieldName] = struct{}{}
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -1414,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
|
||||
errors = appendErrors(errors, err)
|
||||
}
|
||||
|
||||
if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
|
||||
keys := make([]string, 0, len(targetValKeysUnused))
|
||||
for rawKey := range targetValKeysUnused {
|
||||
keys = append(keys, rawKey.(string))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
|
||||
errors = appendErrors(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return &Error{errors}
|
||||
}
|
||||
@@ -1428,6 +1468,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
|
||||
|
||||
d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
|
||||
}
|
||||
for rawKey := range targetValKeysUnused {
|
||||
key := rawKey.(string)
|
||||
if name != "" {
|
||||
key = name + "." + key
|
||||
}
|
||||
|
||||
d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1465,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind {
|
||||
return kind
|
||||
}
|
||||
}
|
||||
|
||||
func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
f := typ.Field(i)
|
||||
if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
|
||||
return true
|
||||
}
|
||||
if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
|
||||
return v
|
||||
}
|
||||
deref := v.Elem()
|
||||
derefT := deref.Type()
|
||||
if isStructTypeConvertibleToMap(derefT, true, tagName) {
|
||||
return deref
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
6
vendor/github.com/pelletier/go-toml/README.md
generated
vendored
6
vendor/github.com/pelletier/go-toml/README.md
generated
vendored
@@ -25,9 +25,9 @@ and [much faster][v2-bench]. If you only need reading and writing TOML documents
(majority of cases), those features are implemented and the API unlikely to
change.

The remaining features (Document structure editing and tooling) will be added
shortly. While pull-requests are welcome on v1, no active development is
expected on it. When v2.0.0 is released, v1 will be deprecated.
The remaining features will be added shortly. While pull-requests are welcome on
v1, no active development is expected on it. When v2.0.0 is released, v1 will be
deprecated.

👉 [go-toml v2][v2]
19
vendor/github.com/pelletier/go-toml/SECURITY.md
generated
vendored
Normal file
19
vendor/github.com/pelletier/go-toml/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Use this section to tell people about which versions of your project are
|
||||
currently being supported with security updates.
|
||||
|
||||
| Version | Supported |
|
||||
| ---------- | ------------------ |
|
||||
| Latest 2.x | :white_check_mark: |
|
||||
| All 1.x | :x: |
|
||||
| All 0.x | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Email a vulnerability report to `security@pelletier.codes`. Make sure to include
|
||||
as many details as possible to reproduce the vulnerability. This is a
|
||||
side-project: I will try to get back to you as quickly as possible, time
|
||||
permitting in my personal life. Providing a working patch helps very much!
|
||||
2
vendor/github.com/pelletier/go-toml/marshal.go
generated
vendored
2
vendor/github.com/pelletier/go-toml/marshal.go
generated
vendored
@@ -1113,7 +1113,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref
|
||||
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
|
||||
}
|
||||
|
||||
if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
|
||||
if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
|
||||
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
|
||||
}
|
||||
if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) {
|
||||
|
||||
47
vendor/github.com/pelletier/go-toml/parser.go
generated
vendored
47
vendor/github.com/pelletier/go-toml/parser.go
generated
vendored
@@ -293,42 +293,41 @@ func (p *tomlParser) parseRvalue() interface{} {
|
||||
return math.NaN()
|
||||
case tokenInteger:
|
||||
cleanedVal := cleanupNumberToken(tok.val)
|
||||
var err error
|
||||
var val int64
|
||||
base := 10
|
||||
s := cleanedVal
|
||||
checkInvalidUnderscore := numberContainsInvalidUnderscore
|
||||
if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
|
||||
switch cleanedVal[1] {
|
||||
case 'x':
|
||||
err = hexNumberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
|
||||
checkInvalidUnderscore = hexNumberContainsInvalidUnderscore
|
||||
base = 16
|
||||
case 'o':
|
||||
err = numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
|
||||
base = 8
|
||||
case 'b':
|
||||
err = numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
|
||||
base = 2
|
||||
default:
|
||||
panic("invalid base") // the lexer should catch this first
|
||||
}
|
||||
} else {
|
||||
err = numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal, 10, 64)
|
||||
s = cleanedVal[2:]
|
||||
}
|
||||
|
||||
err := checkInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
return val
|
||||
|
||||
var val interface{}
|
||||
val, err = strconv.ParseInt(s, base, 64)
|
||||
if err == nil {
|
||||
return val
|
||||
}
|
||||
|
||||
if s[0] != '-' {
|
||||
if val, err = strconv.ParseUint(s, base, 64); err == nil {
|
||||
return val
|
||||
}
|
||||
}
|
||||
p.raiseError(tok, "%s", err)
|
||||
case tokenFloat:
|
||||
err := numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
|
||||
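The reworked integer branch above collects the base and underscore check first, tries `strconv.ParseInt`, and for non-negative literals falls back to `strconv.ParseUint`, so values above `math.MaxInt64` no longer fail to parse. A rough illustration via the public v1 API (the key name and value are arbitrary):

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// An integer that overflows int64 now falls back to ParseUint instead of erroring.
	tree, err := toml.Load("big = 18446744073709551615")
	if err != nil {
		panic(err)
	}
	v := tree.Get("big")
	fmt.Printf("%v (%T)\n", v, v) // 18446744073709551615 (uint64)
}
```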
2
vendor/github.com/pelletier/go-toml/toml.go
generated
vendored
2
vendor/github.com/pelletier/go-toml/toml.go
generated
vendored
@@ -471,7 +471,7 @@ func LoadBytes(b []byte) (tree *Tree, err error) {
|
||||
if _, ok := r.(runtime.Error); ok {
|
||||
panic(r)
|
||||
}
|
||||
err = errors.New(r.(string))
|
||||
err = fmt.Errorf("%s", r)
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
2
vendor/github.com/pelletier/go-toml/v2/LICENSE
generated
vendored
2
vendor/github.com/pelletier/go-toml/v2/LICENSE
generated
vendored
@@ -1,6 +1,6 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton
|
||||
Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
20
vendor/github.com/pelletier/go-toml/v2/README.md
generated
vendored
20
vendor/github.com/pelletier/go-toml/v2/README.md
generated
vendored
@@ -4,17 +4,6 @@ Go library for the [TOML](https://toml.io/en/) format.

This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).

## Development status

This is the upcoming major version of go-toml. It is currently in active
development. As of release v2.0.0-beta.1, the library has reached feature parity
with v1, and fixes a lot known bugs and performance issues along the way.

If you do not need the advanced document editing features of v1, you are
encouraged to try out this version.

[👉 Roadmap for v2](https://github.com/pelletier/go-toml/discussions/506)

[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)

[💬 Anything else](https://github.com/pelletier/go-toml/discussions)
@@ -49,7 +38,7 @@ operations should not be shockingly slow. See [benchmarks](#benchmarks).
### Strict mode

`Decoder` can be set to "strict mode", which makes it error when some parts of
the TOML document was not prevent in the target structure. This is a great way
the TOML document was not present in the target structure. This is a great way
to check for typos. [See example in the documentation][strict].

[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
@@ -551,6 +540,13 @@ complete solutions exist out there.
[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
[dasel]: https://github.com/TomWright/dasel

## Versioning

Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
this document. The last two major versions of Go are supported
(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).

## License

The MIT License (MIT). Read [LICENSE](LICENSE).
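For reference, the strict mode described in the corrected sentence is driven by `Decoder.DisallowUnknownFields`, linked above. A small sketch of the behaviour, with a made-up config struct and TOML document:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-toml/v2"
)

type Config struct {
	Host string `toml:"host"`
}

func main() {
	doc := "host = \"localhost\"\nprot = 8080\n" // note the typo: "prot" instead of "port"

	var cfg Config
	dec := toml.NewDecoder(strings.NewReader(doc))
	dec.DisallowUnknownFields() // strict mode: keys without a matching field become an error

	if err := dec.Decode(&cfg); err != nil {
		fmt.Println(err) // reports that "prot" has no matching field in Config
	}
}
```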
18
vendor/github.com/pelletier/go-toml/v2/marshaler.go
generated
vendored
18
vendor/github.com/pelletier/go-toml/v2/marshaler.go
generated
vendored
@@ -128,7 +128,8 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
|
||||
//
|
||||
// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit
|
||||
// a TOML comment before the value being annotated. Comments are ignored inside
|
||||
// inline tables.
|
||||
// inline tables. For array tables, the comment is only present before the first
|
||||
// element of the array.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
var (
|
||||
b []byte
|
||||
@@ -652,10 +653,19 @@ func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]b
|
||||
}
|
||||
|
||||
func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte {
|
||||
if comment != "" {
|
||||
for len(comment) > 0 {
|
||||
var line string
|
||||
idx := strings.IndexByte(comment, '\n')
|
||||
if idx >= 0 {
|
||||
line = comment[:idx]
|
||||
comment = comment[idx+1:]
|
||||
} else {
|
||||
line = comment
|
||||
comment = ""
|
||||
}
|
||||
b = enc.indent(indent, b)
|
||||
b = append(b, "# "...)
|
||||
b = append(b, comment...)
|
||||
b = append(b, line...)
|
||||
b = append(b, '\n')
|
||||
}
|
||||
return b
|
||||
@@ -881,6 +891,8 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.
|
||||
scratch = append(scratch, "]]\n"...)
|
||||
ctx.skipTableHeader = true
|
||||
|
||||
b = enc.encodeComment(ctx.indent, ctx.options.comment, b)
|
||||
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
b = append(b, scratch...)
|
||||
|
||||
|
||||
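The extended doc comment and the `encodeComment` fix above both concern the `comment` struct tag handled by the encoder. A hedged sketch of its use (the struct and field names are invented); per the new doc text, for array tables the comment is emitted only before the first element:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type Config struct {
	Host string `toml:"host" comment:"Address the server listens on"`
	Port int    `toml:"port"`
}

func main() {
	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	if err := enc.Encode(Config{Host: "localhost", Port: 8080}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// The output begins with the comment line:
	//   # Address the server listens on
	//   host = ...
}
```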
38
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
generated
vendored
38
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
generated
vendored
@@ -866,12 +866,27 @@ func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
|
||||
const (
|
||||
maxInt = int64(^uint(0) >> 1)
|
||||
minInt = -maxInt - 1
|
||||
)
|
||||
const (
|
||||
maxInt = int64(^uint(0) >> 1)
|
||||
minInt = -maxInt - 1
|
||||
)
|
||||
|
||||
// Maximum value of uint for decoding. Currently the decoder parses the integer
|
||||
// into an int64. As a result, on architectures where uint is 64 bits, the
|
||||
// effective maximum uint we can decode is the maximum of int64. On
|
||||
// architectures where uint is 32 bits, the maximum value we can decode is
|
||||
// lower: the maximum of uint32. I didn't find a way to figure out this value at
|
||||
// compile time, so it is computed during initialization.
|
||||
var maxUint int64 = math.MaxInt64
|
||||
|
||||
func init() {
|
||||
m := uint64(^uint(0))
|
||||
if m < uint64(maxUint) {
|
||||
maxUint = int64(m)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
|
||||
i, err := parseInteger(value.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -932,7 +947,7 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error {
|
||||
|
||||
r = reflect.ValueOf(uint8(i))
|
||||
case reflect.Uint:
|
||||
if i < 0 {
|
||||
if i < 0 || i > maxUint {
|
||||
return fmt.Errorf("toml: negative number %d does not fit in an uint", i)
|
||||
}
|
||||
|
||||
@@ -1167,11 +1182,6 @@ func forEachField(t reflect.Type, path []int, do func(name string, path []int))
|
||||
fieldPath := append(path, i)
|
||||
fieldPath = fieldPath[:len(fieldPath):len(fieldPath)]
|
||||
|
||||
if f.Anonymous {
|
||||
forEachField(f.Type, fieldPath, do)
|
||||
continue
|
||||
}
|
||||
|
||||
name := f.Tag.Get("toml")
|
||||
if name == "-" {
|
||||
continue
|
||||
@@ -1180,6 +1190,12 @@ func forEachField(t reflect.Type, path []int, do func(name string, path []int))
|
||||
if i := strings.IndexByte(name, ','); i >= 0 {
|
||||
name = name[:i]
|
||||
}
|
||||
|
||||
if f.Anonymous && name == "" {
|
||||
forEachField(f.Type, fieldPath, do)
|
||||
continue
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
name = f.Name
|
||||
}
|
||||
|
||||
327
vendor/github.com/spf13/cast/caste.go
generated
vendored
327
vendor/github.com/spf13/cast/caste.go
generated
vendored
@@ -34,6 +34,12 @@ func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.
|
||||
return v, nil
|
||||
case string:
|
||||
return StringToDateInDefaultLocation(v, location)
|
||||
case json.Number:
|
||||
s, err1 := ToInt64E(v)
|
||||
if err1 != nil {
|
||||
return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
|
||||
}
|
||||
return time.Unix(s, 0), nil
|
||||
case int:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case int64:
|
||||
@@ -71,6 +77,11 @@ func ToDurationE(i interface{}) (d time.Duration, err error) {
|
||||
d, err = time.ParseDuration(s + "ns")
|
||||
}
|
||||
return
|
||||
case json.Number:
|
||||
var v float64
|
||||
v, err = s.Float64()
|
||||
d = time.Duration(v)
|
||||
return
|
||||
default:
|
||||
err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
|
||||
return
|
||||
@@ -93,6 +104,12 @@ func ToBoolE(i interface{}) (bool, error) {
|
||||
return false, nil
|
||||
case string:
|
||||
return strconv.ParseBool(i.(string))
|
||||
case json.Number:
|
||||
v, err := ToInt64E(b)
|
||||
if err == nil {
|
||||
return v != 0, nil
|
||||
}
|
||||
return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
|
||||
default:
|
||||
return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
|
||||
}
|
||||
@@ -102,13 +119,16 @@ func ToBoolE(i interface{}) (bool, error) {
|
||||
func ToFloat64E(i interface{}) (float64, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return float64(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case float64:
|
||||
return s, nil
|
||||
case float32:
|
||||
return float64(s), nil
|
||||
case int:
|
||||
return float64(s), nil
|
||||
case int64:
|
||||
return float64(s), nil
|
||||
case int32:
|
||||
@@ -133,11 +153,19 @@ func ToFloat64E(i interface{}) (float64, error) {
|
||||
return v, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
|
||||
case json.Number:
|
||||
v, err := s.Float64()
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
}
|
||||
return 0, nil
|
||||
case nil:
|
||||
return 0, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
|
||||
}
|
||||
@@ -147,13 +175,16 @@ func ToFloat64E(i interface{}) (float64, error) {
|
||||
func ToFloat32E(i interface{}) (float32, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return float32(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case float64:
|
||||
return float32(s), nil
|
||||
case float32:
|
||||
return s, nil
|
||||
case int:
|
||||
return float32(s), nil
|
||||
case int64:
|
||||
return float32(s), nil
|
||||
case int32:
|
||||
@@ -178,11 +209,19 @@ func ToFloat32E(i interface{}) (float32, error) {
|
||||
return float32(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
|
||||
case json.Number:
|
||||
v, err := s.Float64()
|
||||
if err == nil {
|
||||
return float32(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
}
|
||||
return 0, nil
|
||||
case nil:
|
||||
return 0, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
|
||||
}
|
||||
@@ -192,9 +231,12 @@ func ToFloat32E(i interface{}) (float32, error) {
|
||||
func ToInt64E(i interface{}) (int64, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return int64(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case int:
|
||||
return int64(s), nil
|
||||
case int64:
|
||||
return s, nil
|
||||
case int32:
|
||||
@@ -218,11 +260,13 @@ func ToInt64E(i interface{}) (int64, error) {
|
||||
case float32:
|
||||
return int64(s), nil
|
||||
case string:
|
||||
v, err := strconv.ParseInt(s, 0, 0)
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
|
||||
case json.Number:
|
||||
return ToInt64E(string(s))
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
@@ -239,9 +283,12 @@ func ToInt64E(i interface{}) (int64, error) {
|
||||
func ToInt32E(i interface{}) (int32, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return int32(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case int:
|
||||
return int32(s), nil
|
||||
case int64:
|
||||
return int32(s), nil
|
||||
case int32:
|
||||
@@ -265,11 +312,13 @@ func ToInt32E(i interface{}) (int32, error) {
|
||||
case float32:
|
||||
return int32(s), nil
|
||||
case string:
|
||||
v, err := strconv.ParseInt(s, 0, 0)
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
return int32(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
|
||||
case json.Number:
|
||||
return ToInt32E(string(s))
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
@@ -286,9 +335,12 @@ func ToInt32E(i interface{}) (int32, error) {
|
||||
func ToInt16E(i interface{}) (int16, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return int16(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case int:
|
||||
return int16(s), nil
|
||||
case int64:
|
||||
return int16(s), nil
|
||||
case int32:
|
||||
@@ -312,11 +364,13 @@ func ToInt16E(i interface{}) (int16, error) {
|
||||
case float32:
|
||||
return int16(s), nil
|
||||
case string:
|
||||
v, err := strconv.ParseInt(s, 0, 0)
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
return int16(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
|
||||
case json.Number:
|
||||
return ToInt16E(string(s))
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
@@ -333,9 +387,12 @@ func ToInt16E(i interface{}) (int16, error) {
|
||||
func ToInt8E(i interface{}) (int8, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return int8(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case int:
|
||||
return int8(s), nil
|
||||
case int64:
|
||||
return int8(s), nil
|
||||
case int32:
|
||||
@@ -359,11 +416,13 @@ func ToInt8E(i interface{}) (int8, error) {
|
||||
case float32:
|
||||
return int8(s), nil
|
||||
case string:
|
||||
v, err := strconv.ParseInt(s, 0, 0)
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
return int8(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
|
||||
case json.Number:
|
||||
return ToInt8E(string(s))
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
@@ -380,9 +439,12 @@ func ToInt8E(i interface{}) (int8, error) {
|
||||
func ToIntE(i interface{}) (int, error) {
|
||||
i = indirect(i)
|
||||
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
return intv, nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case int:
|
||||
return s, nil
|
||||
case int64:
|
||||
return int(s), nil
|
||||
case int32:
|
||||
@@ -406,11 +468,13 @@ func ToIntE(i interface{}) (int, error) {
|
||||
case float32:
|
||||
return int(s), nil
|
||||
case string:
|
||||
v, err := strconv.ParseInt(s, 0, 0)
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
return int(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
|
||||
case json.Number:
|
||||
return ToIntE(string(s))
|
||||
case bool:
|
||||
if s {
|
||||
return 1, nil
|
||||
@@ -427,18 +491,26 @@ func ToIntE(i interface{}) (int, error) {
|
||||
func ToUintE(i interface{}) (uint, error) {
|
||||
i = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseUint(s, 0, 0)
|
||||
if err == nil {
|
||||
return uint(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
|
||||
case int:
|
||||
if s < 0 {
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
if intv < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint(s), nil
|
||||
return uint(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
|
||||
case json.Number:
|
||||
return ToUintE(string(s))
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
@@ -495,18 +567,26 @@ func ToUintE(i interface{}) (uint, error) {
|
||||
func ToUint64E(i interface{}) (uint64, error) {
|
||||
i = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseUint(s, 0, 64)
|
||||
if err == nil {
|
||||
return v, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
|
||||
case int:
|
||||
if s < 0 {
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
if intv < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint64(s), nil
|
||||
return uint64(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint64(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
|
||||
case json.Number:
|
||||
return ToUint64E(string(s))
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
@@ -563,18 +643,26 @@ func ToUint64E(i interface{}) (uint64, error) {
|
||||
func ToUint32E(i interface{}) (uint32, error) {
|
||||
i = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseUint(s, 0, 32)
|
||||
if err == nil {
|
||||
return uint32(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
|
||||
case int:
|
||||
if s < 0 {
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
if intv < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint32(s), nil
|
||||
return uint32(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint32(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
|
||||
case json.Number:
|
||||
return ToUint32E(string(s))
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
@@ -631,18 +719,26 @@ func ToUint32E(i interface{}) (uint32, error) {
|
||||
func ToUint16E(i interface{}) (uint16, error) {
|
||||
i = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseUint(s, 0, 16)
|
||||
if err == nil {
|
||||
return uint16(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
|
||||
case int:
|
||||
if s < 0 {
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
if intv < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint16(s), nil
|
||||
return uint16(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint16(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
|
||||
case json.Number:
|
||||
return ToUint16E(string(s))
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
@@ -699,18 +795,26 @@ func ToUint16E(i interface{}) (uint16, error) {
|
||||
func ToUint8E(i interface{}) (uint8, error) {
|
||||
i = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseUint(s, 0, 8)
|
||||
if err == nil {
|
||||
return uint8(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
|
||||
case int:
|
||||
if s < 0 {
|
||||
intv, ok := toInt(i)
|
||||
if ok {
|
||||
if intv < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint8(s), nil
|
||||
return uint8(intv), nil
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
|
||||
if err == nil {
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
return uint8(v), nil
|
||||
}
|
||||
return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
|
||||
case json.Number:
|
||||
return ToUint8E(string(s))
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
@@ -835,6 +939,8 @@ func ToStringE(i interface{}) (string, error) {
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case json.Number:
|
||||
return s.String(), nil
|
||||
case []byte:
|
||||
return string(s), nil
|
||||
case template.HTML:
|
||||
@@ -1279,30 +1385,30 @@ func (f timeFormat) hasTimezone() bool {
|
||||
|
||||
var (
|
||||
timeFormats = []timeFormat{
|
||||
timeFormat{time.RFC3339, timeFormatNumericTimezone},
|
||||
timeFormat{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
|
||||
timeFormat{time.RFC1123Z, timeFormatNumericTimezone},
|
||||
timeFormat{time.RFC1123, timeFormatNamedTimezone},
|
||||
timeFormat{time.RFC822Z, timeFormatNumericTimezone},
|
||||
timeFormat{time.RFC822, timeFormatNamedTimezone},
|
||||
timeFormat{time.RFC850, timeFormatNamedTimezone},
|
||||
timeFormat{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
|
||||
timeFormat{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
|
||||
timeFormat{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
|
||||
timeFormat{"2006-01-02 15:04:05", timeFormatNoTimezone},
|
||||
timeFormat{time.ANSIC, timeFormatNoTimezone},
|
||||
timeFormat{time.UnixDate, timeFormatNamedTimezone},
|
||||
timeFormat{time.RubyDate, timeFormatNumericTimezone},
|
||||
timeFormat{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
|
||||
timeFormat{"2006-01-02", timeFormatNoTimezone},
|
||||
timeFormat{"02 Jan 2006", timeFormatNoTimezone},
|
||||
timeFormat{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
|
||||
timeFormat{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
|
||||
timeFormat{time.Kitchen, timeFormatTimeOnly},
|
||||
timeFormat{time.Stamp, timeFormatTimeOnly},
|
||||
timeFormat{time.StampMilli, timeFormatTimeOnly},
|
||||
timeFormat{time.StampMicro, timeFormatTimeOnly},
|
||||
timeFormat{time.StampNano, timeFormatTimeOnly},
|
||||
{time.RFC3339, timeFormatNumericTimezone},
|
||||
{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
|
||||
{time.RFC1123Z, timeFormatNumericTimezone},
|
||||
{time.RFC1123, timeFormatNamedTimezone},
|
||||
{time.RFC822Z, timeFormatNumericTimezone},
|
||||
{time.RFC822, timeFormatNamedTimezone},
|
||||
{time.RFC850, timeFormatNamedTimezone},
|
||||
{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
|
||||
{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
|
||||
{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
|
||||
{"2006-01-02 15:04:05", timeFormatNoTimezone},
|
||||
{time.ANSIC, timeFormatNoTimezone},
|
||||
{time.UnixDate, timeFormatNamedTimezone},
|
||||
{time.RubyDate, timeFormatNumericTimezone},
|
||||
{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
|
||||
{"2006-01-02", timeFormatNoTimezone},
|
||||
{"02 Jan 2006", timeFormatNoTimezone},
|
||||
{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
|
||||
{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
|
||||
{time.Kitchen, timeFormatTimeOnly},
|
||||
{time.Stamp, timeFormatTimeOnly},
|
||||
{time.StampMilli, timeFormatTimeOnly},
|
||||
{time.StampMicro, timeFormatTimeOnly},
|
||||
{time.StampNano, timeFormatTimeOnly},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1335,3 +1441,36 @@ func jsonStringToObject(s string, v interface{}) error {
data := []byte(s)
return json.Unmarshal(data, v)
}

// toInt returns the int value of v if v or v's underlying type
// is an int.
// Note that this will return false for int64 etc. types.
func toInt(v interface{}) (int, bool) {
switch v := v.(type) {
case int:
return v, true
case time.Weekday:
return int(v), true
case time.Month:
return int(v), true
default:
return 0, false
}
}

func trimZeroDecimal(s string) string {
var foundZero bool
for i := len(s); i > 0; i-- {
switch s[i-1] {
case '.':
if foundZero {
return s[:i-1]
}
case '0':
foundZero = true
default:
return s
}
}
return s
}
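Taken together, the new `json.Number` cases, the `toInt` fast path, and `trimZeroDecimal` change what the exported cast helpers accept. A small sketch (the values are arbitrary):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Strings with a trailing zero decimal now cast cleanly to integers,
	// because trimZeroDecimal drops the ".0" suffix before strconv.ParseInt runs.
	fmt.Println(cast.ToInt("8.0")) // 8

	u, err := cast.ToUint64E("42.00")
	fmt.Println(u, err) // 42 <nil>

	// json.Number values are routed through the string conversions.
	n := json.Number("1337")
	fmt.Println(cast.ToInt64(n))  // 1337
	fmt.Println(cast.ToString(n)) // 1337
}
```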
2
vendor/github.com/spf13/viper/.editorconfig
generated
vendored
2
vendor/github.com/spf13/viper/.editorconfig
generated
vendored
@@ -11,5 +11,5 @@ trim_trailing_whitespace = true
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
|
||||
[{Makefile, *.mk}]
|
||||
[{Makefile,*.mk}]
|
||||
indent_style = tab
|
||||
|
||||
4
vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
generated
vendored
4
vendor/github.com/spf13/viper/internal/encoding/toml/codec.go
generated
vendored
@@ -1,5 +1,5 @@
|
||||
//go:build !viper_toml2
|
||||
// +build !viper_toml2
|
||||
//go:build viper_toml1
|
||||
// +build viper_toml1
|
||||
|
||||
package toml
|
||||
|
||||
|
||||
4
vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
generated
vendored
4
vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go
generated
vendored
@@ -1,5 +1,5 @@
|
||||
//go:build viper_toml2
|
||||
// +build viper_toml2
|
||||
//go:build !viper_toml1
|
||||
// +build !viper_toml1
|
||||
|
||||
package toml
|
||||
|
||||
|
||||
4
vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
generated
vendored
4
vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go
generated
vendored
@@ -1,5 +1,5 @@
|
||||
//go:build !viper_yaml3
|
||||
// +build !viper_yaml3
|
||||
//go:build viper_yaml2
|
||||
// +build viper_yaml2
|
||||
|
||||
package yaml
|
||||
|
||||
|
||||
4
vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
generated
vendored
4
vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go
generated
vendored
@@ -1,5 +1,5 @@
|
||||
//go:build viper_yaml3
|
||||
// +build viper_yaml3
|
||||
//go:build !viper_yaml2
|
||||
// +build !viper_yaml2
|
||||
|
||||
package yaml
|
||||
|
||||
|
||||
29
vendor/github.com/spf13/viper/viper.go
generated
vendored
29
vendor/github.com/spf13/viper/viper.go
generated
vendored
@@ -1197,6 +1197,17 @@ func (v *Viper) BindEnv(input ...string) error {
return nil
}

// MustBindEnv wraps BindEnv in a panic.
// If there is an error binding an environment variable, MustBindEnv will
// panic.
func MustBindEnv(input ...string) { v.MustBindEnv(input...) }

func (v *Viper) MustBindEnv(input ...string) {
if err := v.BindEnv(input...); err != nil {
panic(fmt.Sprintf("error while binding environment variable: %v", err))
}
}

// Given a key, find the value.
//
// Viper will check to see if an alias exists first.
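A quick illustration of the new package-level helper (the key and environment variable names are invented):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	os.Setenv("APP_PORT", "8080") // illustrative variable

	// MustBindEnv panics on failure instead of returning an error, which is
	// convenient in package init code where the error could not be handled anyway.
	viper.MustBindEnv("port", "APP_PORT")

	fmt.Println(viper.GetInt("port")) // 8080
}
```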
@@ -1798,8 +1809,13 @@ func mergeMaps(
|
||||
tsv, ok := sv.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
v.logger.Error(
|
||||
"Could not cast sv to map[interface{}]interface{}; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
|
||||
sk, svType, tvType, sv, tv)
|
||||
"Could not cast sv to map[interface{}]interface{}",
|
||||
"key", sk,
|
||||
"st", svType,
|
||||
"tt", tvType,
|
||||
"sv", sv,
|
||||
"tv", tv,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1811,8 +1827,13 @@ func mergeMaps(
|
||||
tsv, ok := sv.(map[string]interface{})
|
||||
if !ok {
|
||||
v.logger.Error(
|
||||
"Could not cast sv to map[string]interface{}; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
|
||||
sk, svType, tvType, sv, tv)
|
||||
"Could not cast sv to map[string]interface{}",
|
||||
"key", sk,
|
||||
"st", svType,
|
||||
"tt", tvType,
|
||||
"sv", sv,
|
||||
"tv", tv,
|
||||
)
|
||||
continue
|
||||
}
|
||||
mergeMaps(tsv, ttv, nil)
|
||||
|
||||
10
vendor/github.com/subosito/gotenv/.travis.yml
generated
vendored
10
vendor/github.com/subosito/gotenv/.travis.yml
generated
vendored
@@ -1,10 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.x
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
script:
|
||||
- go test -test.v -coverprofile=coverage.out -covermode=count
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
11
vendor/github.com/subosito/gotenv/CHANGELOG.md
generated
vendored
11
vendor/github.com/subosito/gotenv/CHANGELOG.md
generated
vendored
@@ -1,5 +1,16 @@
|
||||
# Changelog
|
||||
|
||||
## [1.3.0] - 2022-05-23
|
||||
|
||||
### Added
|
||||
|
||||
- Support = within double-quoted strings
|
||||
- Add support for multiline values
|
||||
|
||||
### Changed
|
||||
|
||||
- `OverLoad` prefer environment variables over local variables
|
||||
|
||||
## [1.2.0] - 2019-08-03
|
||||
|
||||
### Added
|
||||
|
||||
7
vendor/github.com/subosito/gotenv/README.md
generated
vendored
7
vendor/github.com/subosito/gotenv/README.md
generated
vendored
@@ -1,12 +1,11 @@
# gotenv

[](https://travis-ci.org/subosito/gotenv)
[](https://ci.appveyor.com/project/subosito/gotenv/branch/master)
[](https://github.com/subosito/gotenv/actions)
[](https://codecov.io/gh/subosito/gotenv)
[](https://goreportcard.com/report/github.com/subosito/gotenv)
[](https://godoc.org/github.com/subosito/gotenv)

Load environment variables dynamically in Go.
Load environment variables from `.env` or `io.Reader` in Go.

## Usage

@@ -120,7 +119,7 @@ Just in case you want to parse environment variables from any `io.Reader`, goten
pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
// gotenv.Env{"FOO": "test", "BAR": "test"}

err, pairs = gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
// gotenv.Env{"FOO": "bar"}
```
9
vendor/github.com/subosito/gotenv/appveyor.yml
generated
vendored
9
vendor/github.com/subosito/gotenv/appveyor.yml
generated
vendored
@@ -1,9 +0,0 @@
|
||||
build: off
|
||||
clone_folder: c:\gopath\src\github.com\subosito\gotenv
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
stack: go 1.10
|
||||
before_test:
|
||||
- go get -t
|
||||
test_script:
|
||||
- go test -v -cover -race
|
||||
127
vendor/github.com/subosito/gotenv/gotenv.go
generated
vendored
127
vendor/github.com/subosito/gotenv/gotenv.go
generated
vendored
@@ -16,6 +16,9 @@ const (
|
||||
|
||||
// Pattern for detecting valid variable within a value
|
||||
variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)`
|
||||
|
||||
// Byte order mark character
|
||||
bom = "\xef\xbb\xbf"
|
||||
)
|
||||
|
||||
// Env holds key/value pair of valid environment variable
|
||||
@@ -84,7 +87,7 @@ func loadenv(override bool, filenames ...string) error {
|
||||
|
||||
// parse and set :)
|
||||
func parset(r io.Reader, override bool) error {
|
||||
env, err := StrictParse(r)
|
||||
env, err := strictParse(r, override)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -110,7 +113,7 @@ func setenv(key, val string, override bool) {
|
||||
// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
|
||||
// This function is skipping any invalid lines and only processing the valid one.
|
||||
func Parse(r io.Reader) Env {
|
||||
env, _ := StrictParse(r)
|
||||
env, _ := strictParse(r, false)
|
||||
return env
|
||||
}
|
||||
|
||||
@@ -118,22 +121,59 @@ func Parse(r io.Reader) Env {
|
||||
// It expands the value of a variable from the environment variable but does not set the value to the environment itself.
|
||||
// This function is returning an error if there are any invalid lines.
|
||||
func StrictParse(r io.Reader) (Env, error) {
|
||||
return strictParse(r, false)
|
||||
}
|
||||
|
||||
func strictParse(r io.Reader, override bool) (Env, error) {
|
||||
env := make(Env)
|
||||
scanner := bufio.NewScanner(r)
|
||||
|
||||
i := 1
|
||||
bom := string([]byte{239, 187, 191})
|
||||
firstLine := true
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
|
||||
if i == 1 {
|
||||
if firstLine {
|
||||
line = strings.TrimPrefix(line, bom)
|
||||
firstLine = false
|
||||
}
|
||||
|
||||
i++
|
||||
if line == "" || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
|
||||
err := parseLine(line, env)
|
||||
quote := ""
|
||||
idx := strings.Index(line, "=")
|
||||
if idx == -1 {
|
||||
idx = strings.Index(line, ":")
|
||||
}
|
||||
if idx > 0 && idx < len(line)-1 {
|
||||
val := strings.TrimSpace(line[idx+1:])
|
||||
if val[0] == '"' || val[0] == '\'' {
|
||||
quote = val[:1]
|
||||
idx = strings.LastIndex(strings.TrimSpace(val[1:]), quote)
|
||||
if idx >= 0 && val[idx] != '\\' {
|
||||
quote = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
for quote != "" && scanner.Scan() {
|
||||
l := scanner.Text()
|
||||
line += "\n" + l
|
||||
idx := strings.LastIndex(l, quote)
|
||||
if idx > 0 && l[idx-1] == '\\' {
|
||||
continue
|
||||
}
|
||||
if idx >= 0 {
|
||||
quote = ""
|
||||
}
|
||||
}
|
||||
|
||||
if quote != "" {
|
||||
return env, fmt.Errorf("missing quotes")
|
||||
}
|
||||
|
||||
err := parseLine(line, env, override)
|
||||
if err != nil {
|
||||
return env, err
|
||||
}
|
||||
@@ -142,9 +182,14 @@ func StrictParse(r io.Reader) (Env, error) {
|
||||
return env, nil
|
||||
}
|
||||
|
||||
func parseLine(s string, env Env) error {
|
||||
rl := regexp.MustCompile(linePattern)
|
||||
rm := rl.FindStringSubmatch(s)
|
||||
var (
|
||||
lineRgx = regexp.MustCompile(linePattern)
|
||||
unescapeRgx = regexp.MustCompile(`\\([^$])`)
|
||||
varRgx = regexp.MustCompile(variablePattern)
|
||||
)
|
||||
|
||||
func parseLine(s string, env Env, override bool) error {
|
||||
rm := lineRgx.FindStringSubmatch(s)
|
||||
|
||||
if len(rm) == 0 {
|
||||
return checkFormat(s, env)
|
||||
@@ -153,35 +198,36 @@ func parseLine(s string, env Env) error {
|
||||
key := rm[1]
|
||||
val := rm[2]
|
||||
|
||||
// trim whitespace
|
||||
val = strings.TrimSpace(val)
|
||||
|
||||
// determine if string has quote prefix
|
||||
hdq := strings.HasPrefix(val, `"`)
|
||||
|
||||
// determine if string has single quote prefix
|
||||
hsq := strings.HasPrefix(val, `'`)
|
||||
|
||||
// trim whitespace
|
||||
val = strings.Trim(val, " ")
|
||||
|
||||
// remove quotes '' or ""
|
||||
rq := regexp.MustCompile(`\A(['"])(.*)(['"])\z`)
|
||||
val = rq.ReplaceAllString(val, "$2")
|
||||
if l := len(val); (hsq || hdq) && l >= 2 {
|
||||
val = val[1 : l-1]
|
||||
}
|
||||
|
||||
if hdq {
|
||||
val = strings.Replace(val, `\n`, "\n", -1)
|
||||
val = strings.Replace(val, `\r`, "\r", -1)
|
||||
val = strings.ReplaceAll(val, `\n`, "\n")
|
||||
val = strings.ReplaceAll(val, `\r`, "\r")
|
||||
|
||||
// Unescape all characters except $ so variables can be escaped properly
|
||||
re := regexp.MustCompile(`\\([^$])`)
|
||||
val = re.ReplaceAllString(val, "$1")
|
||||
val = unescapeRgx.ReplaceAllString(val, "$1")
|
||||
}
|
||||
|
||||
rv := regexp.MustCompile(variablePattern)
|
||||
fv := func(s string) string {
|
||||
return varReplacement(s, hsq, env)
|
||||
return varReplacement(s, hsq, env, override)
|
||||
}
|
||||
|
||||
val = rv.ReplaceAllStringFunc(val, fv)
|
||||
val = parseVal(val, env)
|
||||
if !hsq {
|
||||
val = varRgx.ReplaceAllStringFunc(val, fv)
|
||||
val = parseVal(val, env, hdq, override)
|
||||
}
|
||||
|
||||
env[key] = val
|
||||
return nil
|
||||
@@ -201,7 +247,9 @@ func parseExport(st string, env Env) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func varReplacement(s string, hsq bool, env Env) string {
|
||||
var varNameRgx = regexp.MustCompile(`(\$)(\{?([A-Z0-9_]+)\}?)`)
|
||||
|
||||
func varReplacement(s string, hsq bool, env Env, override bool) string {
|
||||
if strings.HasPrefix(s, "\\") {
|
||||
return strings.TrimPrefix(s, "\\")
|
||||
}
|
||||
@@ -210,9 +258,7 @@ func varReplacement(s string, hsq bool, env Env) string {
|
||||
return s
|
||||
}
|
||||
|
||||
sn := `(\$)(\{?([A-Z0-9_]+)\}?)`
|
||||
rn := regexp.MustCompile(sn)
|
||||
mn := rn.FindStringSubmatch(s)
|
||||
mn := varNameRgx.FindStringSubmatch(s)
|
||||
|
||||
if len(mn) == 0 {
|
||||
return s
|
||||
@@ -220,6 +266,10 @@ func varReplacement(s string, hsq bool, env Env) string {
|
||||
|
||||
v := mn[3]
|
||||
|
||||
if replace, ok := os.LookupEnv(v); ok && !override {
|
||||
return replace
|
||||
}
|
||||
|
||||
replace, ok := env[v]
|
||||
if !ok {
|
||||
replace = os.Getenv(v)
|
||||
@@ -242,21 +292,14 @@ func checkFormat(s string, env Env) error {
|
||||
return fmt.Errorf("line `%s` doesn't match format", s)
|
||||
}
|
||||
|
||||
func parseVal(val string, env Env) string {
|
||||
if strings.Contains(val, "=") {
|
||||
if !(val == "\n" || val == "\r") {
|
||||
kv := strings.Split(val, "\n")
|
||||
func parseVal(val string, env Env, ignoreNewlines bool, override bool) string {
|
||||
if strings.Contains(val, "=") && !ignoreNewlines {
|
||||
kv := strings.Split(val, "\r")
|
||||
|
||||
if len(kv) == 1 {
|
||||
kv = strings.Split(val, "\r")
|
||||
}
|
||||
|
||||
if len(kv) > 1 {
|
||||
val = kv[0]
|
||||
|
||||
for i := 1; i < len(kv); i++ {
|
||||
parseLine(kv[i], env)
|
||||
}
|
||||
if len(kv) > 1 {
|
||||
val = kv[0]
|
||||
for _, l := range kv[1:] {
|
||||
_ = parseLine(l, env, override)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
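The rewritten `strictParse` above adds the multiline-value and quoted-`=` handling mentioned in the gotenv changelog. A sketch of the resulting behaviour (the keys and values are invented):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	// A value spanning two lines, and a value containing '=' inside double quotes.
	src := "KEY=\"first line\nsecond line\"\nDSN=\"user=app password=secret\"\n"

	pairs, err := gotenv.StrictParse(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", pairs["KEY"]) // "first line\nsecond line"
	fmt.Println(pairs["DSN"])        // user=app password=secret
}
```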